/*
 * ISHTP client logic
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */

#ifndef _ISHTP_CLIENT_H_
#define _ISHTP_CLIENT_H_

#include <linux/types.h>
#include "ishtp-dev.h"

/* Client state */
enum cl_state {
	ISHTP_CL_INITIALIZING = 0,
	ISHTP_CL_CONNECTING,
	ISHTP_CL_CONNECTED,
	ISHTP_CL_DISCONNECTING,
	ISHTP_CL_DISCONNECTED
};

/* Tx and Rx ring size */
#define CL_DEF_RX_RING_SIZE	2
#define CL_DEF_TX_RING_SIZE	2
#define CL_MAX_RX_RING_SIZE	32
#define CL_MAX_TX_RING_SIZE	32

#define DMA_SLOT_SIZE		4096
/* Number of IPC fragments after which it's worth sending via DMA */
#define DMA_WORTH_THRESHOLD	3

/* DMA/IPC Tx paths. Any value other than the default enforces that path */
#define CL_TX_PATH_DEFAULT	0
#define CL_TX_PATH_IPC		1
#define CL_TX_PATH_DMA		2
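
/*
 * Illustrative sketch, not part of the driver: how a sender could pick a
 * Tx path from the knobs above. The 'enforced_path' and 'frags' parameters
 * are hypothetical; the real selection lives in the client send code.
 */
static inline int ishtp_cl_example_pick_tx_path(int enforced_path,
						unsigned int frags)
{
	/* A non-default path setting overrides the heuristic */
	if (enforced_path != CL_TX_PATH_DEFAULT)
		return enforced_path;
	/* DMA pays off once the message needs enough IPC fragments */
	return (frags >= DMA_WORTH_THRESHOLD) ?
		CL_TX_PATH_DMA : CL_TX_PATH_IPC;
}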

/* Client Tx buffer list entry */
struct ishtp_cl_tx_ring {
	struct list_head list;
	struct ishtp_msg_data send_buf;
};

/* ISHTP client instance */
struct ishtp_cl {
	struct list_head link;
	struct ishtp_device *dev;
	enum cl_state state;
	int status;

	/* Link to ISHTP bus device */
	struct ishtp_cl_device *device;

	/* IDs of the connected client */
	uint8_t host_client_id;
	uint8_t fw_client_id;
	uint8_t ishtp_flow_ctrl_creds;
	uint8_t out_flow_ctrl_creds;

	/* dma */
	int last_tx_path;
	/* 0: ack wasn't received, 1: ack was received */
	int last_dma_acked;
	unsigned char *last_dma_addr;
	/* 0: ack wasn't received, 1: ack was received */
	int last_ipc_acked;

	/* Rx ring buffer pool */
	unsigned int rx_ring_size;
	struct ishtp_cl_rb free_rb_list;
	spinlock_t free_list_spinlock;
	/* Rx in-process list */
	struct ishtp_cl_rb in_process_list;
	spinlock_t in_process_spinlock;

	/* Client Tx buffers list */
	unsigned int tx_ring_size;
	struct ishtp_cl_tx_ring tx_list, tx_free_list;
	spinlock_t tx_list_spinlock;
	spinlock_t tx_free_list_spinlock;
	size_t tx_offs; /* Offset in buffer at head of 'tx_list' */

	/*
	 * If an FC arrives while the Tx list is not empty, we must know
	 * whether we are in the middle of sending: if so, the FC counter
	 * must be increased; otherwise, sending of the first message in
	 * the list must be started.
	 * (!) This matters only for a counting-FC implementation. With
	 * single-FC the other party may NOT send another FC until it has
	 * received the complete message.
	 */
	int sending;

	/* Send FC spinlock */
	spinlock_t fc_spinlock;

	/* wait queue for connect and disconnect response from FW */
	wait_queue_head_t wait_ctrl_res;

	/* Error stats */
	unsigned int err_send_msg;
	unsigned int err_send_fc;

	/* Send/recv stats */
	unsigned int send_msg_cnt_ipc;
	unsigned int send_msg_cnt_dma;
	unsigned int recv_msg_cnt_ipc;
	unsigned int recv_msg_cnt_dma;
	unsigned int recv_msg_num_frags;
	unsigned int ishtp_flow_ctrl_cnt;
	unsigned int out_flow_ctrl_cnt;

	/* Rx msg ... out FC timing */
	struct timespec ts_rx;
	struct timespec ts_out_fc;
	struct timespec ts_max_fc_delay;
	void *client_data;
};

/* Client connection management internal functions */
int ishtp_can_client_connect(struct ishtp_device *ishtp_dev, uuid_le *uuid);
int ishtp_fw_cl_by_id(struct ishtp_device *dev, uint8_t client_id);
void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl);
void recv_ishtp_cl_msg(struct ishtp_device *dev,
		       struct ishtp_msg_hdr *ishtp_hdr);
int ishtp_cl_read_start(struct ishtp_cl *cl);
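
/*
 * Illustrative sketch, not the driver's actual handler: how the 'sending'
 * flag documented in struct ishtp_cl resolves an incoming flow-control
 * credit, assuming counting-FC is in use.
 */
static inline void ishtp_cl_example_got_fc(struct ishtp_cl *cl)
{
	unsigned long flags;

	spin_lock_irqsave(&cl->tx_list_spinlock, flags);
	if (!list_empty(&cl->tx_list.list) && !cl->sending) {
		/* Idle with queued data: spend the credit right away */
		spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
		ishtp_cl_send_msg(cl->dev, cl);
		return;
	}
	/* Mid-transmission (or nothing queued): bank the credit */
	cl->ishtp_flow_ctrl_creds++;
	spin_unlock_irqrestore(&cl->tx_list_spinlock, flags);
}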

/* Ring Buffer I/F */
int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
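
/*
 * Illustrative sketch, an assumption rather than documented usage: a client
 * wanting deeper rings than CL_DEF_*_RING_SIZE would bump the sizes before
 * the rings are allocated; sizes should stay within the CL_MAX_* bounds.
 */
static inline void ishtp_cl_example_set_ring_sizes(struct ishtp_cl *cl)
{
	cl->rx_ring_size = 16;	/* must not exceed CL_MAX_RX_RING_SIZE */
	cl->tx_ring_size = 16;	/* must not exceed CL_MAX_TX_RING_SIZE */
}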

/* DMA I/F functions */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
			   struct dma_xfer_hbm *hbm);
void ishtp_cl_alloc_dma_buf(struct ishtp_device *dev);
void ishtp_cl_free_dma_buf(struct ishtp_device *dev);
void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
				uint32_t size);
void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
				    void *msg_addr,
				    uint8_t size);
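
/*
 * Illustrative sketch, assuming the helpers above pair as their names read:
 * the sender grabs a region from the DMA pool, copies the payload in, and
 * the region goes back via ishtp_cl_release_dma_acked_mem() only once the
 * firmware acks the transfer, not when the caller returns.
 */
static inline void *ishtp_cl_example_dma_stage(struct ishtp_device *dev,
					       const void *payload,
					       uint32_t len)
{
	void *msg_addr = ishtp_cl_get_dma_send_buf(dev, len);

	if (!msg_addr)
		return NULL;	/* pool exhausted: caller falls back to IPC */
	memcpy(msg_addr, payload, len);
	return msg_addr;	/* released on DMA ack, see above */
}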

/* Request blocks alloc/free I/F */
struct ishtp_cl_rb *ishtp_io_rb_init(struct ishtp_cl *cl);
void ishtp_io_rb_free(struct ishtp_cl_rb *priv_rb);
int ishtp_io_rb_alloc_buf(struct ishtp_cl_rb *rb, size_t length);

/**
 * ishtp_cl_cmp_id - tells whether two clients have the same IDs
 *
 * Returns true if neither pointer is NULL and both the host and
 * firmware client IDs match.
 */
static inline bool ishtp_cl_cmp_id(const struct ishtp_cl *cl1,
				   const struct ishtp_cl *cl2)
{
	return cl1 && cl2 &&
		(cl1->host_client_id == cl2->host_client_id) &&
		(cl1->fw_client_id == cl2->fw_client_id);
}

/* exported functions from ISHTP under client management scope */
struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev);
void ishtp_cl_free(struct ishtp_cl *cl);
int ishtp_cl_link(struct ishtp_cl *cl, int id);
void ishtp_cl_unlink(struct ishtp_cl *cl);
int ishtp_cl_disconnect(struct ishtp_cl *cl);
int ishtp_cl_connect(struct ishtp_cl *cl);
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length);
int ishtp_cl_flush_queues(struct ishtp_cl *cl);
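
/*
 * Illustrative sketch of a typical client session using the exported
 * functions above. Error handling is abbreviated; fw_client_id would be
 * resolved from the firmware client's UUID beforehand, and 'host_id'
 * stands in for however the caller picks a host client id.
 */
static inline int ishtp_cl_example_session(struct ishtp_device *dev,
					   int host_id, uint8_t fw_client_id,
					   uint8_t *payload, size_t len)
{
	struct ishtp_cl *cl;
	int rv;

	cl = ishtp_cl_allocate(dev);
	if (!cl)
		return -ENOMEM;

	rv = ishtp_cl_link(cl, host_id);
	if (rv)
		goto out_free;

	cl->fw_client_id = fw_client_id;
	cl->state = ISHTP_CL_CONNECTING;
	rv = ishtp_cl_connect(cl);
	if (rv)
		goto out_unlink;

	rv = ishtp_cl_send(cl, payload, len);

	ishtp_cl_disconnect(cl);
out_unlink:
	ishtp_cl_unlink(cl);
out_free:
	ishtp_cl_free(cl);
	return rv;
}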

/* exported functions from ISHTP client buffer management scope */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
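
/*
 * Illustrative sketch, not the driver's actual event callback: draining a
 * client's in_process_list and recycling each request block back to the
 * free pool. Field names (buffer.data, buf_idx) follow struct ishtp_cl_rb
 * as declared in ishtp-dev.h; the processing step is left as a stub.
 */
static inline void ishtp_cl_example_drain_rx(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	unsigned long flags;

	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	while (!list_empty(&cl->in_process_list.list)) {
		rb = list_first_entry(&cl->in_process_list.list,
				      struct ishtp_cl_rb, list);
		list_del_init(&rb->list);
		spin_unlock_irqrestore(&cl->in_process_spinlock, flags);

		/* Consume rb->buffer.data (rb->buf_idx bytes) here */

		ishtp_cl_io_rb_recycle(rb);
		spin_lock_irqsave(&cl->in_process_spinlock, flags);
	}
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
}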

#endif /* _ISHTP_CLIENT_H_ */