#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FMR_1M_POOL_SIZE	(8192 / 2)
#define RDS_FMR_1M_MSG_SIZE	256
#define RDS_FMR_8K_MSG_SIZE	2
#define RDS_MR_8K_SCALE		(256 / (RDS_FMR_8K_MSG_SIZE + 1))
#define RDS_FMR_8K_POOL_SIZE	(RDS_MR_8K_SCALE * (8192 / 2))

#define RDS_IB_MAX_SGE		8
#define RDS_IB_RECV_SGE		2

#define RDS_IB_DEFAULT_RECV_WR	1024
#define RDS_IB_DEFAULT_SEND_WR	256

#define RDS_IB_DEFAULT_RETRY_COUNT	2

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

#define RDS_IB_WC_MAX		32
#define RDS_IB_SEND_OP		BIT_ULL(63)

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;

/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try to minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head f_item;
	struct list_head f_cache_entry;
	struct scatterlist f_sg;
};

struct rds_ib_incoming {
	struct list_head ii_frags;
	struct list_head ii_cache_entry;
	struct rds_incoming ii_inc;
};

struct rds_ib_cache_head {
	struct list_head *first;
	unsigned long count;
};

struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head *xfer;
	struct list_head *ready;
};

struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32 dp_saddr;
	__be32 dp_daddr;
	u8 dp_protocol_major;
	u8 dp_protocol_minor;
	__be16 dp_protocol_minor_mask;	/* bitmask */
	__be32 dp_reserved1;
	__be64 dp_ack_seq;
	__be32 dp_credit;	/* non-zero enables flow ctl */
};

struct rds_ib_send_work {
	void *s_op;
	struct ib_send_wr s_wr;
	struct ib_sge s_sge[RDS_IB_MAX_SGE];
	unsigned long s_queued;
};

struct rds_ib_recv_work {
	struct rds_ib_incoming *r_ibinc;
	struct rds_page_frag *r_frag;
	struct ib_recv_wr r_wr;
	struct ib_sge r_sge[2];
};

struct rds_ib_work_ring {
	u32 w_nr;
	u32 w_alloc_ptr;
	u32 w_alloc_ctr;
	u32 w_free_ptr;
	atomic_t w_free_ctr;
};

/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket, so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64 ack_next;
	u64 ack_recv;
	unsigned int ack_required:1;
	unsigned int ack_next_valid:1;
	unsigned int ack_recv_valid:1;
};


struct rds_ib_device;

struct rds_ib_connection {

	struct list_head ib_node;
	struct rds_ib_device *rds_ibdev;
	struct rds_connection *conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id *i_cm_id;
	struct ib_pd *i_pd;
	struct ib_cq *i_send_cq;
	struct ib_cq *i_recv_cq;
	struct ib_wc i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc i_recv_wc[RDS_IB_WC_MAX];

	/* interrupt handling */
	struct tasklet_struct i_send_tasklet;
	struct tasklet_struct i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring i_send_ring;
	struct rm_data_op *i_data_op;
	struct rds_header *i_send_hdrs;
	u64 i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t i_signaled_sends;

	/* rx */
	struct mutex i_recv_mutex;
	struct rds_ib_work_ring i_recv_ring;
	struct rds_ib_incoming *i_ibinc;
	u32 i_recv_data_rem;
	struct rds_header *i_recv_hdrs;
	u64 i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64 i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;

	/* sending acks */
	unsigned long i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t i_ack_next;	/* next ACK to send */
#else
	spinlock_t i_ack_lock;	/* protect i_ack_next */
	u64 i_ack_next;		/* next ACK to send */
#endif
	struct rds_header *i_ack;
	struct ib_send_wr i_ack_wr;
	struct ib_sge i_ack_sge;
	u64 i_ack_dma;
	unsigned long i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them with a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t i_credits;

	/* Protocol version specific information */
	unsigned int i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int i_unsignaled_wrs;
};

/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
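
/*
 * Illustrative sketch only (not part of the RDS API): claiming send
 * credits with a cmpxchg loop, as the i_credits comment above
 * describes.  The real logic lives in rds_ib_send_grab_credits() in
 * ib_send.c; this helper name is hypothetical.
 */
static inline int rds_ib_credits_claim_example(atomic_t *credits,
					       unsigned int wanted)
{
	unsigned int oldval, newval;

	do {
		oldval = atomic_read(credits);
		if (IB_GET_SEND_CREDITS(oldval) < wanted)
			return -EAGAIN;	/* not enough send credits */
		/* Send credits live in the low 16 bits, so plain
		 * subtraction decrements only the send count. */
		newval = oldval - wanted;
	} while (atomic_cmpxchg(credits, oldval, newval) != oldval);

	return 0;
}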

struct rds_ib_ipaddr {
	struct list_head list;
	__be32 ipaddr;
	struct rcu_head rcu;
};

enum {
	RDS_IB_MR_8K_POOL,
	RDS_IB_MR_1M_POOL,
};

struct rds_ib_device {
	struct list_head list;
	struct list_head ipaddr_list;
	struct list_head conn_list;
	struct ib_device *dev;
	struct ib_pd *pd;
	unsigned int max_fmrs;
	struct rds_ib_mr_pool *mr_1m_pool;
	struct rds_ib_mr_pool *mr_8k_pool;
	unsigned int fmr_max_remaps;
	unsigned int max_8k_fmrs;
	unsigned int max_1m_fmrs;
	int max_sge;
	unsigned int max_wrs;
	unsigned int max_initiator_depth;
	unsigned int max_responder_resources;
	spinlock_t spinlock;	/* protect the above */
	atomic_t refcount;
	struct work_struct free_work;
};

#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)

/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)
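
/*
 * Illustrative sketch only: a send completion handler can tell an ACK
 * completion from a data send by the magic wr_id above (data sends
 * carry their ring index tagged with RDS_IB_SEND_OP instead).  The
 * real dispatch lives in rds_ib_send_cqe_handler() in ib_send.c; this
 * helper is hypothetical.
 */
static inline bool rds_ib_wc_is_ack_example(const struct ib_wc *wc)
{
	return wc->wr_id == RDS_IB_ACK_WR_ID;
}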

struct rds_ib_statistics {
	uint64_t s_ib_connect_raced;
	uint64_t s_ib_listen_closed_stale;
	uint64_t s_ib_evt_handler_call;
	uint64_t s_ib_tasklet_call;
	uint64_t s_ib_tx_cq_event;
	uint64_t s_ib_tx_ring_full;
	uint64_t s_ib_tx_throttle;
	uint64_t s_ib_tx_sg_mapping_failure;
	uint64_t s_ib_tx_stalled;
	uint64_t s_ib_tx_credit_updates;
	uint64_t s_ib_rx_cq_event;
	uint64_t s_ib_rx_ring_empty;
	uint64_t s_ib_rx_refill_from_cq;
	uint64_t s_ib_rx_refill_from_thread;
	uint64_t s_ib_rx_alloc_limit;
	uint64_t s_ib_rx_credit_updates;
	uint64_t s_ib_ack_sent;
	uint64_t s_ib_ack_send_failure;
	uint64_t s_ib_ack_send_delayed;
	uint64_t s_ib_ack_send_piggybacked;
	uint64_t s_ib_ack_received;
	uint64_t s_ib_rdma_mr_8k_alloc;
	uint64_t s_ib_rdma_mr_8k_free;
	uint64_t s_ib_rdma_mr_8k_used;
	uint64_t s_ib_rdma_mr_8k_pool_flush;
	uint64_t s_ib_rdma_mr_8k_pool_wait;
	uint64_t s_ib_rdma_mr_8k_pool_depleted;
	uint64_t s_ib_rdma_mr_1m_alloc;
	uint64_t s_ib_rdma_mr_1m_free;
	uint64_t s_ib_rdma_mr_1m_used;
	uint64_t s_ib_rdma_mr_1m_pool_flush;
	uint64_t s_ib_rdma_mr_1m_pool_wait;
	uint64_t s_ib_rdma_mr_1m_pool_depleted;
	uint64_t s_ib_atomic_cswp;
	uint64_t s_ib_atomic_fadd;
};

extern struct workqueue_struct *rds_ib_wq;

/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define them.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev,
					   ib_sg_dma_address(dev, sg),
					   ib_sg_dma_len(dev, sg),
					   direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev,
					      ib_sg_dma_address(dev, sg),
					      ib_sg_dma_len(dev, sg),
					      direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device
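
/*
 * Illustrative usage sketch only (hypothetical helper): sync a receive
 * fragment's single-entry scatterlist for CPU access before reading
 * it, then hand it back to the device, using the wrappers above.
 */
static inline void rds_ib_sync_frag_example(struct ib_device *dev,
					    struct scatterlist *sg)
{
	ib_dma_sync_sg_for_cpu(dev, sg, 1, DMA_FROM_DEVICE);
	/* ... the CPU may now safely read the fragment contents ... */
	ib_dma_sync_sg_for_device(dev, sg, 1, DMA_FROM_DEVICE);
}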


/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_fmr_1m_pool_size;
extern unsigned int rds_ib_fmr_8k_pool_size;
extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;

/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);


#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
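
/*
 * Usage sketch (illustrative): error paths log with a "RDS/IB: "
 * prefix and drop the connection through this macro, e.g.
 *
 *	rds_ib_conn_error(conn, "sending ack failed\n");
 */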

/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_dev,
					     int npages);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);
int rds_ib_fmr_init(void);
void rds_ib_fmr_exit(void);

/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);

/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;

/* ib_send.c */
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);

/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);

/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif