/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __IW_CXGB4_H__
#define __IW_CXGB4_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/inet.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>

#include <asm/byteorder.h>

#include <net/net_namespace.h>

#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "l2t.h"
#include "user.h"

#define DRV_NAME "iw_cxgb4"
#define MOD DRV_NAME ":"

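/* Debug logging: PDBG() only prints when the c4iw_debug flag is non-zero. */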
extern int c4iw_debug;
#define PDBG(fmt, args...) \
do { \
	if (c4iw_debug) \
		printk(MOD fmt, ## args); \
} while (0)

#include "t4.h"

#define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
#define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)

static inline void *cplhdr(struct sk_buff *skb)
{
	return skb->data;
}

#define C4IW_ID_TABLE_F_RANDOM 1	/* Pseudo-randomize the id's returned */
#define C4IW_ID_TABLE_F_EMPTY  2	/* Table is initially empty */

struct c4iw_id_table {
	u32 flags;
	u32 start;	/* logical minimal id */
	u32 last;	/* hint for find */
	u32 max;
	spinlock_t lock;
	unsigned long *table;
};

struct c4iw_resource {
	struct c4iw_id_table tpt_table;
	struct c4iw_id_table qid_table;
	struct c4iw_id_table pdid_table;
};

struct c4iw_qid_list {
	struct list_head entry;
	u32 qid;
};

struct c4iw_dev_ucontext {
	struct list_head qpids;
	struct list_head cqids;
	struct mutex lock;
};

enum c4iw_rdev_flags {
	T4_FATAL_ERROR = (1<<0),
};

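/*
 * Resource accounting: a c4iw_stat tracks total/current/max/failed
 * allocations for each resource type (QIDs, PDs, STAGs, PBL, RQT and
 * on-chip queue memory), plus doorbell and TCAM overflow counters.
 * Updates are serialized by the embedded mutex.
 */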
struct c4iw_stat {
	u64 total;
	u64 cur;
	u64 max;
	u64 fail;
};

struct c4iw_stats {
	struct mutex lock;
	struct c4iw_stat qid;
	struct c4iw_stat pd;
	struct c4iw_stat stag;
	struct c4iw_stat pbl;
	struct c4iw_stat rqt;
	struct c4iw_stat ocqp;
	u64 db_full;
	u64 db_empty;
	u64 db_drop;
	u64 db_state_transitions;
	u64 tcam_full;
};

struct c4iw_rdev {
	struct c4iw_resource resource;
	unsigned long qpshift;
	u32 qpmask;
	unsigned long cqshift;
	u32 cqmask;
	struct c4iw_dev_ucontext uctx;
	struct gen_pool *pbl_pool;
	struct gen_pool *rqt_pool;
	struct gen_pool *ocqp_pool;
	u32 flags;
	struct cxgb4_lld_info lldi;
	unsigned long oc_mw_pa;
	void __iomem *oc_mw_kva;
	struct c4iw_stats stats;
};

static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
{
	return rdev->flags & T4_FATAL_ERROR;
}

static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
{
	return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
}

#define C4IW_WR_TO (10*HZ)

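/*
 * Used to wait for a reply to a posted firmware work request.
 * c4iw_wait_for_reply() blocks on the completion, starting with a
 * C4IW_WR_TO timeout and quadrupling it after each expiry (logging the
 * stalled device); it bails out with -EIO on a fatal device error and
 * otherwise returns the status filled in by c4iw_wake_up().
 */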
struct c4iw_wr_wait {
	struct completion completion;
	int ret;
};

static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
{
	wr_waitp->ret = 0;
	init_completion(&wr_waitp->completion);
}

static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
{
	wr_waitp->ret = ret;
	complete(&wr_waitp->completion);
}

static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
				      struct c4iw_wr_wait *wr_waitp,
				      u32 hwtid, u32 qpid,
				      const char *func)
{
	unsigned to = C4IW_WR_TO;
	int ret;

	do {
		ret = wait_for_completion_timeout(&wr_waitp->completion, to);
		if (!ret) {
			printk(KERN_ERR MOD "%s - Device %s not responding - "
			       "tid %u qpid %u\n", func,
			       pci_name(rdev->lldi.pdev), hwtid, qpid);
			if (c4iw_fatal_error(rdev)) {
				wr_waitp->ret = -EIO;
				break;
			}
			to = to << 2;
		}
	} while (!ret);
	if (wr_waitp->ret)
		PDBG("%s: FW reply %d tid %u qpid %u\n",
		     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
	return wr_waitp->ret;
}

enum db_state {
	NORMAL = 0,
	FLOW_CONTROL = 1,
	RECOVERY = 2
};

struct c4iw_dev {
	struct ib_device ibdev;
	struct c4iw_rdev rdev;
	u32 device_cap_flags;
	struct idr cqidr;
	struct idr qpidr;
	struct idr mmidr;
	spinlock_t lock;
	struct mutex db_mutex;
	struct dentry *debugfs_root;
	enum db_state db_state;
	int qpcnt;
};

static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct c4iw_dev, ibdev);
}

static inline struct c4iw_dev *rdev_to_c4iw_dev(struct c4iw_rdev *rdev)
{
	return container_of(rdev, struct c4iw_dev, rdev);
}

static inline struct c4iw_cq *get_chp(struct c4iw_dev *rhp, u32 cqid)
{
	return idr_find(&rhp->cqidr, cqid);
}

static inline struct c4iw_qp *get_qhp(struct c4iw_dev *rhp, u32 qpid)
{
	return idr_find(&rhp->qpidr, qpid);
}

static inline struct c4iw_mr *get_mhp(struct c4iw_dev *rhp, u32 mmid)
{
	return idr_find(&rhp->mmidr, mmid);
}

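/*
 * idr insert/remove helpers for the cqidr/qpidr/mmidr tables above.
 * The plain variants take rhp->lock with spin_lock_irq() and may sleep
 * in idr_pre_get(GFP_KERNEL); the *_nolock variants use GFP_ATOMIC and
 * leave locking to the caller.
 */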
static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				 void *handle, u32 id, int lock)
{
	int ret;
	int newid;

	do {
		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
			return -ENOMEM;
		if (lock)
			spin_lock_irq(&rhp->lock);
		ret = idr_get_new_above(idr, handle, id, &newid);
		BUG_ON(!ret && newid != id);
		if (lock)
			spin_unlock_irq(&rhp->lock);
	} while (ret == -EAGAIN);

	return ret;
}

static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr,
				void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 1);
}

static inline int insert_handle_nolock(struct c4iw_dev *rhp, struct idr *idr,
				       void *handle, u32 id)
{
	return _insert_handle(rhp, idr, handle, id, 0);
}

static inline void _remove_handle(struct c4iw_dev *rhp, struct idr *idr,
				  u32 id, int lock)
{
	if (lock)
		spin_lock_irq(&rhp->lock);
	idr_remove(idr, id);
	if (lock)
		spin_unlock_irq(&rhp->lock);
}

static inline void remove_handle(struct c4iw_dev *rhp, struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 1);
}

static inline void remove_handle_nolock(struct c4iw_dev *rhp,
					struct idr *idr, u32 id)
{
	_remove_handle(rhp, idr, id, 0);
}

struct c4iw_pd {
	struct ib_pd ibpd;
	u32 pdid;
	struct c4iw_dev *rhp;
};

static inline struct c4iw_pd *to_c4iw_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct c4iw_pd, ibpd);
}

struct tpt_attributes {
	u64 len;
	u64 va_fbo;
	enum fw_ri_mem_perms perms;
	u32 stag;
	u32 pdid;
	u32 qpid;
	u32 pbl_addr;
	u32 pbl_size;
	u32 state:1;
	u32 type:2;
	u32 rsvd:1;
	u32 remote_invaliate_disable:1;
	u32 zbva:1;
	u32 mw_bind_enable:1;
	u32 page_size:5;
};

struct c4iw_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct c4iw_mr, ibmr);
}

struct c4iw_mw {
	struct ib_mw ibmw;
	struct c4iw_dev *rhp;
	u64 kva;
	struct tpt_attributes attr;
};

static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct c4iw_mw, ibmw);
}

struct c4iw_fr_page_list {
	struct ib_fast_reg_page_list ibpl;
	DEFINE_DMA_UNMAP_ADDR(mapping);
	dma_addr_t dma_addr;
	struct c4iw_dev *dev;
	int size;
};

static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
					struct ib_fast_reg_page_list *ibpl)
{
	return container_of(ibpl, struct c4iw_fr_page_list, ibpl);
}

struct c4iw_cq {
	struct ib_cq ibcq;
	struct c4iw_dev *rhp;
	struct t4_cq cq;
	spinlock_t lock;
	spinlock_t comp_handler_lock;
	atomic_t refcnt;
	wait_queue_head_t wait;
};

static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct c4iw_cq, ibcq);
}

struct c4iw_mpa_attributes {
	u8 initiator;
	u8 recv_marker_enabled;
	u8 xmit_marker_enabled;
	u8 crc_enabled;
	u8 enhanced_rdma_conn;
	u8 version;
	u8 p2p_type;
};

struct c4iw_qp_attributes {
	u32 scq;
	u32 rcq;
	u32 sq_num_entries;
	u32 rq_num_entries;
	u32 sq_max_sges;
	u32 sq_max_sges_rdma_write;
	u32 rq_max_sges;
	u32 state;
	u8 enable_rdma_read;
	u8 enable_rdma_write;
	u8 enable_bind;
	u8 enable_mmid0_fastreg;
	u32 max_ord;
	u32 max_ird;
	u32 pd;
	u32 next_state;
	char terminate_buffer[52];
	u32 terminate_msg_len;
	u8 is_terminate_local;
	struct c4iw_mpa_attributes mpa_attr;
	struct c4iw_ep *llp_stream_handle;
	u8 layer_etype;
	u8 ecode;
	u16 sq_db_inc;
	u16 rq_db_inc;
};

struct c4iw_qp {
	struct ib_qp ibqp;
	struct c4iw_dev *rhp;
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attr;
	struct t4_wq wq;
	spinlock_t lock;
	struct mutex mutex;
	atomic_t refcnt;
	wait_queue_head_t wait;
	struct timer_list timer;
};

static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct c4iw_qp, ibqp);
}

struct c4iw_ucontext {
	struct ib_ucontext ibucontext;
	struct c4iw_dev_ucontext uctx;
	u32 key;
	spinlock_t mmap_lock;
	struct list_head mmaps;
};

static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
{
	return container_of(c, struct c4iw_ucontext, ibucontext);
}

struct c4iw_mm_entry {
	struct list_head entry;
	u64 addr;
	u32 key;
	unsigned len;
};

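/*
 * Each c4iw_mm_entry describes an address that user space is expected
 * to mmap(), keyed by (key, len).  insert_mmap() publishes an entry on
 * the ucontext's list under mmap_lock, and remove_mmap() later claims
 * and unlinks the matching entry.
 */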
static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
						u32 key, unsigned len)
{
	struct list_head *pos, *nxt;
	struct c4iw_mm_entry *mm;

	spin_lock(&ucontext->mmap_lock);
	list_for_each_safe(pos, nxt, &ucontext->mmaps) {

		mm = list_entry(pos, struct c4iw_mm_entry, entry);
		if (mm->key == key && mm->len == len) {
			list_del_init(&mm->entry);
			spin_unlock(&ucontext->mmap_lock);
			PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
			     key, (unsigned long long) mm->addr, mm->len);
			return mm;
		}
	}
	spin_unlock(&ucontext->mmap_lock);
	return NULL;
}

static inline void insert_mmap(struct c4iw_ucontext *ucontext,
			       struct c4iw_mm_entry *mm)
{
	spin_lock(&ucontext->mmap_lock);
	PDBG("%s key 0x%x addr 0x%llx len %d\n", __func__,
	     mm->key, (unsigned long long) mm->addr, mm->len);
	list_add_tail(&mm->entry, &ucontext->mmaps);
	spin_unlock(&ucontext->mmap_lock);
}

enum c4iw_qp_attr_mask {
	C4IW_QP_ATTR_NEXT_STATE = 1 << 0,
	C4IW_QP_ATTR_SQ_DB = 1 << 1,
	C4IW_QP_ATTR_RQ_DB = 1 << 2,
	C4IW_QP_ATTR_ENABLE_RDMA_READ = 1 << 7,
	C4IW_QP_ATTR_ENABLE_RDMA_WRITE = 1 << 8,
	C4IW_QP_ATTR_ENABLE_RDMA_BIND = 1 << 9,
	C4IW_QP_ATTR_MAX_ORD = 1 << 11,
	C4IW_QP_ATTR_MAX_IRD = 1 << 12,
	C4IW_QP_ATTR_LLP_STREAM_HANDLE = 1 << 22,
	C4IW_QP_ATTR_STREAM_MSG_BUFFER = 1 << 23,
	C4IW_QP_ATTR_MPA_ATTR = 1 << 24,
	C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE = 1 << 25,
	C4IW_QP_ATTR_VALID_MODIFY = (C4IW_QP_ATTR_ENABLE_RDMA_READ |
				     C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
				     C4IW_QP_ATTR_MAX_ORD |
				     C4IW_QP_ATTR_MAX_IRD |
				     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
				     C4IW_QP_ATTR_STREAM_MSG_BUFFER |
				     C4IW_QP_ATTR_MPA_ATTR |
				     C4IW_QP_ATTR_QP_CONTEXT_ACTIVATE)
};

int c4iw_modify_qp(struct c4iw_dev *rhp,
		   struct c4iw_qp *qhp,
		   enum c4iw_qp_attr_mask mask,
		   struct c4iw_qp_attributes *attrs,
		   int internal);

enum c4iw_qp_state {
	C4IW_QP_STATE_IDLE,
	C4IW_QP_STATE_RTS,
	C4IW_QP_STATE_ERROR,
	C4IW_QP_STATE_TERMINATE,
	C4IW_QP_STATE_CLOSING,
	C4IW_QP_STATE_TOT
};

static inline int c4iw_convert_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET:
	case IB_QPS_INIT:
		return C4IW_QP_STATE_IDLE;
	case IB_QPS_RTS:
		return C4IW_QP_STATE_RTS;
	case IB_QPS_SQD:
		return C4IW_QP_STATE_CLOSING;
	case IB_QPS_SQE:
		return C4IW_QP_STATE_TERMINATE;
	case IB_QPS_ERR:
		return C4IW_QP_STATE_ERROR;
	default:
		return -1;
	}
}

static inline int to_ib_qp_state(int c4iw_qp_state)
{
	switch (c4iw_qp_state) {
	case C4IW_QP_STATE_IDLE:
		return IB_QPS_INIT;
	case C4IW_QP_STATE_RTS:
		return IB_QPS_RTS;
	case C4IW_QP_STATE_CLOSING:
		return IB_QPS_SQD;
	case C4IW_QP_STATE_TERMINATE:
		return IB_QPS_SQE;
	case C4IW_QP_STATE_ERROR:
		return IB_QPS_ERR;
	}
	return IB_QPS_ERR;
}

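/*
 * Convert ib_verbs access flags to the T4 TPT permission bits used in
 * firmware memory-registration commands; c4iw_ib_to_tpt_access() always
 * grants local read.
 */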
static inline u32 c4iw_ib_to_tpt_access(int a)
{
	return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (a & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0) |
	       (a & IB_ACCESS_LOCAL_WRITE ? FW_RI_MEM_ACCESS_LOCAL_WRITE : 0) |
	       FW_RI_MEM_ACCESS_LOCAL_READ;
}

static inline u32 c4iw_ib_to_tpt_bind_access(int acc)
{
	return (acc & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ ? FW_RI_MEM_ACCESS_REM_READ : 0);
}

enum c4iw_mmid_state {
	C4IW_STAG_STATE_VALID,
	C4IW_STAG_STATE_INVALID
};

#define C4IW_NODE_DESC "cxgb4 Chelsio Communications"

#define MPA_KEY_REQ "MPA ID Req Frame"
#define MPA_KEY_REP "MPA ID Rep Frame"

#define MPA_MAX_PRIVATE_DATA 256
#define MPA_ENHANCED_RDMA_CONN 0x10
#define MPA_REJECT 0x20
#define MPA_CRC 0x40
#define MPA_MARKERS 0x80
#define MPA_FLAGS_MASK 0xE0

#define MPA_V2_PEER2PEER_MODEL 0x8000
#define MPA_V2_ZERO_LEN_FPDU_RTR 0x4000
#define MPA_V2_RDMA_WRITE_RTR 0x8000
#define MPA_V2_RDMA_READ_RTR 0x4000
#define MPA_V2_IRD_ORD_MASK 0x3FFF

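/*
 * Endpoint reference counting: c4iw_get_ep()/c4iw_put_ep() wrap
 * kref_get()/kref_put(), logging the call site and current refcount via
 * PDBG.  The final put frees the endpoint through _c4iw_free_ep().
 */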
#define c4iw_put_ep(ep) { \
	PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__,  \
	     ep, atomic_read(&((ep)->kref.refcount))); \
	WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
	kref_put(&((ep)->kref), _c4iw_free_ep); \
}

#define c4iw_get_ep(ep) { \
	PDBG("get_ep (via %s:%u) ep %p, refcnt %d\n", __func__, __LINE__, \
	     ep, atomic_read(&((ep)->kref.refcount))); \
	kref_get(&((ep)->kref));  \
}
void _c4iw_free_ep(struct kref *kref);

struct mpa_message {
	u8 key[16];
	u8 flags;
	u8 revision;
	__be16 private_data_size;
	u8 private_data[0];
};

struct mpa_v2_conn_params {
	__be16 ird;
	__be16 ord;
};

struct terminate_message {
	u8 layer_etype;
	u8 ecode;
	__be16 hdrct_rsvd;
	u8 len_hdrs[0];
};

#define TERM_MAX_LENGTH (sizeof(struct terminate_message) + 2 + 18 + 28)

enum c4iw_layers_types {
	LAYER_RDMAP = 0x00,
	LAYER_DDP = 0x10,
	LAYER_MPA = 0x20,
	RDMAP_LOCAL_CATA = 0x00,
	RDMAP_REMOTE_PROT = 0x01,
	RDMAP_REMOTE_OP = 0x02,
	DDP_LOCAL_CATA = 0x00,
	DDP_TAGGED_ERR = 0x01,
	DDP_UNTAGGED_ERR = 0x02,
	DDP_LLP = 0x03
};

enum c4iw_rdma_ecodes {
	RDMAP_INV_STAG = 0x00,
	RDMAP_BASE_BOUNDS = 0x01,
	RDMAP_ACC_VIOL = 0x02,
	RDMAP_STAG_NOT_ASSOC = 0x03,
	RDMAP_TO_WRAP = 0x04,
	RDMAP_INV_VERS = 0x05,
	RDMAP_INV_OPCODE = 0x06,
	RDMAP_STREAM_CATA = 0x07,
	RDMAP_GLOBAL_CATA = 0x08,
	RDMAP_CANT_INV_STAG = 0x09,
	RDMAP_UNSPECIFIED = 0xff
};

enum c4iw_ddp_ecodes {
	DDPT_INV_STAG = 0x00,
	DDPT_BASE_BOUNDS = 0x01,
	DDPT_STAG_NOT_ASSOC = 0x02,
	DDPT_TO_WRAP = 0x03,
	DDPT_INV_VERS = 0x04,
	DDPU_INV_QN = 0x01,
	DDPU_INV_MSN_NOBUF = 0x02,
	DDPU_INV_MSN_RANGE = 0x03,
	DDPU_INV_MO = 0x04,
	DDPU_MSG_TOOBIG = 0x05,
	DDPU_INV_VERS = 0x06
};

enum c4iw_mpa_ecodes {
	MPA_CRC_ERR = 0x02,
	MPA_MARKER_ERR = 0x03,
	MPA_LOCAL_CATA = 0x05,
	MPA_INSUFF_IRD = 0x06,
	MPA_NOMATCH_RTR = 0x07,
};

enum c4iw_ep_state {
	IDLE = 0,
	LISTEN,
	CONNECTING,
	MPA_REQ_WAIT,
	MPA_REQ_SENT,
	MPA_REQ_RCVD,
	MPA_REP_SENT,
	FPDU_MODE,
	ABORTING,
	CLOSING,
	MORIBUND,
	DEAD,
};

enum c4iw_ep_flags {
	PEER_ABORT_IN_PROGRESS = 0,
	ABORT_REQ_IN_PROGRESS = 1,
	RELEASE_RESOURCES = 2,
	CLOSE_SENT = 3,
};

struct c4iw_ep_common {
	struct iw_cm_id *cm_id;
	struct c4iw_qp *qp;
	struct c4iw_dev *dev;
	enum c4iw_ep_state state;
	struct kref kref;
	struct mutex mutex;
	struct sockaddr_in local_addr;
	struct sockaddr_in remote_addr;
	struct c4iw_wr_wait wr_wait;
	unsigned long flags;
};

struct c4iw_listen_ep {
	struct c4iw_ep_common com;
	unsigned int stid;
	int backlog;
};

struct c4iw_ep {
	struct c4iw_ep_common com;
	struct c4iw_ep *parent_ep;
	struct timer_list timer;
	struct list_head entry;
	unsigned int atid;
	u32 hwtid;
	u32 snd_seq;
	u32 rcv_seq;
	struct l2t_entry *l2t;
	struct dst_entry *dst;
	struct sk_buff *mpa_skb;
	struct c4iw_mpa_attributes mpa_attr;
	u8 mpa_pkt[sizeof(struct mpa_message) + MPA_MAX_PRIVATE_DATA];
	unsigned int mpa_pkt_len;
	u32 ird;
	u32 ord;
	u32 smac_idx;
	u32 tx_chan;
	u32 mtu;
	u16 mss;
	u16 emss;
	u16 plen;
	u16 rss_qid;
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 retry_with_mpa_v1;
	u8 tried_with_mpa_v1;
};

static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)
{
	return cm_id->provider_data;
}

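/*
 * Smallest TCP window-scale shift (capped at 14) that allows a 16-bit
 * window field to cover the requested receive window size.
 */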
static inline int compute_wscale(int win)
{
	int wscale = 0;

	while (wscale < 14 && (65535 << wscale) < win)
		wscale++;
	return wscale;
}

u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
			u32 reserved, u32 flags);
void c4iw_id_table_free(struct c4iw_id_table *alloc);

typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);

int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
		     struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev);
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
int c4iw_register_device(struct c4iw_dev *dev);
void c4iw_unregister_device(struct c4iw_dev *dev);
int __init c4iw_cm_init(void);
void __exit c4iw_cm_term(void);
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx);
void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx);
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		   struct ib_send_wr **bad_wr);
int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr);
int c4iw_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind);
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
int c4iw_destroy_listen(struct iw_cm_id *cm_id);
int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
void c4iw_qp_add_ref(struct ib_qp *qp);
void c4iw_qp_rem_ref(struct ib_qp *qp);
void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *page_list);
struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(
			struct ib_device *device,
			int page_list_len);
struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);
int c4iw_dealloc_mw(struct ib_mw *mw);
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd);
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
			       u64 length, u64 virt, int acc,
			       struct ib_udata *udata);
struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd,
				     struct ib_phys_buf *buffer_list,
				     int num_phys_buf,
				     int acc,
				     u64 *iova_start);
int c4iw_reregister_phys_mem(struct ib_mr *mr,
			     int mr_rereg_mask,
			     struct ib_pd *pd,
			     struct ib_phys_buf *buffer_list,
			     int num_phys_buf,
			     int acc, u64 *iova_start);
int c4iw_dereg_mr(struct ib_mr *ib_mr);
int c4iw_destroy_cq(struct ib_cq *ib_cq);
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata);
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int c4iw_destroy_qp(struct ib_qp *ib_qp);
struct ib_qp *c4iw_create_qp(struct ib_pd *pd,
			     struct ib_qp_init_attr *attrs,
			     struct ib_udata *udata);
int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		     int attr_mask, struct ib_qp_init_attr *init_attr);
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
void c4iw_flush_hw_cq(struct t4_cq *cq);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
		   struct c4iw_dev_ucontext *uctx);
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);

extern struct cxgb4_client t4c_client;
extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
extern int c4iw_max_read_depth;
extern int db_fc_threshold;

#endif