/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2008-2012 Emulex.  All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General
 * Public License as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD
 * TO BE LEGALLY INVALID.  See the GNU General Public License for
 * more details, a copy of which can be found in the file COPYING
 * included with this package.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 *******************************************************************/

#ifndef __OCRDMA_H__
#define __OCRDMA_H__

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include <be_roce.h>
#include "ocrdma_sli.h"

#define OCRDMA_ROCE_DEV_VERSION "1.0.0"
#define OCRDMA_NODE_DESC	"Emulex OneConnect RoCE HCA"

#define OCRDMA_MAX_AH 512

#define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

49 | struct ocrdma_dev_attr { | |
50 | u8 fw_ver[32]; | |
51 | u32 vendor_id; | |
52 | u32 device_id; | |
53 | u16 max_pd; | |
54 | u16 max_cq; | |
55 | u16 max_cqe; | |
56 | u16 max_qp; | |
57 | u16 max_wqe; | |
58 | u16 max_rqe; | |
7c33880c | 59 | u16 max_srq; |
fe2caefc PP |
60 | u32 max_inline_data; |
61 | int max_send_sge; | |
62 | int max_recv_sge; | |
634c5796 | 63 | int max_srq_sge; |
45e86b33 | 64 | int max_rdma_sge; |
fe2caefc PP |
65 | int max_mr; |
66 | u64 max_mr_size; | |
67 | u32 max_num_mr_pbl; | |
68 | int max_fmr; | |
69 | int max_map_per_fmr; | |
70 | int max_pages_per_frmr; | |
71 | u16 max_ord_per_qp; | |
72 | u16 max_ird_per_qp; | |
73 | ||
74 | int device_cap_flags; | |
75 | u8 cq_overflow_detect; | |
76 | u8 srq_supported; | |
77 | ||
78 | u32 wqe_size; | |
79 | u32 rqe_size; | |
80 | u32 ird_page_size; | |
81 | u8 local_ca_ack_delay; | |
82 | u8 ird; | |
83 | u8 num_ird_pages; | |
84 | }; | |
85 | ||
86 | struct ocrdma_pbl { | |
87 | void *va; | |
88 | dma_addr_t pa; | |
89 | }; | |
90 | ||
91 | struct ocrdma_queue_info { | |
92 | void *va; | |
93 | dma_addr_t dma; | |
94 | u32 size; | |
95 | u16 len; | |
96 | u16 entry_size; /* Size of an element in the queue */ | |
97 | u16 id; /* qid, where to ring the doorbell. */ | |
98 | u16 head, tail; | |
99 | bool created; | |
fe2caefc PP |
100 | }; |
101 | ||
102 | struct ocrdma_eq { | |
103 | struct ocrdma_queue_info q; | |
104 | u32 vector; | |
105 | int cq_cnt; | |
106 | struct ocrdma_dev *dev; | |
107 | char irq_name[32]; | |
108 | }; | |
109 | ||
110 | struct ocrdma_mq { | |
111 | struct ocrdma_queue_info sq; | |
112 | struct ocrdma_queue_info cq; | |
113 | bool rearm_cq; | |
114 | }; | |
115 | ||
116 | struct mqe_ctx { | |
117 | struct mutex lock; /* for serializing mailbox commands on MQ */ | |
118 | wait_queue_head_t cmd_wait; | |
119 | u32 tag; | |
120 | u16 cqe_status; | |
121 | u16 ext_status; | |
122 | bool cmd_done; | |
123 | }; | |
124 | ||
125 | struct ocrdma_dev { | |
126 | struct ib_device ibdev; | |
127 | struct ocrdma_dev_attr attr; | |
128 | ||
129 | struct mutex dev_lock; /* provides syncronise access to device data */ | |
130 | spinlock_t flush_q_lock ____cacheline_aligned; | |
131 | ||
132 | struct ocrdma_cq **cq_tbl; | |
133 | struct ocrdma_qp **qp_tbl; | |
134 | ||
135 | struct ocrdma_eq meq; | |
136 | struct ocrdma_eq *qp_eq_tbl; | |
137 | int eq_cnt; | |
138 | u16 base_eqid; | |
139 | u16 max_eq; | |
140 | ||
141 | union ib_gid *sgid_tbl; | |
142 | /* provided synchronization to sgid table for | |
143 | * updating gid entries triggered by notifier. | |
144 | */ | |
145 | spinlock_t sgid_lock; | |
146 | ||
147 | int gsi_qp_created; | |
148 | struct ocrdma_cq *gsi_sqcq; | |
149 | struct ocrdma_cq *gsi_rqcq; | |
150 | ||
151 | struct { | |
152 | struct ocrdma_av *va; | |
153 | dma_addr_t pa; | |
154 | u32 size; | |
155 | u32 num_ah; | |
156 | /* provide synchronization for av | |
157 | * entry allocations. | |
158 | */ | |
159 | spinlock_t lock; | |
160 | u32 ahid; | |
161 | struct ocrdma_pbl pbl; | |
162 | } av_tbl; | |
163 | ||
164 | void *mbx_cmd; | |
165 | struct ocrdma_mq mq; | |
166 | struct mqe_ctx mqe_ctx; | |
167 | ||
168 | struct be_dev_info nic_info; | |
169 | ||
170 | struct list_head entry; | |
3e4d60a8 | 171 | struct rcu_head rcu; |
fe2caefc | 172 | int id; |
7c33880c | 173 | u64 stag_arr[OCRDMA_MAX_STAG]; |
fe2caefc PP |
174 | }; |
175 | ||
176 | struct ocrdma_cq { | |
177 | struct ib_cq ibcq; | |
fe2caefc PP |
178 | struct ocrdma_cqe *va; |
179 | u32 phase; | |
180 | u32 getp; /* pointer to pending wrs to | |
181 | * return to stack, wrap arounds | |
182 | * at max_hw_cqe | |
183 | */ | |
184 | u32 max_hw_cqe; | |
185 | bool phase_change; | |
186 | bool armed, solicited; | |
187 | bool arm_needed; | |
188 | ||
189 | spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization | |
190 | * to cq polling | |
191 | */ | |
192 | /* syncronizes cq completion handler invoked from multiple context */ | |
193 | spinlock_t comp_handler_lock ____cacheline_aligned; | |
194 | u16 id; | |
195 | u16 eqn; | |
196 | ||
197 | struct ocrdma_ucontext *ucontext; | |
198 | dma_addr_t pa; | |
199 | u32 len; | |
fe2caefc PP |
200 | |
201 | /* head of all qp's sq and rq for which cqes need to be flushed | |
202 | * by the software. | |
203 | */ | |
204 | struct list_head sq_head, rq_head; | |
205 | }; | |
206 | ||
207 | struct ocrdma_pd { | |
208 | struct ib_pd ibpd; | |
209 | struct ocrdma_dev *dev; | |
210 | struct ocrdma_ucontext *uctx; | |
fe2caefc PP |
211 | u32 id; |
212 | int num_dpp_qp; | |
213 | u32 dpp_page; | |
214 | bool dpp_enabled; | |
215 | }; | |
216 | ||
217 | struct ocrdma_ah { | |
218 | struct ib_ah ibah; | |
fe2caefc PP |
219 | struct ocrdma_av *av; |
220 | u16 sgid_index; | |
221 | u32 id; | |
222 | }; | |
223 | ||
224 | struct ocrdma_qp_hwq_info { | |
225 | u8 *va; /* virtual address */ | |
226 | u32 max_sges; | |
227 | u32 head, tail; | |
228 | u32 entry_size; | |
229 | u32 max_cnt; | |
230 | u32 max_wqe_idx; | |
fe2caefc PP |
231 | u16 dbid; /* qid, where to ring the doorbell. */ |
232 | u32 len; | |
233 | dma_addr_t pa; | |
234 | }; | |
235 | ||
236 | struct ocrdma_srq { | |
237 | struct ib_srq ibsrq; | |
fe2caefc | 238 | u8 __iomem *db; |
9884bcdc NG |
239 | struct ocrdma_qp_hwq_info rq; |
240 | u64 *rqe_wr_id_tbl; | |
241 | u32 *idx_bit_fields; | |
242 | u32 bit_fields_len; | |
243 | ||
fe2caefc PP |
244 | /* provide synchronization to multiple context(s) posting rqe */ |
245 | spinlock_t q_lock ____cacheline_aligned; | |
246 | ||
fe2caefc | 247 | struct ocrdma_pd *pd; |
fe2caefc | 248 | u32 id; |
fe2caefc PP |
249 | }; |
250 | ||
251 | struct ocrdma_qp { | |
252 | struct ib_qp ibqp; | |
253 | struct ocrdma_dev *dev; | |
254 | ||
255 | u8 __iomem *sq_db; | |
fe2caefc PP |
256 | struct ocrdma_qp_hwq_info sq; |
257 | struct { | |
258 | uint64_t wrid; | |
259 | uint16_t dpp_wqe_idx; | |
260 | uint16_t dpp_wqe; | |
261 | uint8_t signaled; | |
262 | uint8_t rsvd[3]; | |
263 | } *wqe_wr_id_tbl; | |
264 | u32 max_inline_data; | |
9884bcdc NG |
265 | |
266 | /* provide synchronization to multiple context(s) posting wqe, rqe */ | |
267 | spinlock_t q_lock ____cacheline_aligned; | |
fe2caefc PP |
268 | struct ocrdma_cq *sq_cq; |
269 | /* list maintained per CQ to flush SQ errors */ | |
270 | struct list_head sq_entry; | |
271 | ||
272 | u8 __iomem *rq_db; | |
273 | struct ocrdma_qp_hwq_info rq; | |
274 | u64 *rqe_wr_id_tbl; | |
275 | struct ocrdma_cq *rq_cq; | |
276 | struct ocrdma_srq *srq; | |
277 | /* list maintained per CQ to flush RQ errors */ | |
278 | struct list_head rq_entry; | |
279 | ||
280 | enum ocrdma_qp_state state; /* QP state */ | |
281 | int cap_flags; | |
282 | u32 max_ord, max_ird; | |
283 | ||
284 | u32 id; | |
285 | struct ocrdma_pd *pd; | |
286 | ||
287 | enum ib_qp_type qp_type; | |
288 | ||
289 | int sgid_idx; | |
290 | u32 qkey; | |
291 | bool dpp_enabled; | |
292 | u8 *ird_q_va; | |
45e86b33 | 293 | u16 db_cache; |
fe2caefc PP |
294 | }; |
295 | ||
fe2caefc | 296 | struct ocrdma_hw_mr { |
fe2caefc PP |
297 | u32 lkey; |
298 | u8 fr_mr; | |
299 | u8 remote_atomic; | |
300 | u8 remote_rd; | |
301 | u8 remote_wr; | |
302 | u8 local_rd; | |
303 | u8 local_wr; | |
304 | u8 mw_bind; | |
305 | u8 rsvd; | |
306 | u64 len; | |
307 | struct ocrdma_pbl *pbl_table; | |
308 | u32 num_pbls; | |
309 | u32 num_pbes; | |
310 | u32 pbl_size; | |
311 | u32 pbe_size; | |
312 | u64 fbo; | |
313 | u64 va; | |
314 | }; | |
315 | ||
316 | struct ocrdma_mr { | |
317 | struct ib_mr ibmr; | |
318 | struct ib_umem *umem; | |
319 | struct ocrdma_hw_mr hwmr; | |
fe2caefc PP |
320 | }; |
321 | ||
322 | struct ocrdma_ucontext { | |
323 | struct ib_ucontext ibucontext; | |
fe2caefc PP |
324 | |
325 | struct list_head mm_head; | |
326 | struct mutex mm_list_lock; /* protects list entries of mm type */ | |
327 | struct { | |
328 | u32 *va; | |
329 | dma_addr_t pa; | |
330 | u32 len; | |
331 | } ah_tbl; | |
332 | }; | |
333 | ||
334 | struct ocrdma_mm { | |
335 | struct { | |
336 | u64 phy_addr; | |
337 | unsigned long len; | |
338 | } key; | |
339 | struct list_head entry; | |
340 | }; | |
341 | ||
342 | static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev) | |
343 | { | |
344 | return container_of(ibdev, struct ocrdma_dev, ibdev); | |
345 | } | |
346 | ||
347 | static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext | |
348 | *ibucontext) | |
349 | { | |
350 | return container_of(ibucontext, struct ocrdma_ucontext, ibucontext); | |
351 | } | |
352 | ||
353 | static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd) | |
354 | { | |
355 | return container_of(ibpd, struct ocrdma_pd, ibpd); | |
356 | } | |
357 | ||
358 | static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq) | |
359 | { | |
360 | return container_of(ibcq, struct ocrdma_cq, ibcq); | |
361 | } | |
362 | ||
363 | static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp) | |
364 | { | |
365 | return container_of(ibqp, struct ocrdma_qp, ibqp); | |
366 | } | |
367 | ||
368 | static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr) | |
369 | { | |
370 | return container_of(ibmr, struct ocrdma_mr, ibmr); | |
371 | } | |
372 | ||
373 | static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah) | |
374 | { | |
375 | return container_of(ibah, struct ocrdma_ah, ibah); | |
376 | } | |
377 | ||
378 | static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq) | |
379 | { | |
380 | return container_of(ibsrq, struct ocrdma_srq, ibsrq); | |
381 | } | |
382 | ||
df176ea0 NG |
383 | |
384 | static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp) | |
385 | { | |
386 | return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY && | |
43a6b402 | 387 | qp->id < 128) ? 24 : 16); |
df176ea0 NG |
388 | } |
389 | ||
390 | static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) | |
391 | { | |
392 | int cqe_valid; | |
393 | cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; | |
f99b1649 | 394 | return (cqe_valid == cq->phase); |
df176ea0 NG |
395 | } |
396 | ||
397 | static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) | |
398 | { | |
399 | return (le32_to_cpu(cqe->flags_status_srcqpn) & | |
400 | OCRDMA_CQE_QTYPE) ? 0 : 1; | |
401 | } | |
402 | ||
403 | static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe) | |
404 | { | |
405 | return (le32_to_cpu(cqe->flags_status_srcqpn) & | |
406 | OCRDMA_CQE_INVALIDATE) ? 1 : 0; | |
407 | } | |
408 | ||
409 | static inline int is_cqe_imm(struct ocrdma_cqe *cqe) | |
410 | { | |
411 | return (le32_to_cpu(cqe->flags_status_srcqpn) & | |
412 | OCRDMA_CQE_IMM) ? 1 : 0; | |
413 | } | |
414 | ||
415 | static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe) | |
416 | { | |
417 | return (le32_to_cpu(cqe->flags_status_srcqpn) & | |
418 | OCRDMA_CQE_WRITE_IMM) ? 1 : 0; | |
419 | } | |

#endif	/* __OCRDMA_H__ */