Commit | Line | Data |
---|---|---|
fe2caefc PP |
1 | /******************************************************************* |
2 | * This file is part of the Emulex RoCE Device Driver for * | |
3 | * RoCE (RDMA over Converged Ethernet) adapters. * | |
4 | * Copyright (C) 2008-2012 Emulex. All rights reserved. * | |
5 | * EMULEX and SLI are trademarks of Emulex. * | |
6 | * www.emulex.com * | |
7 | * * | |
8 | * This program is free software; you can redistribute it and/or * | |
9 | * modify it under the terms of version 2 of the GNU General * | |
10 | * Public License as published by the Free Software Foundation. * | |
11 | * This program is distributed in the hope that it will be useful. * | |
12 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * | |
13 | * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * | |
14 | * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * | |
15 | * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * | |
16 | * TO BE LEGALLY INVALID. See the GNU General Public License for * | |
17 | * more details, a copy of which can be found in the file COPYING * | |
18 | * included with this package. * | |
19 | * | |
20 | * Contact Information: | |
21 | * linux-drivers@emulex.com | |
22 | * | |
23 | * Emulex | |
24 | * 3333 Susan Street | |
25 | * Costa Mesa, CA 92626 | |
26 | *******************************************************************/ | |
27 | ||
28 | #ifndef __OCRDMA_H__ | |
29 | #define __OCRDMA_H__ | |
30 | ||
31 | #include <linux/mutex.h> | |
32 | #include <linux/list.h> | |
33 | #include <linux/spinlock.h> | |
34 | #include <linux/pci.h> | |
35 | ||
36 | #include <rdma/ib_verbs.h> | |
37 | #include <rdma/ib_user_verbs.h> | |
38 | ||
39 | #include <be_roce.h> | |
40 | #include "ocrdma_sli.h" | |
41 | ||
42 | #define OCRDMA_ROCE_DEV_VERSION "1.0.0" | |
43 | #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" | |
44 | ||
fe2caefc PP |
45 | #define OCRDMA_MAX_AH 512 |
46 | ||
47 | #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) | |
48 | ||
/* Device capability limits queried from the adapter; names largely
 * mirror the IB verbs device-attribute fields.
 */
struct ocrdma_dev_attr {
	u8 fw_ver[32];		/* firmware version string */
	u32 vendor_id;
	u32 device_id;
	u16 max_pd;		/* max protection domains */
	u16 max_cq;
	u16 max_cqe;
	u16 max_qp;
	u16 max_wqe;
	u16 max_rqe;
	u16 max_srq;
	u32 max_inline_data;
	int max_send_sge;
	int max_recv_sge;
	int max_srq_sge;
	int max_rdma_sge;
	int max_mr;
	u64 max_mr_size;
	u32 max_num_mr_pbl;
	int max_fmr;
	int max_map_per_fmr;
	int max_pages_per_frmr;
	u16 max_ord_per_qp;	/* outbound RDMA-read depth per QP */
	u16 max_ird_per_qp;	/* inbound RDMA-read depth per QP */

	int device_cap_flags;
	u8 cq_overflow_detect;
	u8 srq_supported;	/* non-zero when SRQs are available */

	u32 wqe_size;		/* hardware WQE/RQE entry sizes */
	u32 rqe_size;
	u32 ird_page_size;
	u8 local_ca_ack_delay;
	u8 ird;
	u8 num_ird_pages;
};
85 | ||
/* One page of a physical buffer list (PBL): the kernel virtual address
 * and the DMA address handed to the hardware.
 */
struct ocrdma_pbl {
	void *va;
	dma_addr_t pa;
};
90 | ||
/* Generic descriptor for a DMA-mapped hardware queue (used for EQ,
 * MQ send/completion queues below).
 */
struct ocrdma_queue_info {
	void *va;		/* kernel virtual address of queue memory */
	dma_addr_t dma;		/* DMA address of queue memory */
	u32 size;		/* total size in bytes */
	u16 len;		/* queue length */
	u16 entry_size;		/* Size of an element in the queue */
	u16 id;			/* qid, where to ring the doorbell. */
	u16 head, tail;		/* head/tail indices into the queue */
	bool created;		/* set once the queue exists on the device */
};
101 | ||
/* Event queue plus its interrupt bookkeeping. */
struct ocrdma_eq {
	struct ocrdma_queue_info q;
	u32 vector;		/* interrupt vector for this EQ */
	int cq_cnt;		/* CQs tied to this EQ — presumably; see eq_tbl users */
	struct ocrdma_dev *dev;	/* owning device */
	char irq_name[32];	/* name used when requesting the IRQ */
};
109 | ||
/* Mailbox queue: a send queue and its completion queue. */
struct ocrdma_mq {
	struct ocrdma_queue_info sq;
	struct ocrdma_queue_info cq;
	bool rearm_cq;		/* re-arm the MQ CQ when ringing its doorbell */
};
115 | ||
/* State for the single in-flight mailbox command (serialized by 'lock'). */
struct mqe_ctx {
	struct mutex lock; /* for serializing mailbox commands on MQ */
	wait_queue_head_t cmd_wait;	/* issuer sleeps here for completion */
	u32 tag;		/* matches a response to its request — TODO confirm */
	u16 cqe_status;		/* status reported in the completion */
	u16 ext_status;		/* extended/additional status */
	bool cmd_done;		/* set when the response has arrived */
};
124 | ||
/* Hardware view of a memory region: access rights plus the PBL pages
 * describing the mapped memory.
 */
struct ocrdma_hw_mr {
	u32 lkey;
	u8 fr_mr;		/* fast-register MR */
	u8 remote_atomic;	/* access-permission flags (non-zero = allowed) */
	u8 remote_rd;
	u8 remote_wr;
	u8 local_rd;
	u8 local_wr;
	u8 mw_bind;
	u8 rsvd;
	u64 len;		/* length of the region in bytes */
	struct ocrdma_pbl *pbl_table;	/* array of PBL pages */
	u32 num_pbls;
	u32 num_pbes;
	u32 pbl_size;
	u32 pbe_size;
	u64 fbo;		/* first-byte offset into the first page */
	u64 va;			/* starting virtual address of the region */
};
144 | ||
/* Memory region: the IB verbs object, the pinned user memory (if any),
 * and the corresponding hardware MR state.
 */
struct ocrdma_mr {
	struct ib_mr ibmr;
	struct ib_umem *umem;	/* presumably NULL for kernel/DMA MRs — confirm */
	struct ocrdma_hw_mr hwmr;
};
150 | ||
/* Per-adapter RoCE device state; embeds the IB core device. */
struct ocrdma_dev {
	struct ib_device ibdev;
	struct ocrdma_dev_attr attr;	/* capability limits of this device */

	struct mutex dev_lock; /* provides syncronise access to device data */
	spinlock_t flush_q_lock ____cacheline_aligned;

	struct ocrdma_cq **cq_tbl;	/* CQ lookup table */
	struct ocrdma_qp **qp_tbl;	/* QP lookup table */

	struct ocrdma_eq *eq_tbl;	/* array of eq_cnt event queues */
	int eq_cnt;
	u16 base_eqid;
	u16 max_eq;

	union ib_gid *sgid_tbl;
	/* provided synchronization to sgid table for
	 * updating gid entries triggered by notifier.
	 */
	spinlock_t sgid_lock;

	int gsi_qp_created;
	struct ocrdma_cq *gsi_sqcq;	/* CQs of the GSI QP */
	struct ocrdma_cq *gsi_rqcq;

	/* Pool of address-handle (AV) entries shared device-wide. */
	struct {
		struct ocrdma_av *va;
		dma_addr_t pa;
		u32 size;
		u32 num_ah;
		/* provide synchronization for av
		 * entry allocations.
		 */
		spinlock_t lock;
		u32 ahid;
		struct ocrdma_pbl pbl;
	} av_tbl;

	void *mbx_cmd;
	struct ocrdma_mq mq;		/* mailbox queue */
	struct mqe_ctx mqe_ctx;		/* in-flight mailbox command state */

	struct be_dev_info nic_info;	/* info shared with the be2net NIC driver */

	struct list_head entry;
	struct rcu_head rcu;		/* for RCU-deferred teardown */
	int id;
	struct ocrdma_mr *stag_arr[OCRDMA_MAX_STAG];
	u16 pvid;
	u32 asic_id;			/* cached; see ocrdma_get_asic_type() */
};
202 | ||
/* Completion queue: the IB verbs CQ plus the DMA-mapped CQE ring and
 * polling/arming state.
 */
struct ocrdma_cq {
	struct ib_cq ibcq;
	struct ocrdma_cqe *va;	/* CQE ring */
	u32 phase;		/* expected valid-bit phase; see is_cqe_valid() */
	u32 getp;	/* pointer to pending wrs to
			 * return to stack, wrap arounds
			 * at max_hw_cqe
			 */
	u32 max_hw_cqe;
	bool phase_change;
	bool deferred_arm, deferred_sol;	/* arm/solicit deferred — TODO confirm */
	bool first_arm;

	spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
						   * to cq polling
						   */
	/* syncronizes cq completion handler invoked from multiple context */
	spinlock_t comp_handler_lock ____cacheline_aligned;
	u16 id;
	u16 eqn;		/* EQ this CQ reports to */

	struct ocrdma_ucontext *ucontext;	/* owning user context, if any */
	dma_addr_t pa;		/* DMA address of the CQE ring */
	u32 len;		/* size of the CQE ring in bytes */
	u32 cqe_cnt;

	/* head of all qp's sq and rq for which cqes need to be flushed
	 * by the software.
	 */
	struct list_head sq_head, rq_head;
};
234 | ||
/* Protection domain. */
struct ocrdma_pd {
	struct ib_pd ibpd;
	struct ocrdma_dev *dev;
	struct ocrdma_ucontext *uctx;	/* owning user context, if any */
	u32 id;
	int num_dpp_qp;		/* QPs using DPP on this PD */
	u32 dpp_page;
	bool dpp_enabled;
};
244 | ||
/* Address handle; 'av' presumably points into dev->av_tbl — confirm. */
struct ocrdma_ah {
	struct ib_ah ibah;
	struct ocrdma_av *av;
	u16 sgid_index;		/* source GID table index */
	u32 id;
};
251 | ||
/* Bookkeeping for one hardware work queue (used for QP SQ/RQ and SRQ RQ). */
struct ocrdma_qp_hwq_info {
	u8 *va;			/* virtual address */
	u32 max_sges;
	u32 head, tail;		/* producer/consumer indices */
	u32 entry_size;
	u32 max_cnt;
	u32 max_wqe_idx;
	u16 dbid;		/* qid, where to ring the doorbell. */
	u32 len;
	dma_addr_t pa;		/* DMA address of the queue memory */
};
263 | ||
/* Shared receive queue. */
struct ocrdma_srq {
	struct ib_srq ibsrq;
	u8 __iomem *db;		/* doorbell register for this SRQ */
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;	/* wr_id of each posted RQE */
	u32 *idx_bit_fields;	/* presumably a free/used slot bitmap — confirm */
	u32 bit_fields_len;

	/* provide synchronization to multiple context(s) posting rqe */
	spinlock_t q_lock ____cacheline_aligned;

	struct ocrdma_pd *pd;
	u32 id;
};
278 | ||
/* Queue pair: send/receive hardware queues plus associated state. */
struct ocrdma_qp {
	struct ib_qp ibqp;
	struct ocrdma_dev *dev;

	u8 __iomem *sq_db;	/* SQ doorbell register */
	struct ocrdma_qp_hwq_info sq;
	/* Per-SQ-entry bookkeeping consulted at completion time. */
	struct {
		uint64_t wrid;
		uint16_t dpp_wqe_idx;
		uint16_t dpp_wqe;
		uint8_t signaled;
		uint8_t rsvd[3];
	} *wqe_wr_id_tbl;
	u32 max_inline_data;

	/* provide synchronization to multiple context(s) posting wqe, rqe */
	spinlock_t q_lock ____cacheline_aligned;
	struct ocrdma_cq *sq_cq;
	/* list maintained per CQ to flush SQ errors */
	struct list_head sq_entry;

	u8 __iomem *rq_db;	/* RQ doorbell register */
	struct ocrdma_qp_hwq_info rq;
	u64 *rqe_wr_id_tbl;	/* wr_id of each posted RQE */
	struct ocrdma_cq *rq_cq;
	struct ocrdma_srq *srq;	/* non-NULL when attached to an SRQ */
	/* list maintained per CQ to flush RQ errors */
	struct list_head rq_entry;

	enum ocrdma_qp_state state;	/* QP state */
	int cap_flags;
	u32 max_ord, max_ird;	/* outbound/inbound RDMA-read depth */

	u32 id;
	struct ocrdma_pd *pd;

	enum ib_qp_type qp_type;

	int sgid_idx;
	u32 qkey;
	bool dpp_enabled;
	u8 *ird_q_va;
	bool signaled;		/* presumably: all WRs signal completion — confirm */
	u16 db_cache;
};
324 | ||
fe2caefc PP |
325 | |
/* Per-open user context: mmap bookkeeping, the context-wide PD, and an
 * AH table page.
 */
struct ocrdma_ucontext {
	struct ib_ucontext ibucontext;

	struct list_head mm_head;	/* list of ocrdma_mm entries */
	struct mutex mm_list_lock; /* protects list entries of mm type */
	struct ocrdma_pd *cntxt_pd;	/* PD owned by this context */
	int pd_in_use;

	/* DMA-mapped AH table — presumably shared with user space; confirm */
	struct {
		u32 *va;
		dma_addr_t pa;
		u32 len;
	} ah_tbl;
};
340 | ||
/* One mmap-able region for a user context, keyed by (phys addr, len). */
struct ocrdma_mm {
	struct {
		u64 phy_addr;
		unsigned long len;
	} key;
	struct list_head entry;	/* link in ocrdma_ucontext.mm_head */
};
348 | ||
349 | static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev) | |
350 | { | |
351 | return container_of(ibdev, struct ocrdma_dev, ibdev); | |
352 | } | |
353 | ||
354 | static inline struct ocrdma_ucontext *get_ocrdma_ucontext(struct ib_ucontext | |
355 | *ibucontext) | |
356 | { | |
357 | return container_of(ibucontext, struct ocrdma_ucontext, ibucontext); | |
358 | } | |
359 | ||
360 | static inline struct ocrdma_pd *get_ocrdma_pd(struct ib_pd *ibpd) | |
361 | { | |
362 | return container_of(ibpd, struct ocrdma_pd, ibpd); | |
363 | } | |
364 | ||
365 | static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq) | |
366 | { | |
367 | return container_of(ibcq, struct ocrdma_cq, ibcq); | |
368 | } | |
369 | ||
370 | static inline struct ocrdma_qp *get_ocrdma_qp(struct ib_qp *ibqp) | |
371 | { | |
372 | return container_of(ibqp, struct ocrdma_qp, ibqp); | |
373 | } | |
374 | ||
375 | static inline struct ocrdma_mr *get_ocrdma_mr(struct ib_mr *ibmr) | |
376 | { | |
377 | return container_of(ibmr, struct ocrdma_mr, ibmr); | |
378 | } | |
379 | ||
380 | static inline struct ocrdma_ah *get_ocrdma_ah(struct ib_ah *ibah) | |
381 | { | |
382 | return container_of(ibah, struct ocrdma_ah, ibah); | |
383 | } | |
384 | ||
385 | static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq) | |
386 | { | |
387 | return container_of(ibsrq, struct ocrdma_srq, ibsrq); | |
388 | } | |
389 | ||
df176ea0 NG |
390 | static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe) |
391 | { | |
392 | int cqe_valid; | |
393 | cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID; | |
f99b1649 | 394 | return (cqe_valid == cq->phase); |
df176ea0 NG |
395 | } |
396 | ||
397 | static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe) | |
398 | { | |
399 | return (le32_to_cpu(cqe->flags_status_srcqpn) & | |
400 | OCRDMA_CQE_QTYPE) ? 0 : 1; | |
401 | } | |
402 | ||
403 | static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe) | |
404 | { | |
405 | return (le32_to_cpu(cqe->flags_status_srcqpn) & | |
406 | OCRDMA_CQE_INVALIDATE) ? 1 : 0; | |
407 | } | |
408 | ||
409 | static inline int is_cqe_imm(struct ocrdma_cqe *cqe) | |
410 | { | |
411 | return (le32_to_cpu(cqe->flags_status_srcqpn) & | |
412 | OCRDMA_CQE_IMM) ? 1 : 0; | |
413 | } | |
414 | ||
415 | static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe) | |
416 | { | |
417 | return (le32_to_cpu(cqe->flags_status_srcqpn) & | |
418 | OCRDMA_CQE_WRITE_IMM) ? 1 : 0; | |
419 | } | |
420 | ||
40aca6ff MS |
421 | static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev, |
422 | struct ib_ah_attr *ah_attr, u8 *mac_addr) | |
423 | { | |
424 | struct in6_addr in6; | |
425 | ||
426 | memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6)); | |
427 | if (rdma_is_multicast_addr(&in6)) | |
428 | rdma_get_mcast_mac(&in6, mac_addr); | |
429 | else | |
430 | memcpy(mac_addr, ah_attr->dmac, ETH_ALEN); | |
431 | return 0; | |
432 | } | |
df176ea0 | 433 | |
ea617626 DS |
434 | static inline int ocrdma_get_eq_table_index(struct ocrdma_dev *dev, |
435 | int eqid) | |
436 | { | |
437 | int indx; | |
438 | ||
439 | for (indx = 0; indx < dev->eq_cnt; indx++) { | |
440 | if (dev->eq_tbl[indx].q.id == eqid) | |
441 | return indx; | |
442 | } | |
443 | ||
444 | return -EINVAL; | |
445 | } | |
446 | ||
21c3391a DS |
447 | static inline u8 ocrdma_get_asic_type(struct ocrdma_dev *dev) |
448 | { | |
449 | if (dev->nic_info.dev_family == 0xF && !dev->asic_id) { | |
450 | pci_read_config_dword( | |
451 | dev->nic_info.pdev, | |
452 | OCRDMA_SLI_ASIC_ID_OFFSET, &dev->asic_id); | |
453 | } | |
454 | ||
455 | return (dev->asic_id & OCRDMA_SLI_ASIC_GEN_NUM_MASK) >> | |
456 | OCRDMA_SLI_ASIC_GEN_NUM_SHIFT; | |
457 | } | |
458 | ||
fe2caefc | 459 | #endif |