/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_eq.h"

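/*
 * Ring the EQ doorbell: write the masked consumer index back to hardware.
 * req_not is folded in above the log2 queue depth; judging by the call
 * sites below (which always pass 0), it presumably selects the
 * notification/rearm behaviour of the doorbell write.
 */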
static void eq_set_cons_index(struct hns_roce_eq *eq, int req_not)
{
	roce_raw_write((eq->cons_index & CONS_INDEX_MASK) |
		       (req_not << eq->log_entries), eq->doorbell);
	/* Memory barrier */
	mb();
}

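/*
 * Return the AEQE at @entry. The index wraps with an (entries - 1) mask
 * (queue depths are powers of two) and the byte offset is then split
 * across the HNS_ROCE_BA_SIZE chunks backing the queue in buf_list.
 */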
static struct hns_roce_aeqe *get_aeqe(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			    HNS_ROCE_AEQ_ENTRY_SIZE;

	return (struct hns_roce_aeqe *)((u8 *)
		(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
		off % HNS_ROCE_BA_SIZE);
}

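/*
 * Owner-bit polling: (cons_index & entries) is the software ring-pass
 * parity, and the hardware-written owner bit toggles on each pass through
 * the ring. An entry is new exactly when the owner bit disagrees with the
 * parity; otherwise there is nothing to consume and NULL is returned.
 */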
static struct hns_roce_aeqe *next_aeqe_sw(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe = get_aeqe(eq, eq->cons_index);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}

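/*
 * Decode a Local Work Queue Catastrophic Error AEQE: extract the QPN, log
 * the error sub-type, then forward QPN plus event type to the QP layer
 * via hns_roce_qp_event().
 */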
static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
					 struct hns_roce_aeqe *aeqe, int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	qpn = roce_get_field(aeqe->event.qp_event.qp,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
	dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LWQCE_QPC_ERROR:
		dev_warn(dev, "QP %d, QPC error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_MTU_ERROR:
		dev_warn(dev, "QP %d, MTU error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
		dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SL_ERROR:
		dev_warn(dev, "QP %d, SL error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_PORT_ERROR:
		dev_warn(dev, "QP %d, port error.\n", qpn);
		break;
	default:
		break;
	}

	hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
			  HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
			  HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
			  roce_get_field(aeqe->asyn,
					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
}

static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
						struct hns_roce_aeqe *aeqe,
						int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	qpn = roce_get_field(aeqe->event.qp_event.qp,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
	dev_warn(dev, "Local Access Violation Work Queue Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
		dev_warn(dev, "QP %d, R_key violation.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_LENGTH_ERROR:
		dev_warn(dev, "QP %d, length error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_VA_ERROR:
		dev_warn(dev, "QP %d, VA error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_PD_ERROR:
		dev_err(dev, "QP %d, PD error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
		dev_warn(dev, "QP %d, rw acc error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
		dev_warn(dev, "QP %d, key state error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
		dev_warn(dev, "QP %d, MR operation error.\n", qpn);
		break;
	default:
		break;
	}

	hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
			  HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
			  HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
			  roce_get_field(aeqe->asyn,
					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					 HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
}

static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
					struct hns_roce_aeqe *aeqe)
{
	struct device *dev = &hr_dev->pdev->dev;

	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
		dev_warn(dev, "SDB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
		dev_warn(dev, "SDB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
		dev_warn(dev, "SDB almost empty.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
		dev_warn(dev, "ODB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
		dev_warn(dev, "ODB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
		dev_warn(dev, "ODB almost empty.\n");
		break;
	default:
		break;
	}
}

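/*
 * AEQ polling loop: consume AEQEs while the owner bits indicate fresh
 * entries, dispatch each event type to its handler, wrap cons_index at
 * twice the queue depth, and finally ring the doorbell with the updated
 * consumer index.
 */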
static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_aeqe *aeqe;
	int aeqes_found = 0;
	int qpn = 0;

	while ((aeqe = next_aeqe_sw(eq))) {
		dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
			roce_get_field(aeqe->asyn,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
		/* Memory barrier */
		rmb();

		switch (roce_get_field(aeqe->asyn,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			dev_warn(dev, "PATH MIG not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			dev_warn(dev, "COMMUNICATION established\n");
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			dev_warn(dev, "SQ DRAINED not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			dev_warn(dev, "PATH MIG failed\n");
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
			dev_warn(dev, "qpn = 0x%lx\n",
				 roce_get_field(aeqe->event.qp_event.qp,
					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S));
			hns_roce_qp_event(hr_dev,
				roce_get_field(aeqe->event.qp_event.qp,
					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
					HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
				roce_get_field(aeqe->asyn,
					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
			break;
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
			hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
			break;
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			dev_warn(dev, "SRQ not support!\n");
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
			dev_warn(dev, "CQ 0x%lx access err.\n",
				 roce_get_field(aeqe->event.cq_event.cq,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
			hns_roce_cq_event(hr_dev,
				le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
				roce_get_field(aeqe->asyn,
					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
			dev_warn(dev, "CQ 0x%lx overflow\n",
				 roce_get_field(aeqe->event.cq_event.cq,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
			hns_roce_cq_event(hr_dev,
				le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
				roce_get_field(aeqe->asyn,
					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
			dev_warn(dev, "CQ ID invalid.\n");
			hns_roce_cq_event(hr_dev,
				le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
					HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
				roce_get_field(aeqe->asyn,
					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
			break;
		case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
			dev_warn(dev, "port change.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			hns_roce_db_overflow_handle(hr_dev, aeqe);
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			dev_warn(dev, "CEQ 0x%lx overflow.\n",
				 roce_get_field(aeqe->event.ce_event.ceqe,
					HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
					HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
			break;
		default:
			dev_warn(dev, "Unhandled event 0x%lx on EQ %d at index %u\n",
				 roce_get_field(aeqe->asyn,
						HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
						HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S),
				 eq->eqn, eq->cons_index);
			break;
		}

		eq->cons_index++;
		aeqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
			dev_warn(dev, "cons_index overflow, set back to zero\n");
			eq->cons_index = 0;
		}
	}

	eq_set_cons_index(eq, 0);

	return aeqes_found;
}

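/*
 * CEQE accessors: these mirror get_aeqe()/next_aeqe_sw() above, using the
 * CEQ entry size and the completion owner bit instead of the async ones.
 */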
static struct hns_roce_ceqe *get_ceqe(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			    HNS_ROCE_CEQ_ENTRY_SIZE;

	return (struct hns_roce_ceqe *)((u8 *)
		(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
		off % HNS_ROCE_BA_SIZE);
}

static struct hns_roce_ceqe *next_ceqe_sw(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = get_ceqe(eq, eq->cons_index);

	return (!!(roce_get_bit(ceqe->ceqe.comp,
				HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
	       (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}

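/*
 * CEQ polling loop: each valid CEQE carries the CQN of a CQ with new
 * completions; hand it to hns_roce_cq_completion() and write the consumer
 * index back once the queue is drained.
 */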
static int hns_roce_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;
	int ceqes_found = 0;
	u32 cqn;

	while ((ceqe = next_ceqe_sw(eq))) {
		/* Memory barrier */
		rmb();
		cqn = roce_get_field(ceqe->ceqe.comp,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth[eq->eqn] - 1) {
			dev_warn(&eq->hr_dev->pdev->dev,
				 "cons_index overflow, set back to zero\n");
			eq->cons_index = 0;
		}
	}

	eq_set_cons_index(eq, 0);

	return ceqes_found;
}

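/*
 * Overflow and error interrupt path. Both the AEQ almost-overflow and the
 * per-CEQ almost-overflow cases use the same write-1-to-clear sequence:
 * mask the irq, clear the interrupt state, then unmask it again. The ECC
 * alarm registers are dumped unconditionally at the end.
 */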
static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq)
{
	struct device *dev = &eq->hr_dev->pdev->dev;
	u32 caepaemask_val;
	u32 cealmovf_val;
	u32 caepaest_val;
	u32 aeshift_val;
	u32 ceshift_val;
	u32 cemask_val;
	int eqovf_found = 0;
	int i = 0;

	/*
	 * AEQ overflow ECC mult bit err CEQ overflow alarm
	 * must clear interrupt, mask irq, clear irq, cancel mask operation
	 */
	aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);

	if (roce_get_bit(aeshift_val,
		ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
		dev_warn(dev, "AEQ overflow!\n");

		/* Set mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_ENABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);

		/* Clear int state(INT_WC : write 1 clear) */
		caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
		roce_set_bit(caepaest_val,
			     ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
		roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);

		/* Clear mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_DISABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
	}

	/* CEQ almost overflow */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
					i * CEQ_REG_OFFSET);

		if (roce_get_bit(ceshift_val,
			ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
			dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
			eqovf_found++;

			/* Set mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_ENABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);

			/* Clear int state(INT_WC : write 1 clear) */
			cealmovf_val = roce_read(hr_dev,
						 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
						 i * CEQ_REG_OFFSET);
			roce_set_bit(cealmovf_val,
				     ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
				     1);
			roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
				   i * CEQ_REG_OFFSET, cealmovf_val);

			/* Clear mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_DISABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);
		}
	}

	/* ECC multi-bit error alarm */
	dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));

	dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));

	return eqovf_found;
}

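/*
 * Per-vector interrupt dispatch: CEQs and AEQs are pulse irqs and need no
 * explicit clearing; anything else is treated as the overflow interrupt
 * and handled by hns_roce_aeq_ovf_int().
 */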
static int hns_roce_eq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	int eqes_found = 0;

	if (likely(eq->type_flag == HNS_ROCE_CEQ))
		/* CEQ irq routine, CEQ is pulse irq, not clear */
		eqes_found = hns_roce_ceq_int(hr_dev, eq);
	else if (likely(eq->type_flag == HNS_ROCE_AEQ))
		/* AEQ irq routine, AEQ is pulse irq, not clear */
		eqes_found = hns_roce_aeq_int(hr_dev, eq);
	else
		/* AEQ queue overflow irq */
		eqes_found = hns_roce_aeq_ovf_int(hr_dev, eq);

	return eqes_found;
}

static irqreturn_t hns_roce_msi_x_interrupt(int irq, void *eq_ptr)
{
	int int_work = 0;
	struct hns_roce_eq *eq = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;

	int_work = hns_roce_eq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}

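/*
 * Enable or disable an EQ by flipping the state field of its context
 * register between HNS_ROCE_EQ_STAT_VALID and HNS_ROCE_EQ_STAT_INVALID.
 */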
static void hns_roce_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
			       int enable_flag)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
	u32 val;

	val = readl(eqc);

	if (enable_flag)
		roce_set_field(val,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_VALID);
	else
		roce_set_field(val,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_INVALID);
	writel(val, eqc);
}

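/*
 * Allocate the EQ ring as DMA-coherent chunks of HNS_ROCE_BA_SIZE and
 * program the EQ context: the state and entry-shift word at eqc, base
 * address bits 12~44 at eqc + 4, base address bits 45~49 plus the current
 * index at eqc + 8, and the consumer index at eqc + 0xc.
 */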
static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
			      struct hns_roce_eq *eq)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t tmp_dma_addr;
	u32 eqconsindx_val = 0;
	u32 eqcuridx_val = 0;
	u32 eqshift_val = 0;
	int num_bas = 0;
	int ret;
	int i;

	num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
		   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
		dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
			(eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
			num_bas);
		return -EINVAL;
	}

	eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
	if (!eq->buf_list)
		return -ENOMEM;

	for (i = 0; i < num_bas; ++i) {
		eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
							 &tmp_dma_addr,
							 GFP_KERNEL);
		if (!eq->buf_list[i].buf) {
			ret = -ENOMEM;
			goto err_out_free_pages;
		}

		eq->buf_list[i].map = tmp_dma_addr;
		memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
	}
	eq->cons_index = 0;
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
		       HNS_ROCE_EQ_STAT_INVALID);
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
		       eq->log_entries);
	writel(eqshift_val, eqc);

	/* Configure eq extended address 12~44bit */
	writel((u32)(eq->buf_list[0].map >> 12), (u8 *)eqc + 4);

	/*
	 * Configure eq extended address 45~49 bit.
	 * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of
	 * using 4K page, and shift more 32 because of
	 * calculating the high 32 bit value evaluated to hardware.
	 */
	roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
		       eq->buf_list[0].map >> 44);
	roce_set_field(eqcuridx_val,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
	writel(eqcuridx_val, (u8 *)eqc + 8);

	/* Configure eq consumer index */
	roce_set_field(eqconsindx_val,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
	writel(eqconsindx_val, (u8 *)eqc + 0xc);

	return 0;

err_out_free_pages:
	for (i = i - 1; i >= 0; i--)
		dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
				  eq->buf_list[i].map);

	kfree(eq->buf_list);
	return ret;
}

static void hns_roce_free_eq(struct hns_roce_dev *hr_dev,
			     struct hns_roce_eq *eq)
{
	int i = 0;
	int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
		      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if (!eq->buf_list)
		return;

	for (i = 0; i < npages; ++i)
		dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
				  eq->buf_list[i].buf, eq->buf_list[i].map);

	kfree(eq->buf_list);
}

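/*
 * Write masken (0 here) to the AEQ overflow/irq mask bits and to every
 * per-CEQ irq mask register as a single default before the EQs are
 * brought up.
 */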
static void hns_roce_int_mask_en(struct hns_roce_dev *hr_dev)
{
	int i = 0;
	u32 aemask_val;
	int masken = 0;

	/* AEQ INT */
	aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
	roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
		     masken);
	roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
	roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);

	/* CEQ INT */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		/* IRQ mask */
		roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
			   i * CEQ_REG_OFFSET, masken);
	}
}

static void hns_roce_ce_int_default_cfg(struct hns_roce_dev *hr_dev)
{
	/* Configure ce int interval */
	roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

	/* Configure ce int burst num */
	roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
}

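/*
 * EQ table setup: vectors [0, num_comp_vectors) are CEQs, one per
 * completion vector, and the vector after them is the AEQ. Each EQ is
 * created and its irq requested before any queue is enabled; the error
 * paths unwind in reverse order.
 */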
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_eq *eq = NULL;
	int eq_num = 0;
	int ret = 0;
	int i = 0;
	int j = 0;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
				     GFP_KERNEL);
	if (!eq_table->eqc_base) {
		ret = -ENOMEM;
		goto err_eqc_base_alloc_fail;
	}

	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		eq->irq = hr_dev->irq[i];
		eq->log_page_size = PAGE_SHIFT;

		if (i < hr_dev->caps.num_comp_vectors) {
			/* CEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_CEQC_SHIFT_0_REG +
						HNS_ROCE_CEQC_REG_OFFSET * i;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
				       HNS_ROCE_CEQC_REG_OFFSET * i;
			eq->entries = hr_dev->caps.ceqe_depth[i];
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = sizeof(struct hns_roce_ceqe);
		} else {
			/* AEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_AEQE_CONS_IDX_REG;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = sizeof(struct hns_roce_aeqe);
		}
	}

	hns_roce_int_mask_en(hr_dev);

	/* Configure CE irq interval and burst num */
	hns_roce_ce_int_default_cfg(hr_dev);

	for (i = 0; i < eq_num; i++) {
		ret = hns_roce_create_eq(hr_dev, &eq_table->eq[i]);
		if (ret) {
			dev_err(dev, "eq create failed\n");
			goto err_create_eq_fail;
		}
	}

	for (j = 0; j < eq_num; j++) {
		ret = request_irq(eq_table->eq[j].irq, hns_roce_msi_x_interrupt,
				  0, hr_dev->irq_names[j], eq_table->eq + j);
		if (ret) {
			dev_err(dev, "request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	for (i = 0; i < eq_num; i++)
		hns_roce_enable_eq(hr_dev, i, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	for (j = j - 1; j >= 0; j--)
		free_irq(eq_table->eq[j].irq, eq_table->eq + j);

err_create_eq_fail:
	for (i = i - 1; i >= 0; i--)
		hns_roce_free_eq(hr_dev, &eq_table->eq[i]);

	kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
	kfree(eq_table->eq);

	return ret;
}

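/*
 * Inverse of hns_roce_init_eq_table(): disable each EQ, release its irq
 * and ring buffers, then free the bookkeeping arrays.
 */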
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	int i;
	int eq_num;
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	for (i = 0; i < eq_num; i++) {
		/* Disable EQ */
		hns_roce_enable_eq(hr_dev, i, EQ_DISABLE);

		free_irq(eq_table->eq[i].irq, eq_table->eq + i);

		hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
	}

	kfree(eq_table->eqc_base);
	kfree(eq_table->eq);
}