/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/interrupt.h>	/* request_irq(), irqreturn_t, IRQ_RETVAL() */
#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_eq.h"

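/*
 * Ring the EQ doorbell: the low bits of the doorbell word carry the
 * consumer index (masked by CONS_INDEX_MASK), and req_not, presumably
 * the request-notification (re-arm) flag, sits at bit eq->log_entries.
 * The barrier keeps the doorbell write ordered against later accesses.
 */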
static void eq_set_cons_index(struct hns_roce_eq *eq, int req_not)
{
        roce_raw_write((eq->cons_index & CONS_INDEX_MASK) |
                       (req_not << eq->log_entries), eq->doorbell);
        /* Memory barrier */
        mb();
}

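/*
 * The EQ ring is not one contiguous allocation but a list of
 * HNS_ROCE_BA_SIZE chunks: locate an entry by picking the chunk
 * (off / HNS_ROCE_BA_SIZE) and then the offset inside it.
 */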
static struct hns_roce_aeqe *get_aeqe(struct hns_roce_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->entries - 1)) *
                            HNS_ROCE_AEQ_ENTRY_SIZE;

        return (struct hns_roce_aeqe *)((u8 *)
                (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
                off % HNS_ROCE_BA_SIZE);
}

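/*
 * Owner-bit handshake: an AEQE belongs to software while its owner bit
 * differs from the software wrap parity (cons_index & entries), i.e.
 * hardware presumably flips the bit on each pass through the ring.
 * Returns NULL once the queue is drained.
 */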
static struct hns_roce_aeqe *next_aeqe_sw(struct hns_roce_eq *eq)
{
        struct hns_roce_aeqe *aeqe = get_aeqe(eq, eq->cons_index);

        return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
                !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}

static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
                                         struct hns_roce_aeqe *aeqe, int qpn)
{
        struct device *dev = &hr_dev->pdev->dev;

        qpn = roce_get_field(aeqe->event.qp_event.qp,
                             HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
                             HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
        dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
        switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
                               HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
        case HNS_ROCE_LWQCE_QPC_ERROR:
                dev_warn(dev, "QP %d, QPC error.\n", qpn);
                break;
        case HNS_ROCE_LWQCE_MTU_ERROR:
                dev_warn(dev, "QP %d, MTU error.\n", qpn);
                break;
        case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
                dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
                break;
        case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
                dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
                break;
        case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
                dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
                break;
        case HNS_ROCE_LWQCE_SL_ERROR:
                dev_warn(dev, "QP %d, SL error.\n", qpn);
                break;
        case HNS_ROCE_LWQCE_PORT_ERROR:
                dev_warn(dev, "QP %d, port error.\n", qpn);
                break;
        default:
                break;
        }

        hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
                                        HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
                                        HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
                          roce_get_field(aeqe->asyn,
                                         HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
                                         HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
}

static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
                                                struct hns_roce_aeqe *aeqe,
                                                int qpn)
{
        struct device *dev = &hr_dev->pdev->dev;

        qpn = roce_get_field(aeqe->event.qp_event.qp,
                             HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
                             HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
        dev_warn(dev, "Local Access Violation Work Queue Error.\n");
        switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
                               HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
        case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
                dev_warn(dev, "QP %d, R_key violation.\n", qpn);
                break;
        case HNS_ROCE_LAVWQE_LENGTH_ERROR:
                dev_warn(dev, "QP %d, length error.\n", qpn);
                break;
        case HNS_ROCE_LAVWQE_VA_ERROR:
                dev_warn(dev, "QP %d, VA error.\n", qpn);
                break;
        case HNS_ROCE_LAVWQE_PD_ERROR:
                dev_err(dev, "QP %d, PD error.\n", qpn);
                break;
        case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
                dev_warn(dev, "QP %d, rw acc error.\n", qpn);
                break;
        case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
                dev_warn(dev, "QP %d, key state error.\n", qpn);
                break;
        case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
                dev_warn(dev, "QP %d, MR operation error.\n", qpn);
                break;
        default:
                break;
        }

        hns_roce_qp_event(hr_dev, roce_get_field(aeqe->event.qp_event.qp,
                                        HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
                                        HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
                          roce_get_field(aeqe->asyn,
                                         HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
                                         HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
}

static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
                                        struct hns_roce_aeqe *aeqe)
{
        struct device *dev = &hr_dev->pdev->dev;

        switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
                               HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
        case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
                dev_warn(dev, "SDB overflow.\n");
                break;
        case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
                dev_warn(dev, "SDB almost overflow.\n");
                break;
        case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
                dev_warn(dev, "SDB almost empty.\n");
                break;
        case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
                dev_warn(dev, "ODB overflow.\n");
                break;
        case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
                dev_warn(dev, "ODB almost overflow.\n");
                break;
        case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
                dev_warn(dev, "ODB almost empty.\n");
                break;
        default:
                break;
        }
}

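/*
 * AEQ service loop: consume every software-owned AEQE, dispatch it by
 * event type to the QP/CQ/command-event handlers, then publish the new
 * consumer index through the doorbell.
 */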
static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_aeqe *aeqe;
        int aeqes_found = 0;
        int qpn = 0;

        while ((aeqe = next_aeqe_sw(eq))) {
                dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
                        roce_get_field(aeqe->asyn,
                                       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
                                       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
                /* Memory barrier */
                rmb();

                switch (roce_get_field(aeqe->asyn,
                                       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
                                       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)) {
                case HNS_ROCE_EVENT_TYPE_PATH_MIG:
                        dev_warn(dev, "PATH MIG not supported\n");
                        break;
                case HNS_ROCE_EVENT_TYPE_COMM_EST:
                        dev_warn(dev, "COMMUNICATION established\n");
                        break;
                case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
                        dev_warn(dev, "SQ DRAINED not supported\n");
                        break;
                case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
                        dev_warn(dev, "PATH MIG failed\n");
                        break;
                case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
                        dev_warn(dev, "qpn = 0x%lx\n",
                                 roce_get_field(aeqe->event.qp_event.qp,
                                        HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
                                        HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S));
                        hns_roce_qp_event(hr_dev,
                                roce_get_field(aeqe->event.qp_event.qp,
                                        HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
                                        HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S),
                                roce_get_field(aeqe->asyn,
                                        HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
                                        HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
                        break;
                case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
                        hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
                        break;
                case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
                        hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
                        break;
                case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
                case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
                case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
                        dev_warn(dev, "SRQ not supported!\n");
                        break;
                case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
                        dev_warn(dev, "CQ 0x%lx access err.\n",
                                 roce_get_field(aeqe->event.cq_event.cq,
                                        HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
                                        HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
                        hns_roce_cq_event(hr_dev,
                                le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
                                        HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
                                        HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
                                roce_get_field(aeqe->asyn,
                                        HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
                                        HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
                        break;
                case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
                        dev_warn(dev, "CQ 0x%lx overflow\n",
                                 roce_get_field(aeqe->event.cq_event.cq,
                                        HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
                                        HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));
                        hns_roce_cq_event(hr_dev,
                                le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
                                        HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
                                        HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
                                roce_get_field(aeqe->asyn,
                                        HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
                                        HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
                        break;
                case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
                        dev_warn(dev, "CQ ID invalid.\n");
                        hns_roce_cq_event(hr_dev,
                                le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
                                        HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
                                        HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S)),
                                roce_get_field(aeqe->asyn,
                                        HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
                                        HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
                        break;
                case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
                        dev_warn(dev, "port change.\n");
                        break;
                case HNS_ROCE_EVENT_TYPE_MB:
                        hns_roce_cmd_event(hr_dev,
                                           le16_to_cpu(aeqe->event.cmd.token),
                                           aeqe->event.cmd.status,
                                           le64_to_cpu(aeqe->event.cmd.out_param));
                        break;
                case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
                        hns_roce_db_overflow_handle(hr_dev, aeqe);
                        break;
                case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
                        dev_warn(dev, "CEQ 0x%lx overflow.\n",
                                 roce_get_field(aeqe->event.ce_event.ceqe,
                                        HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
                                        HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
                        break;
                default:
                        dev_warn(dev, "Unhandled event 0x%lx on EQ %d at index %u\n",
                                 roce_get_field(aeqe->asyn,
                                        HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
                                        HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S),
                                 eq->eqn, eq->cons_index);
                        break;
                }

                eq->cons_index++;
                aeqes_found = 1;

                if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
                        dev_warn(dev, "cons_index overflow, set back to zero\n");
                        eq->cons_index = 0;
                }
        }

        eq_set_cons_index(eq, 0);

        return aeqes_found;
}

static struct hns_roce_ceqe *get_ceqe(struct hns_roce_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->entries - 1)) *
                            HNS_ROCE_CEQ_ENTRY_SIZE;

        return (struct hns_roce_ceqe *)((u8 *)
                (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
                off % HNS_ROCE_BA_SIZE);
}

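/*
 * Same owner-bit handshake as next_aeqe_sw(), applied to the CEQE
 * owner bit.
 */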
static struct hns_roce_ceqe *next_ceqe_sw(struct hns_roce_eq *eq)
{
        struct hns_roce_ceqe *ceqe = get_ceqe(eq, eq->cons_index);

        return (!!(roce_get_bit(ceqe->ceqe.comp,
                 HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
                (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}

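/*
 * CEQ service loop: each CEQE carries only a CQ number, which is
 * forwarded to hns_roce_cq_completion().
 */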
static int hns_roce_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
        struct hns_roce_ceqe *ceqe;
        int ceqes_found = 0;
        u32 cqn;

        while ((ceqe = next_ceqe_sw(eq))) {
                /* Memory barrier */
                rmb();
                cqn = roce_get_field(ceqe->ceqe.comp,
                                     HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
                                     HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
                hns_roce_cq_completion(hr_dev, cqn);

                ++eq->cons_index;
                ceqes_found = 1;

                if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth[eq->eqn] - 1) {
                        dev_warn(&eq->hr_dev->pdev->dev,
                                 "cons_index overflow, set back to zero\n");
                        eq->cons_index = 0;
                }
        }

        eq_set_cons_index(eq, 0);

        return ceqes_found;
}

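/*
 * Service routine for the overflow irq: each pending AEQ-overflow or
 * CEQ-almost-overflow condition is handled by masking the source,
 * clearing its write-1-to-clear status bit, and unmasking again; the
 * ECC alarm registers are dumped unconditionally.
 */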
static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
                                struct hns_roce_eq *eq)
{
        struct device *dev = &eq->hr_dev->pdev->dev;
        int eqovf_found = 0;
        u32 caepaemask_val;
        u32 cealmovf_val;
        u32 caepaest_val;
        u32 aeshift_val;
        u32 ceshift_val;
        u32 cemask_val;
        int i = 0;

        /*
         * AEQ overflow, ECC multi-bit error and CEQ almost-overflow
         * alarms must be cleared as: mask the irq, clear the irq state,
         * then cancel the mask.
         */
        aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);

        if (roce_get_bit(aeshift_val,
                ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
                dev_warn(dev, "AEQ overflow!\n");

                /* Set mask */
                caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
                roce_set_bit(caepaemask_val,
                             ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
                             HNS_ROCE_INT_MASK_ENABLE);
                roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);

                /* Clear int state (INT_WC: write 1 to clear) */
                caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
                roce_set_bit(caepaest_val,
                             ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
                roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);

                /* Clear mask */
                caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
                roce_set_bit(caepaemask_val,
                             ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
                             HNS_ROCE_INT_MASK_DISABLE);
                roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
        }

        /* CEQ almost overflow */
        for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
                ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
                                        i * CEQ_REG_OFFSET);

                if (roce_get_bit(ceshift_val,
                        ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
                        dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
                        eqovf_found++;

                        /* Set mask */
                        cemask_val = roce_read(hr_dev,
                                               ROCEE_CAEP_CE_IRQ_MASK_0_REG +
                                               i * CEQ_REG_OFFSET);
                        roce_set_bit(cemask_val,
                                ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
                                HNS_ROCE_INT_MASK_ENABLE);
                        roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
                                   i * CEQ_REG_OFFSET, cemask_val);

                        /* Clear int state (INT_WC: write 1 to clear) */
                        cealmovf_val = roce_read(hr_dev,
                                                 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
                                                 i * CEQ_REG_OFFSET);
                        roce_set_bit(cealmovf_val,
                                     ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
                                     1);
                        roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
                                   i * CEQ_REG_OFFSET, cealmovf_val);

                        /* Clear mask */
                        cemask_val = roce_read(hr_dev,
                                               ROCEE_CAEP_CE_IRQ_MASK_0_REG +
                                               i * CEQ_REG_OFFSET);
                        roce_set_bit(cemask_val,
                                ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
                                HNS_ROCE_INT_MASK_DISABLE);
                        roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
                                   i * CEQ_REG_OFFSET, cemask_val);
                }
        }

        /* ECC multi-bit error alarm */
        dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
                 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
                 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
                 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));

        dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
                 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
                 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
                 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));

        return eqovf_found;
}

static int hns_roce_eq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
        int eqes_found = 0;

        if (likely(eq->type_flag == HNS_ROCE_CEQ))
                /* CEQ irq routine, CEQ is pulse irq, not clear */
                eqes_found = hns_roce_ceq_int(hr_dev, eq);
        else if (likely(eq->type_flag == HNS_ROCE_AEQ))
                /* AEQ irq routine, AEQ is pulse irq, not clear */
                eqes_found = hns_roce_aeq_int(hr_dev, eq);
        else
                /* AEQ queue overflow irq */
                eqes_found = hns_roce_aeq_ovf_int(hr_dev, eq);

        return eqes_found;
}

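/*
 * One MSI-X vector is requested per EQ, with the EQ itself as dev_id,
 * so the handler only has to poll that queue and report whether any
 * entries were found.
 */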
static irqreturn_t hns_roce_msi_x_interrupt(int irq, void *eq_ptr)
{
        int int_work = 0;
        struct hns_roce_eq *eq = eq_ptr;
        struct hns_roce_dev *hr_dev = eq->hr_dev;

        int_work = hns_roce_eq_int(hr_dev, eq);

        return IRQ_RETVAL(int_work);
}

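/*
 * Enable or disable an EQ by toggling the state field of its context
 * register between HNS_ROCE_EQ_STAT_VALID and HNS_ROCE_EQ_STAT_INVALID.
 */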
static void hns_roce_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
                               int enable_flag)
{
        void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
        u32 val;

        val = readl(eqc);

        if (enable_flag)
                roce_set_field(val,
                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
                               HNS_ROCE_EQ_STAT_VALID);
        else
                roce_set_field(val,
                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
                               ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
                               HNS_ROCE_EQ_STAT_INVALID);
        writel(val, eqc);
}

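/*
 * Allocate the EQ ring as HNS_ROCE_BA_SIZE DMA-coherent chunks and
 * program the EQ context words: state/entry shift, the base address
 * split across two registers, the current index and the consumer index.
 */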
static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
                              struct hns_roce_eq *eq)
{
        void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
        struct device *dev = &hr_dev->pdev->dev;
        dma_addr_t tmp_dma_addr;
        u32 eqconsindx_val = 0;
        u32 eqcuridx_val = 0;
        u32 eqshift_val = 0;
        int num_bas = 0;
        int ret;
        int i;

        num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
                   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

        if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
                dev_err(dev, "eq buf %d is larger than ba size (%d), %d bas needed\n",
                        (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
                        num_bas);
                return -EINVAL;
        }

        eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
        if (!eq->buf_list)
                return -ENOMEM;

        for (i = 0; i < num_bas; ++i) {
                eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
                                                         &tmp_dma_addr,
                                                         GFP_KERNEL);
                if (!eq->buf_list[i].buf) {
                        ret = -ENOMEM;
                        goto err_out_free_pages;
                }

                eq->buf_list[i].map = tmp_dma_addr;
                memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
        }
        eq->cons_index = 0;
        roce_set_field(eqshift_val,
                       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
                       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
                       HNS_ROCE_EQ_STAT_INVALID);
        roce_set_field(eqshift_val,
                       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
                       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
                       eq->log_entries);
        writel(eqshift_val, eqc);

        /* Configure eq extended address 12~44 bit */
        writel((u32)(eq->buf_list[0].map >> 12), (u8 *)eqc + 4);

        /*
         * Configure eq extended address 45~49 bit.
         * 44 = 32 + 12: the address handed to hardware is shifted right
         * by 12 because 4 KB pages are used, and by a further 32 to
         * extract the high 32-bit part programmed into this register.
         */
        roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
                       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
                       eq->buf_list[0].map >> 44);
        roce_set_field(eqcuridx_val,
                       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
                       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
        writel(eqcuridx_val, (u8 *)eqc + 8);

        /* Configure eq consumer index */
        roce_set_field(eqconsindx_val,
                       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
                       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
        writel(eqconsindx_val, (u8 *)eqc + 0xc);

        return 0;

err_out_free_pages:
        for (i = i - 1; i >= 0; i--)
                dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
                                  eq->buf_list[i].map);

        kfree(eq->buf_list);
        return ret;
}

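/* Free the chunk list allocated by hns_roce_create_eq(). */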
static void hns_roce_free_eq(struct hns_roce_dev *hr_dev,
                             struct hns_roce_eq *eq)
{
        int i = 0;
        int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
                      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

        if (!eq->buf_list)
                return;

        for (i = 0; i < npages; ++i)
                dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
                                  eq->buf_list[i].buf, eq->buf_list[i].map);

        kfree(eq->buf_list);
}

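/*
 * Write a single mask value (masken, 0 here) to the AEQ mask bits and
 * to every CEQ irq mask register; the init path calls this to disable
 * EQ interrupts before the queues are created.
 */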
static void hns_roce_int_mask_en(struct hns_roce_dev *hr_dev)
{
        int i = 0;
        u32 aemask_val;
        int masken = 0;

        /* AEQ INT */
        aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
        roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
                     masken);
        roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
        roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);

        /* CEQ INT */
        for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
                /* IRQ mask */
                roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
                           i * CEQ_REG_OFFSET, masken);
        }
}

static void hns_roce_ce_int_default_cfg(struct hns_roce_dev *hr_dev)
{
        /* Configure ce int interval */
        roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
                   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

        /* Configure ce int burst num */
        roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
                   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
}

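/*
 * Build the EQ table: EQ numbers 0..num_comp_vectors-1 are completion
 * EQs, the remaining num_aeq_vectors are asynchronous EQs. Interrupts
 * stay masked while the queues are created; each EQ then gets its irq
 * line and is switched to the valid state.
 */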
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev)
{
        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
        struct device *dev = &hr_dev->pdev->dev;
        struct hns_roce_eq *eq = NULL;
        int eq_num = 0;
        int ret = 0;
        int i = 0;
        int j = 0;

        eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
        eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
        if (!eq_table->eq)
                return -ENOMEM;

        eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
                                     GFP_KERNEL);
        if (!eq_table->eqc_base) {
                ret = -ENOMEM;
                goto err_eqc_base_alloc_fail;
        }

        for (i = 0; i < eq_num; i++) {
                eq = &eq_table->eq[i];
                eq->hr_dev = hr_dev;
                eq->eqn = i;
                eq->irq = hr_dev->irq[i];
                eq->log_page_size = PAGE_SHIFT;

                if (i < hr_dev->caps.num_comp_vectors) {
                        /* CEQ */
                        eq_table->eqc_base[i] = hr_dev->reg_base +
                                                ROCEE_CAEP_CEQC_SHIFT_0_REG +
                                                HNS_ROCE_CEQC_REG_OFFSET * i;
                        eq->type_flag = HNS_ROCE_CEQ;
                        eq->doorbell = hr_dev->reg_base +
                                       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
                                       HNS_ROCE_CEQC_REG_OFFSET * i;
                        eq->entries = hr_dev->caps.ceqe_depth[i];
                        eq->log_entries = ilog2(eq->entries);
                        eq->eqe_size = sizeof(struct hns_roce_ceqe);
                } else {
                        /* AEQ */
                        eq_table->eqc_base[i] = hr_dev->reg_base +
                                                ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
                        eq->type_flag = HNS_ROCE_AEQ;
                        eq->doorbell = hr_dev->reg_base +
                                       ROCEE_CAEP_AEQE_CONS_IDX_REG;
                        eq->entries = hr_dev->caps.aeqe_depth;
                        eq->log_entries = ilog2(eq->entries);
                        eq->eqe_size = sizeof(struct hns_roce_aeqe);
                }
        }

        /* Disable irq */
        hns_roce_int_mask_en(hr_dev);

        /* Configure CE irq interval and burst num */
        hns_roce_ce_int_default_cfg(hr_dev);

        for (i = 0; i < eq_num; i++) {
                ret = hns_roce_create_eq(hr_dev, &eq_table->eq[i]);
                if (ret) {
                        dev_err(dev, "eq create failed\n");
                        goto err_create_eq_fail;
                }
        }

        for (j = 0; j < eq_num; j++) {
                ret = request_irq(eq_table->eq[j].irq, hns_roce_msi_x_interrupt,
                                  0, hr_dev->irq_names[j], eq_table->eq + j);
                if (ret) {
                        dev_err(dev, "request irq error!\n");
                        goto err_request_irq_fail;
                }
        }

        for (i = 0; i < eq_num; i++)
                hns_roce_enable_eq(hr_dev, i, EQ_ENABLE);

        return 0;

err_request_irq_fail:
        for (j = j - 1; j >= 0; j--)
                free_irq(eq_table->eq[j].irq, eq_table->eq + j);

err_create_eq_fail:
        for (i = i - 1; i >= 0; i--)
                hns_roce_free_eq(hr_dev, &eq_table->eq[i]);

        kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
        kfree(eq_table->eq);

        return ret;
}

void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
        int i;
        int eq_num;
        struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;

        eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
        for (i = 0; i < eq_num; i++) {
                /* Disable EQ */
                hns_roce_enable_eq(hr_dev, i, EQ_DISABLE);

                free_irq(eq_table->eq[i].irq, eq_table->eq + i);

                hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
        }

        kfree(eq_table->eqc_base);
        kfree(eq_table->eq);
}