/*
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

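/*
 * EQ sizing: MLX4_NUM_ASYNC_EQE entries are requested for the async
 * EQ, MLX4_NUM_SPARE_EQE extra entries are added to every EQ so the
 * consumer index need not be updated for each event (see
 * mlx4_eq_int()), and each event queue entry is 32 bytes.
 */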
enum {
        MLX4_NUM_ASYNC_EQE  = 0x100,
        MLX4_NUM_SPARE_EQE  = 0x80,
        MLX4_EQ_ENTRY_SIZE  = 0x20
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
        __be32          flags;
        u16             reserved1[3];
        __be16          page_offset;
        u8              log_eq_size;
        u8              reserved2[4];
        u8              eq_period;
        u8              reserved3;
        u8              eq_max_count;
        u8              reserved4[3];
        u8              intr;
        u8              log_page_size;
        u8              reserved5[2];
        u8              mtt_base_addr_h;
        __be32          mtt_base_addr_l;
        u32             reserved6[2];
        __be32          consumer_index;
        __be32          producer_index;
        u32             reserved7[4];
};

#define MLX4_EQ_STATUS_OK          ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW           ( 0 << 24)
#define MLX4_EQ_OWNER_HW           ( 1 << 24)
#define MLX4_EQ_FLAG_EC            ( 1 << 18)
#define MLX4_EQ_FLAG_OI            ( 1 << 17)
#define MLX4_EQ_STATE_ARMED        ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED        (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

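/*
 * Async events forwarded to software on the async EQ; this mask is
 * passed to the MAP_EQ command when the EQ table is initialized and
 * again (with the unmap bit set) on cleanup.
 */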
#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)         | \
                               (1ull << MLX4_EVENT_TYPE_COMM_EST)         | \
                               (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)       | \
                               (1ull << MLX4_EVENT_TYPE_CQ_ERROR)         | \
                               (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)   | \
                               (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)  | \
                               (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)  | \
                               (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)  | \
                               (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)      | \
                               (1ull << MLX4_EVENT_TYPE_ECC_DETECT)       | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)  | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)  | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)        | \
                               (1ull << MLX4_EVENT_TYPE_CMD))

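/*
 * Event queue entry layout.  The leading bytes, the 24-byte event
 * union and the trailing bytes add up to the 32 bytes of
 * MLX4_EQ_ENTRY_SIZE; next_eqe_sw() below keys off the high bit of
 * the final 'owner' byte.
 */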
struct mlx4_eqe {
        u8                      reserved1;
        u8                      type;
        u8                      reserved2;
        u8                      subtype;
        union {
                u32             raw[6];
                struct {
                        __be32  cqn;
                } __attribute__((packed)) comp;
                struct {
                        u16     reserved1;
                        __be16  token;
                        u32     reserved2;
                        u8      reserved3[3];
                        u8      status;
                        __be64  out_param;
                } __attribute__((packed)) cmd;
                struct {
                        __be32  qpn;
                } __attribute__((packed)) qp;
                struct {
                        __be32  srqn;
                } __attribute__((packed)) srq;
                struct {
                        __be32  cqn;
                        u32     reserved1;
                        u8      reserved2[3];
                        u8      syndrome;
                } __attribute__((packed)) cq_err;
                struct {
                        u32     reserved1[2];
                        __be32  port;
                } __attribute__((packed)) port_change;
        } event;
        u8                      reserved3[3];
        u8                      owner;
} __attribute__((packed));

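/*
 * Update the EQ doorbell: the low 24 bits hold the current consumer
 * index, and bit 31 (req_not) asks the HCA to keep generating event
 * interrupts for this EQ (i.e. it re-arms the EQ).
 */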
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
        __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
                                               req_not << 31),
                     eq->doorbell);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

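/*
 * The EQ buffer is a list of PAGE_SIZE chunks rather than one
 * physically contiguous region, so an entry's byte offset is split
 * into a page index and an offset within that page.
 */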
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

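/*
 * An EQE is ready for software when the high bit of its owner byte
 * matches the parity of the current pass over the queue:
 * (cons_index & nent) flips each time the consumer index wraps, and
 * the driver relies on the HCA toggling the ownership bit it writes
 * at the same rate, so consumed entries never have to be handed back
 * to hardware explicitly.
 */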
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
        struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
        return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_eqe *eqe;
        int cqn;
        int eqes_found = 0;
        int set_ci = 0;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                switch (eqe->type) {
                case MLX4_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        mlx4_cq_completion(dev, cqn);
                        break;

                case MLX4_EVENT_TYPE_PATH_MIG:
                case MLX4_EVENT_TYPE_COMM_EST:
                case MLX4_EVENT_TYPE_SQ_DRAINED:
                case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
                case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_SRQ_LIMIT:
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
                        mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
                                       eqe->type);
                        break;

                case MLX4_EVENT_TYPE_CMD:
                        mlx4_cmd_event(dev,
                                       be16_to_cpu(eqe->event.cmd.token),
                                       eqe->event.cmd.status,
                                       be64_to_cpu(eqe->event.cmd.out_param));
                        break;

                case MLX4_EVENT_TYPE_PORT_CHANGE:
                        mlx4_dispatch_event(dev,
                                            eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_ACTIVE ?
                                            MLX4_DEV_EVENT_PORT_UP :
                                            MLX4_DEV_EVENT_PORT_DOWN,
                                            be32_to_cpu(eqe->event.port_change.port) >> 28);
                        break;

                case MLX4_EVENT_TYPE_CQ_ERROR:
                        mlx4_warn(dev, "CQ %s on CQN %06x\n",
                                  eqe->event.cq_err.syndrome == 1 ?
                                  "overrun" : "access violation",
                                  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_EQ_OVERFLOW:
                        mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
                        mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
                                  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
                        break;
                }

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX4_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
                        eq_set_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_set_ci(eq, 1);

        return eqes_found;
}

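/*
 * Legacy (INTx) interrupt handler: one interrupt line is shared by
 * all EQs, so clear the interrupt and then poll every EQ.
 */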
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
        struct mlx4_dev *dev = dev_ptr;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int work = 0;
        int i;

        writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

        for (i = 0; i < MLX4_NUM_EQ; ++i)
                work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

        return IRQ_RETVAL(work);
}

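/*
 * MSI-X interrupt handler: each EQ has its own vector, so the EQ
 * pointer arrives directly as the handler argument.
 */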
static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
        struct mlx4_eq *eq = eq_ptr;
        struct mlx4_dev *dev = eq->dev;

        mlx4_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

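/*
 * Thin wrappers around firmware commands: MAP_EQ maps or unmaps a
 * set of async events to an EQ (bit 31 of the input modifier selects
 * unmap), SW2HW_EQ passes ownership of an EQ context to the HCA, and
 * HW2SW_EQ reclaims it, with the context written back through the
 * mailbox (hence mlx4_cmd_box() rather than mlx4_cmd()).
 */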
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
                       int eq_num)
{
        return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
                        0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
                        MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
                            MLX4_CMD_TIME_CLASS_A);
}

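/*
 * EQ doorbells live in the UAR pages of BAR 2: four EQs share each
 * page, eight bytes per doorbell starting at offset 0x800.  Pages
 * are ioremapped lazily and cached in uar_map[], with the index
 * rebased past the pages used by the reserved EQs.
 */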
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int index;

        index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

        if (!priv->eq_table.uar_map[index]) {
                priv->eq_table.uar_map[index] =
                        ioremap(pci_resource_start(dev->pdev, 2) +
                                ((eq->eqn / 4) << PAGE_SHIFT),
                                PAGE_SIZE);
                if (!priv->eq_table.uar_map[index]) {
                        mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
                                 eq->eqn);
                        return NULL;
                }
        }

        return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
                          u8 intr, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_eq_context *eq_context;
        int npages;
        u64 *dma_list = NULL;
        dma_addr_t t;
        u64 mtt_addr;
        int err = -ENOMEM;
        int i;

        eq->dev  = dev;
        eq->nent = roundup_pow_of_two(max(nent, 2));
        npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                goto err_out_free;
        eq_context = mailbox->buf;

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                          PAGE_SIZE, &t, GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;

                dma_list[i] = t;
                eq->page_list[i].map = t;

                memset(eq->page_list[i].buf, 0, PAGE_SIZE);
        }

        eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
        if (eq->eqn == -1)
                goto err_out_free_pages;

        eq->doorbell = mlx4_get_eq_uar(dev, eq);
        if (!eq->doorbell) {
                err = -ENOMEM;
                goto err_out_free_eq;
        }

        err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
        if (err)
                goto err_out_free_eq;

        err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
        if (err)
                goto err_out_free_mtt;

        memset(eq_context, 0, sizeof *eq_context);
        eq_context->flags         = cpu_to_be32(MLX4_EQ_STATUS_OK |
                                                MLX4_EQ_STATE_ARMED);
        eq_context->log_eq_size   = ilog2(eq->nent);
        eq_context->intr          = intr;
        eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

        mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
        eq_context->mtt_base_addr_h = mtt_addr >> 32;
        eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

        err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
        if (err) {
                mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mtt;
        }

        kfree(dma_list);
        mlx4_free_cmd_mailbox(dev, mailbox);

        eq->cons_index = 0;

        return err;

err_out_free_mtt:
        mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          eq->page_list[i].map);

        mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
        kfree(eq->page_list);
        kfree(dma_list);

err_out:
        return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
                         struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int err;
        int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
        int i;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return;

        err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
        if (err)
                mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

        /* Change to "if (1)" to dump the EQ context returned by HW2SW_EQ. */
        if (0) {
                mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
                for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        mlx4_mtt_cleanup(dev, &eq->mtt);
        for (i = 0; i < npages; ++i)
                pci_free_consistent(dev->pdev, PAGE_SIZE,
                                    eq->page_list[i].buf,
                                    eq->page_list[i].map);

        kfree(eq->page_list);
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
        mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
        struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
        int i;

        if (eq_table->have_irq)
                free_irq(dev->pdev->irq, dev);
        for (i = 0; i < MLX4_NUM_EQ; ++i)
                if (eq_table->eq[i].have_irq)
                        free_irq(eq_table->eq[i].irq, eq_table->eq + i);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
                                 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
        if (!priv->clr_base) {
                mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
                return -ENOMEM;
        }

        return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        iounmap(priv->clr_base);
}

int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int ret;

        /*
         * We assume that mapping one page is enough for the whole EQ
         * context table.  This is fine with all current HCAs, because
         * we only use 32 EQs and each EQ uses 64 bytes of context
         * memory, or 2 KB total.
         */
        priv->eq_table.icm_virt = icm_virt;
        priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
        if (!priv->eq_table.icm_page)
                return -ENOMEM;
        priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
                __free_page(priv->eq_table.icm_page);
                return -ENOMEM;
        }

        ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
        if (ret) {
                pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
                               PCI_DMA_BIDIRECTIONAL);
                __free_page(priv->eq_table.icm_page);
        }

        return ret;
}

void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
        pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
                       PCI_DMA_BIDIRECTIONAL);
        __free_page(priv->eq_table.icm_page);
}

int mlx4_init_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
        int i;

        err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
                               dev->caps.num_eqs - 1, dev->caps.reserved_eqs);
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
                priv->eq_table.uar_map[i] = NULL;

        err = mlx4_map_clr_int(dev);
        if (err)
                goto err_out_free;

        /*
         * The INTA pin is cleared by writing its bit to the 64-bit,
         * big-endian clear register; precompute the byte-swapped mask
         * and the 32-bit half of the register the bit lives in.
         */
        priv->eq_table.clr_mask =
                swab32(1 << (priv->eq_table.inta_pin & 31));
        priv->eq_table.clr_int  = priv->clr_base +
                (priv->eq_table.inta_pin < 32 ? 4 : 0);

        err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
                             (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
                             &priv->eq_table.eq[MLX4_EQ_COMP]);
        if (err)
                goto err_out_unmap;

        err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
                             (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
                             &priv->eq_table.eq[MLX4_EQ_ASYNC]);
        if (err)
                goto err_out_comp;

        if (dev->flags & MLX4_FLAG_MSI_X) {
                static const char *eq_name[] = {
                        [MLX4_EQ_COMP]  = DRV_NAME " (comp)",
                        [MLX4_EQ_ASYNC] = DRV_NAME " (async)"
                };

                for (i = 0; i < MLX4_NUM_EQ; ++i) {
                        err = request_irq(priv->eq_table.eq[i].irq,
                                          mlx4_msi_x_interrupt,
                                          0, eq_name[i], priv->eq_table.eq + i);
                        if (err)
                                goto err_out_async;

                        priv->eq_table.eq[i].have_irq = 1;
                }

        } else {
                err = request_irq(dev->pdev->irq, mlx4_interrupt,
                                  IRQF_SHARED, DRV_NAME, dev);
                if (err)
                        goto err_out_async;

                priv->eq_table.have_irq = 1;
        }

        err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
                          priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
        if (err)
                mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                          priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

        for (i = 0; i < MLX4_NUM_EQ; ++i)
                eq_set_ci(&priv->eq_table.eq[i], 1);

        return 0;

err_out_async:
        mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);

err_out_comp:
        mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);

err_out_unmap:
        mlx4_unmap_clr_int(dev);
        mlx4_free_irqs(dev);

err_out_free:
        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
        return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
                    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);

        mlx4_free_irqs(dev);

        for (i = 0; i < MLX4_NUM_EQ; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);

        mlx4_unmap_clr_int(dev);

        for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
                if (priv->eq_table.uar_map[i])
                        iounmap(priv->eq_table.uar_map[i]);

        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
}