/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic.h"
#include "qlcnic_hdr.h"
#include "qlcnic_83xx_hw.h"
#include "qlcnic_hw.h"

#include <net/ip.h>

#define QLC_83XX_MINIDUMP_FLASH		0x520000
#define QLC_83XX_OCM_INDEX		3
#define QLC_83XX_PCI_INDEX		0
#define QLC_83XX_DMA_ENGINE_INDEX	8

static const u32 qlcnic_ms_read_data[] = {
	0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
};

#define QLCNIC_DUMP_WCRB	BIT_0
#define QLCNIC_DUMP_RWCRB	BIT_1
#define QLCNIC_DUMP_ANDCRB	BIT_2
#define QLCNIC_DUMP_ORCRB	BIT_3
#define QLCNIC_DUMP_POLLCRB	BIT_4
#define QLCNIC_DUMP_RD_SAVE	BIT_5
#define QLCNIC_DUMP_WRT_SAVED	BIT_6
#define QLCNIC_DUMP_MOD_SAVE_ST	BIT_7
#define QLCNIC_DUMP_SKIP	BIT_7

#define QLCNIC_DUMP_MASK_MAX	0xff

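/* Descriptor for the adapter's PEX DMA engine; it is written into MS (card)
 * memory at __mem.desc_card_addr before each DMA read of adapter memory.
 */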
struct qlcnic_pex_dma_descriptor {
	u32 read_data_size;
	u32 dma_desc_cmd;
	u32 src_addr_low;
	u32 src_addr_high;
	u32 dma_bus_addr_low;
	u32 dma_bus_addr_high;
	u32 rsvd[6];
} __packed;

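/* Per-opcode payload layouts of a minidump template entry. One of these
 * follows the common entry header, selected by the entry type.
 */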
struct qlcnic_common_entry_hdr {
	u32 type;
	u32 offset;
	u32 cap_size;
	u8 mask;
	u8 rsvd[2];
	u8 flags;
} __packed;

struct __crb {
	u32 addr;
	u8 stride;
	u8 rsvd1[3];
	u32 data_size;
	u32 no_ops;
	u32 rsvd2[4];
} __packed;

struct __ctrl {
	u32 addr;
	u8 stride;
	u8 index_a;
	u16 timeout;
	u32 data_size;
	u32 no_ops;
	u8 opcode;
	u8 index_v;
	u8 shl_val;
	u8 shr_val;
	u32 val1;
	u32 val2;
	u32 val3;
} __packed;

struct __cache {
	u32 addr;
	u16 stride;
	u16 init_tag_val;
	u32 size;
	u32 no_ops;
	u32 ctrl_addr;
	u32 ctrl_val;
	u32 read_addr;
	u8 read_addr_stride;
	u8 read_addr_num;
	u8 rsvd1[2];
} __packed;

struct __ocm {
	u8 rsvd[8];
	u32 size;
	u32 no_ops;
	u8 rsvd1[8];
	u32 read_addr;
	u32 read_addr_stride;
} __packed;

struct __mem {
	u32 desc_card_addr;
	u32 dma_desc_cmd;
	u32 start_dma_cmd;
	u32 rsvd[3];
	u32 addr;
	u32 size;
} __packed;

struct __mux {
	u32 addr;
	u8 rsvd[4];
	u32 size;
	u32 no_ops;
	u32 val;
	u32 val_stride;
	u32 read_addr;
	u8 rsvd2[4];
} __packed;

struct __queue {
	u32 sel_addr;
	u16 stride;
	u8 rsvd[2];
	u32 size;
	u32 no_ops;
	u8 rsvd2[8];
	u32 read_addr;
	u8 read_addr_stride;
	u8 read_addr_cnt;
	u8 rsvd3[2];
} __packed;

struct __pollrd {
	u32 sel_addr;
	u32 read_addr;
	u32 sel_val;
	u16 sel_val_stride;
	u16 no_ops;
	u32 poll_wait;
	u32 poll_mask;
	u32 data_size;
	u8 rsvd[4];
} __packed;

struct __mux2 {
	u32 sel_addr1;
	u32 sel_addr2;
	u32 sel_val1;
	u32 sel_val2;
	u32 no_ops;
	u32 sel_val_mask;
	u32 read_addr;
	u8 sel_val_stride;
	u8 data_size;
	u8 rsvd[2];
} __packed;

struct __pollrdmwr {
	u32 addr1;
	u32 addr2;
	u32 val1;
	u32 val2;
	u32 poll_wait;
	u32 poll_mask;
	u32 mod_mask;
	u32 data_size;
} __packed;

struct qlcnic_dump_entry {
	struct qlcnic_common_entry_hdr hdr;
	union {
		struct __crb crb;
		struct __cache cache;
		struct __ocm ocm;
		struct __mem mem;
		struct __mux mux;
		struct __queue que;
		struct __ctrl ctrl;
		struct __pollrdmwr pollrdmwr;
		struct __mux2 mux2;
		struct __pollrd pollrd;
	} region;
} __packed;

enum qlcnic_minidump_opcode {
	QLCNIC_DUMP_NOP = 0,
	QLCNIC_DUMP_READ_CRB = 1,
	QLCNIC_DUMP_READ_MUX = 2,
	QLCNIC_DUMP_QUEUE = 3,
	QLCNIC_DUMP_BRD_CONFIG = 4,
	QLCNIC_DUMP_READ_OCM = 6,
	QLCNIC_DUMP_PEG_REG = 7,
	QLCNIC_DUMP_L1_DTAG = 8,
	QLCNIC_DUMP_L1_ITAG = 9,
	QLCNIC_DUMP_L1_DATA = 11,
	QLCNIC_DUMP_L1_INST = 12,
	QLCNIC_DUMP_L2_DTAG = 21,
	QLCNIC_DUMP_L2_ITAG = 22,
	QLCNIC_DUMP_L2_DATA = 23,
	QLCNIC_DUMP_L2_INST = 24,
	QLCNIC_DUMP_POLL_RD = 35,
	QLCNIC_READ_MUX2 = 36,
	QLCNIC_READ_POLLRDMWR = 37,
	QLCNIC_DUMP_READ_ROM = 71,
	QLCNIC_DUMP_READ_MEM = 72,
	QLCNIC_DUMP_READ_CTRL = 98,
	QLCNIC_DUMP_TLHDR = 99,
	QLCNIC_DUMP_RDEND = 255
};

struct qlcnic_dump_operations {
	enum qlcnic_minidump_opcode opcode;
	u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
		       __le32 *);
};

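/* CRB entry: record <address, data> pairs read through the indirect
 * register interface, stepping the address by the given stride.
 */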
static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 addr, data;
	struct __crb *crb = &entry->region.crb;

	addr = crb->addr;

	for (i = 0; i < crb->no_ops; i++) {
		data = qlcnic_ind_rd(adapter, addr);
		*buffer++ = cpu_to_le32(addr);
		*buffer++ = cpu_to_le32(data);
		addr += crb->stride;
	}
	return crb->no_ops * 2 * sizeof(u32);
}

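/* Control entry: execute a small bitmask of opcodes (write, read/modify/
 * write, poll, save/restore of template state) against CRB registers.
 * Control entries produce no dump data of their own.
 */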
static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
			    struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i, k, timeout = 0;
	u32 addr, data;
	u8 no_ops;
	struct __ctrl *ctr = &entry->region.ctrl;
	struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;

	addr = ctr->addr;
	no_ops = ctr->no_ops;

	for (i = 0; i < no_ops; i++) {
		k = 0;
		for (k = 0; k < 8; k++) {
			if (!(ctr->opcode & (1 << k)))
				continue;
			switch (1 << k) {
			case QLCNIC_DUMP_WCRB:
				qlcnic_ind_wr(adapter, addr, ctr->val1);
				break;
			case QLCNIC_DUMP_RWCRB:
				data = qlcnic_ind_rd(adapter, addr);
				qlcnic_ind_wr(adapter, addr, data);
				break;
			case QLCNIC_DUMP_ANDCRB:
				data = qlcnic_ind_rd(adapter, addr);
				qlcnic_ind_wr(adapter, addr,
					      (data & ctr->val2));
				break;
			case QLCNIC_DUMP_ORCRB:
				data = qlcnic_ind_rd(adapter, addr);
				qlcnic_ind_wr(adapter, addr,
					      (data | ctr->val3));
				break;
			case QLCNIC_DUMP_POLLCRB:
				while (timeout <= ctr->timeout) {
					data = qlcnic_ind_rd(adapter, addr);
					if ((data & ctr->val2) == ctr->val1)
						break;
					usleep_range(1000, 2000);
					timeout++;
				}
				if (timeout > ctr->timeout) {
					dev_info(&adapter->pdev->dev,
						 "Timed out, aborting poll CRB\n");
					return -EINVAL;
				}
				break;
			case QLCNIC_DUMP_RD_SAVE:
				if (ctr->index_a)
					addr = t_hdr->saved_state[ctr->index_a];
				data = qlcnic_ind_rd(adapter, addr);
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			case QLCNIC_DUMP_WRT_SAVED:
				if (ctr->index_v)
					data = t_hdr->saved_state[ctr->index_v];
				else
					data = ctr->val1;
				if (ctr->index_a)
					addr = t_hdr->saved_state[ctr->index_a];
				qlcnic_ind_wr(adapter, addr, data);
				break;
			case QLCNIC_DUMP_MOD_SAVE_ST:
				data = t_hdr->saved_state[ctr->index_v];
				data <<= ctr->shl_val;
				data >>= ctr->shr_val;
				if (ctr->val2)
					data &= ctr->val2;
				data |= ctr->val3;
				data += ctr->val1;
				t_hdr->saved_state[ctr->index_v] = data;
				break;
			default:
				dev_info(&adapter->pdev->dev,
					 "Unknown opcode\n");
				break;
			}
		}
		addr += ctr->stride;
	}
	return 0;
}

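/* Mux entry: write each selector value and record the <selector, data>
 * pair read back from the mux read address.
 */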
static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int loop;
	u32 val, data = 0;
	struct __mux *mux = &entry->region.mux;

	val = mux->val;
	for (loop = 0; loop < mux->no_ops; loop++) {
		qlcnic_ind_wr(adapter, mux->addr, val);
		data = qlcnic_ind_rd(adapter, mux->read_addr);
		*buffer++ = cpu_to_le32(val);
		*buffer++ = cpu_to_le32(data);
		val += mux->val_stride;
	}
	return 2 * mux->no_ops * sizeof(u32);
}

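/* Queue entry: select each queue id in turn and read read_addr_cnt words
 * starting at the queue read address.
 */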
static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i, loop;
	u32 cnt, addr, data, que_id = 0;
	struct __queue *que = &entry->region.que;

	addr = que->read_addr;
	cnt = que->read_addr_cnt;

	for (loop = 0; loop < que->no_ops; loop++) {
		qlcnic_ind_wr(adapter, que->sel_addr, que_id);
		addr = que->read_addr;
		for (i = 0; i < cnt; i++) {
			data = qlcnic_ind_rd(adapter, addr);
			*buffer++ = cpu_to_le32(data);
			addr += que->read_addr_stride;
		}
		que_id += que->stride;
	}
	return que->no_ops * cnt * sizeof(u32);
}

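/* OCM entry: read on-chip memory directly through BAR0 instead of the
 * indirect register interface.
 */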
static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 data;
	void __iomem *addr;
	struct __ocm *ocm = &entry->region.ocm;

	addr = adapter->ahw->pci_base0 + ocm->read_addr;
	for (i = 0; i < ocm->no_ops; i++) {
		data = readl(addr);
		*buffer++ = cpu_to_le32(data);
		addr += ocm->read_addr_stride;
	}
	return ocm->no_ops * sizeof(u32);
}

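/* 82xx ROM/board-config entry: acquire the flash lock semaphore, window
 * the flash and read it out one word at a time.
 */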
static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i, count = 0;
	u32 fl_addr, size, val, lck_val, addr;
	struct __mem *rom = &entry->region.mem;

	fl_addr = rom->addr;
	size = rom->size / 4;
lock_try:
	lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
	if (!lck_val && count < MAX_CTL_CHECK) {
		usleep_range(10000, 11000);
		count++;
		goto lock_try;
	}
	QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
			    adapter->ahw->pci_func);
	for (i = 0; i < size; i++) {
		addr = fl_addr & 0xFFFF0000;
		qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
		addr = LSW(fl_addr) + FLASH_ROM_DATA;
		val = qlcnic_ind_rd(adapter, addr);
		fl_addr += 4;
		*buffer++ = cpu_to_le32(val);
	}
	QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
	return rom->size;
}

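/* L1 cache entries (tag/data/instruction): write the tag and control
 * values, then read back read_addr_num words per operation.
 */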
static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
				struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	struct __cache *l1 = &entry->region.cache;

	val = l1->init_tag_val;

	for (i = 0; i < l1->no_ops; i++) {
		qlcnic_ind_wr(adapter, l1->addr, val);
		qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
		addr = l1->read_addr;
		cnt = l1->read_addr_num;
		while (cnt) {
			data = qlcnic_ind_rd(adapter, addr);
			*buffer++ = cpu_to_le32(data);
			addr += l1->read_addr_stride;
			cnt--;
		}
		val += l1->stride;
	}
	return l1->no_ops * l1->read_addr_num * sizeof(u32);
}

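/* L2 cache entries: same scheme as L1, but optionally poll the control
 * register until the poll mask clears before reading the data.
 */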
static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
				struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	int i;
	u32 cnt, val, data, addr;
	u8 poll_mask, poll_to, time_out = 0;
	struct __cache *l2 = &entry->region.cache;

	val = l2->init_tag_val;
	poll_mask = LSB(MSW(l2->ctrl_val));
	poll_to = MSB(MSW(l2->ctrl_val));

	for (i = 0; i < l2->no_ops; i++) {
		qlcnic_ind_wr(adapter, l2->addr, val);
		if (LSW(l2->ctrl_val))
			qlcnic_ind_wr(adapter, l2->ctrl_addr,
				      LSW(l2->ctrl_val));
		if (!poll_mask)
			goto skip_poll;
		do {
			data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
			if (!(data & poll_mask))
				break;
			usleep_range(1000, 2000);
			time_out++;
		} while (time_out <= poll_to);

		if (time_out > poll_to) {
			dev_err(&adapter->pdev->dev,
				"Timeout exceeded in %s, aborting dump\n",
				__func__);
			return -EINVAL;
		}
skip_poll:
		addr = l2->read_addr;
		cnt = l2->read_addr_num;
		while (cnt) {
			data = qlcnic_ind_rd(adapter, addr);
			*buffer++ = cpu_to_le32(data);
			addr += l2->read_addr_stride;
			cnt--;
		}
		val += l2->stride;
	}
	return l2->no_ops * l2->read_addr_num * sizeof(u32);
}

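/* Fallback path for memory entries: read adapter memory through the test
 * agent, 16 bytes per transaction. Address and size must be 16-byte
 * aligned; failures are reported through *ret.
 */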
static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
					 struct __mem *mem, __le32 *buffer,
					 int *ret)
{
	u32 addr, data, test;
	int i, reg_read;

	reg_read = mem->size;
	addr = mem->addr;
	/* check for data size of multiple of 16 and 16 byte alignment */
	if ((addr & 0xf) || (reg_read % 16)) {
		dev_info(&adapter->pdev->dev,
			 "Unaligned memory addr:0x%x size:0x%x\n",
			 addr, reg_read);
		*ret = -EINVAL;
		return 0;
	}

	mutex_lock(&adapter->ahw->mem_lock);

	while (reg_read != 0) {
		qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
		qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
		qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);

		for (i = 0; i < MAX_CTL_CHECK; i++) {
			test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
			if (!(test & TA_CTL_BUSY))
				break;
		}
		if (i == MAX_CTL_CHECK) {
			if (printk_ratelimit()) {
				dev_err(&adapter->pdev->dev,
					"failed to read through agent\n");
				*ret = -EIO;
				goto out;
			}
		}
		for (i = 0; i < 4; i++) {
			data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
			*buffer++ = cpu_to_le32(data);
		}
		addr += 16;
		reg_read -= 16;
		ret += 16;
	}
out:
	mutex_unlock(&adapter->ahw->mem_lock);
	return mem->size;
}

/* DMA register base address */
#define QLC_DMA_REG_BASE_ADDR(dma_no)	(0x77320000 + (dma_no * 0x10000))

/* DMA register offsets w.r.t base address */
#define QLC_DMA_CMD_BUFF_ADDR_LOW	0
#define QLC_DMA_CMD_BUFF_ADDR_HI	4
#define QLC_DMA_CMD_STATUS_CTRL		8

#define QLC_PEX_DMA_READ_SIZE		(PAGE_SIZE * 16)

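/* Program the selected DMA engine with the descriptor address and start
 * command, then poll the status register until the busy bit clears.
 */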
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
				struct __mem *mem)
{
	struct qlcnic_dump_template_hdr *tmpl_hdr;
	struct device *dev = &adapter->pdev->dev;
	u32 dma_no, dma_base_addr, temp_addr;
	int i, ret, dma_sts;

	tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
	dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
	dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);

	temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
	ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
					   mem->desc_card_addr);
	if (ret)
		return ret;

	temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
	ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0);
	if (ret)
		return ret;

	temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
	ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
					   mem->start_dma_cmd);
	if (ret)
		return ret;

	/* Wait for DMA to complete */
	temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
	for (i = 0; i < 400; i++) {
		dma_sts = qlcnic_ind_rd(adapter, temp_addr);

		if (dma_sts & BIT_1)
			usleep_range(250, 500);
		else
			break;
	}

	if (i >= 400) {
		dev_info(dev, "PEX DMA operation timed out");
		ret = -EIO;
	}

	return ret;
}

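/* Fast path for memory entries: build a PEX DMA descriptor, write it to MS
 * memory, start the DMA engine and copy the result out of the coherent
 * buffer, looping in QLC_PEX_DMA_READ_SIZE chunks.
 */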
static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
				     struct __mem *mem,
				     __le32 *buffer, int *ret)
{
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	u32 temp, dma_base_addr, size = 0, read_size = 0;
	struct qlcnic_pex_dma_descriptor *dma_descr;
	struct qlcnic_dump_template_hdr *tmpl_hdr;
	struct device *dev = &adapter->pdev->dev;
	dma_addr_t dma_phys_addr;
	void *dma_buffer;

	tmpl_hdr = fw_dump->tmpl_hdr;

	/* Check if DMA engine is available */
	temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
	dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
	temp = qlcnic_ind_rd(adapter,
			     dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);

	if (!(temp & BIT_31)) {
		dev_info(dev, "%s: DMA engine is not available\n", __func__);
		*ret = -EIO;
		return 0;
	}

	/* Create DMA descriptor */
	dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
			    GFP_KERNEL);
	if (!dma_descr) {
		*ret = -ENOMEM;
		return 0;
	}

	/* dma_desc_cmd  0:15 = 0
	 * dma_desc_cmd 16:19 = mem->dma_desc_cmd 0:3
	 * dma_desc_cmd 20:23 = pci function number
	 * dma_desc_cmd 24:31 = mem->dma_desc_cmd 8:15
	 */
	dma_phys_addr = fw_dump->phys_addr;
	dma_buffer = fw_dump->dma_buffer;
	temp = 0;
	temp = mem->dma_desc_cmd & 0xff0f;
	temp |= (adapter->ahw->pci_func & 0xf) << 4;
	dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
	dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
	dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
	dma_descr->src_addr_high = 0;

	/* Collect memory dump using multiple DMA operations if required */
	while (read_size < mem->size) {
		if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
			size = QLC_PEX_DMA_READ_SIZE;
		else
			size = mem->size - read_size;

		dma_descr->src_addr_low = mem->addr + read_size;
		dma_descr->read_data_size = size;

		/* Write DMA descriptor to MS memory */
		temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
		*ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr,
						   (u32 *)dma_descr, temp);
		if (*ret) {
			dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
				 mem->desc_card_addr);
			goto free_dma_descr;
		}

		*ret = qlcnic_start_pex_dma(adapter, mem);
		if (*ret) {
			dev_info(dev, "Failed to start PEX DMA operation\n");
			goto free_dma_descr;
		}

		memcpy(buffer, dma_buffer, size);
		buffer += size / 4;
		read_size += size;
	}

free_dma_descr:
	kfree(dma_descr);

	return read_size;
}

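/* Memory entry dispatcher: use PEX DMA when the template allows it and
 * fall back to the test agent method if the DMA read fails.
 */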
static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
			      struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	struct device *dev = &adapter->pdev->dev;
	struct __mem *mem = &entry->region.mem;
	u32 data_size;
	int ret = 0;

	if (fw_dump->use_pex_dma) {
		data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
						      &ret);
		if (ret)
			dev_info(dev,
				 "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
				 entry->hdr.mask);
		else
			return data_size;
	}

	data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
	if (ret) {
		dev_info(dev,
			 "Failed to read memory dump using test agent method: mask[0x%x]\n",
			 entry->hdr.mask);
		return 0;
	} else {
		return data_size;
	}
}

static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
			   struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	entry->hdr.flags |= QLCNIC_DUMP_SKIP;
	return 0;
}

static int qlcnic_valid_dump_entry(struct device *dev,
				   struct qlcnic_dump_entry *entry, u32 size)
{
	int ret = 1;

	if (size != entry->hdr.cap_size) {
		dev_err(dev,
			"Invalid entry, Type:%d\tMask:%d\tSize:%d\tCap_size:%d\n",
			entry->hdr.type, entry->hdr.mask, size,
			entry->hdr.cap_size);
		ret = 0;
	}
	return ret;
}

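/* 83xx poll/read-modify-write entry: poll addr1 for val1, mask-modify the
 * value at addr2, write val2 and poll again, then record <addr2, data>.
 */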
static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
				 struct qlcnic_dump_entry *entry,
				 __le32 *buffer)
{
	struct __pollrdmwr *poll = &entry->region.pollrdmwr;
	u32 data, wait_count, poll_wait, temp;

	poll_wait = poll->poll_wait;

	qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
	wait_count = 0;

	while (wait_count < poll_wait) {
		data = qlcnic_ind_rd(adapter, poll->addr1);
		if ((data & poll->poll_mask) != 0)
			break;
		wait_count++;
	}

	if (wait_count == poll_wait) {
		dev_err(&adapter->pdev->dev,
			"Timeout exceeded in %s, aborting dump\n",
			__func__);
		return 0;
	}

	data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
	qlcnic_ind_wr(adapter, poll->addr2, data);
	qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
	wait_count = 0;

	while (wait_count < poll_wait) {
		temp = qlcnic_ind_rd(adapter, poll->addr1);
		if ((temp & poll->poll_mask) != 0)
			break;
		wait_count++;
	}

	*buffer++ = cpu_to_le32(poll->addr2);
	*buffer++ = cpu_to_le32(data);

	return 2 * sizeof(u32);
}

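/* 83xx poll-and-read entry: write a selector, poll until the poll mask is
 * set, then record the <selector, data> pair for each operation.
 */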
static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
			      struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	struct __pollrd *pollrd = &entry->region.pollrd;
	u32 data, wait_count, poll_wait, sel_val;
	int i;

	poll_wait = pollrd->poll_wait;
	sel_val = pollrd->sel_val;

	for (i = 0; i < pollrd->no_ops; i++) {
		qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
		wait_count = 0;
		while (wait_count < poll_wait) {
			data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
			if ((data & pollrd->poll_mask) != 0)
				break;
			wait_count++;
		}

		if (wait_count == poll_wait) {
			dev_err(&adapter->pdev->dev,
				"Timeout exceeded in %s, aborting dump\n",
				__func__);
			return 0;
		}

		data = qlcnic_ind_rd(adapter, pollrd->read_addr);
		*buffer++ = cpu_to_le32(sel_val);
		*buffer++ = cpu_to_le32(data);
		sel_val += pollrd->sel_val_stride;
	}
	return pollrd->no_ops * (2 * sizeof(u32));
}

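/* 83xx two-stage mux entry: each operation captures two <selector, data>
 * pairs, one for sel_val1 and one for sel_val2.
 */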
static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
			    struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	struct __mux2 *mux2 = &entry->region.mux2;
	u32 data;
	u32 t_sel_val, sel_val1, sel_val2;
	int i;

	sel_val1 = mux2->sel_val1;
	sel_val2 = mux2->sel_val2;

	for (i = 0; i < mux2->no_ops; i++) {
		qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
		t_sel_val = sel_val1 & mux2->sel_val_mask;
		qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
		data = qlcnic_ind_rd(adapter, mux2->read_addr);
		*buffer++ = cpu_to_le32(t_sel_val);
		*buffer++ = cpu_to_le32(data);
		qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
		t_sel_val = sel_val2 & mux2->sel_val_mask;
		qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
		data = qlcnic_ind_rd(adapter, mux2->read_addr);
		*buffer++ = cpu_to_le32(t_sel_val);
		*buffer++ = cpu_to_le32(data);
		sel_val1 += mux2->sel_val_stride;
		sel_val2 += mux2->sel_val_stride;
	}

	return mux2->no_ops * (4 * sizeof(u32));
}

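/* 83xx ROM/board-config entry: read the flash contents through the
 * lockless flash read interface.
 */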
static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
				struct qlcnic_dump_entry *entry, __le32 *buffer)
{
	u32 fl_addr, size;
	struct __mem *rom = &entry->region.mem;

	fl_addr = rom->addr;
	size = rom->size / 4;

	if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
					       (u8 *)buffer, size))
		return rom->size;

	return 0;
}

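/* Opcode-to-handler dispatch tables; qlcnic_dump_fw() selects the 82xx or
 * 83xx table based on the adapter type.
 */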
static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};

static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
	{QLCNIC_DUMP_NOP, qlcnic_dump_nop},
	{QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
	{QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
	{QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
	{QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
	{QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
	{QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
	{QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
	{QLCNIC_READ_MUX2, qlcnic_read_mux2},
	{QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
	{QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
	{QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
	{QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
	{QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
	{QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};

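/* Fold a 32-bit one's-complement style sum over the template; the caller
 * treats a non-zero result as a corrupt template.
 */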
static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
{
	uint64_t sum = 0;
	int count = temp_size / sizeof(uint32_t);

	while (count-- > 0)
		sum += *temp_buffer++;
	while (sum >> 32)
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);
	return ~sum;
}

static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
					     u8 *buffer, u32 size)
{
	int ret = 0;

	if (qlcnic_82xx_check(adapter))
		return -EIO;

	if (qlcnic_83xx_lock_flash(adapter))
		return -EIO;

	ret = qlcnic_83xx_lockless_flash_read32(adapter,
						QLC_83XX_MINIDUMP_FLASH,
						buffer, size / sizeof(u32));

	qlcnic_83xx_unlock_flash(adapter);

	return ret;
}

static int
qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
				       struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_dump_template_hdr tmp_hdr;
	u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
	int ret = 0;

	if (qlcnic_82xx_check(adapter))
		return -EIO;

	if (qlcnic_83xx_lock_flash(adapter))
		return -EIO;

	ret = qlcnic_83xx_lockless_flash_read32(adapter,
						QLC_83XX_MINIDUMP_FLASH,
						(u8 *)&tmp_hdr, size);

	qlcnic_83xx_unlock_flash(adapter);

	cmd->rsp.arg[2] = tmp_hdr.size;
	cmd->rsp.arg[3] = tmp_hdr.version;

	return ret;
}

static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
					    u32 *version, u32 *temp_size,
					    u8 *use_flash_temp)
{
	int err = 0;
	struct qlcnic_cmd_args cmd;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
		return -ENOMEM;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err != QLCNIC_RCODE_SUCCESS) {
		if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
			qlcnic_free_mbx_args(&cmd);
			return -EIO;
		}
		*use_flash_temp = 1;
	}

	*temp_size = cmd.rsp.arg[2];
	*version = cmd.rsp.arg[3];
	qlcnic_free_mbx_args(&cmd);

	if (!(*temp_size))
		return -EIO;

	return 0;
}

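/* Fetch the minidump template from firmware via the mailbox interface into
 * a DMA-coherent buffer and copy it out in CPU byte order.
 */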
static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
					     u32 *buffer, u32 temp_size)
{
	int err = 0, i;
	void *tmp_addr;
	__le32 *tmp_buf;
	struct qlcnic_cmd_args cmd;
	dma_addr_t tmp_addr_t = 0;

	tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
				      &tmp_addr_t, GFP_KERNEL);
	if (!tmp_addr)
		return -ENOMEM;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
		err = -ENOMEM;
		goto free_mem;
	}

	cmd.req.arg[1] = LSD(tmp_addr_t);
	cmd.req.arg[2] = MSD(tmp_addr_t);
	cmd.req.arg[3] = temp_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	tmp_buf = tmp_addr;
	if (err == QLCNIC_RCODE_SUCCESS) {
		for (i = 0; i < temp_size / sizeof(u32); i++)
			*buffer++ = __le32_to_cpu(*tmp_buf++);
	}

	qlcnic_free_mbx_args(&cmd);

free_mem:
	dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);

	return err;
}

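/* Retrieve and validate the minidump template: get its size, read it from
 * firmware (or from flash as a fallback), verify the checksum, set the
 * default capture mask and decide whether PEX DMA can be used.
 */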
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
	int err;
	u32 temp_size = 0;
	u32 version, csum, *tmp_buf;
	struct qlcnic_hardware_context *ahw;
	struct qlcnic_dump_template_hdr *tmpl_hdr;
	u8 use_flash_temp = 0;

	ahw = adapter->ahw;

	err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
					       &use_flash_temp);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Can't get template size %d\n", err);
		return -EIO;
	}

	ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
	if (!ahw->fw_dump.tmpl_hdr)
		return -ENOMEM;

	tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
	if (use_flash_temp)
		goto flash_temp;

	err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);

	if (err) {
flash_temp:
		err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
							temp_size);

		if (err) {
			dev_err(&adapter->pdev->dev,
				"Failed to get minidump template header %d\n",
				err);
			vfree(ahw->fw_dump.tmpl_hdr);
			ahw->fw_dump.tmpl_hdr = NULL;
			return -EIO;
		}
	}

	csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);

	if (csum) {
		dev_err(&adapter->pdev->dev,
			"Template header checksum validation failed\n");
		vfree(ahw->fw_dump.tmpl_hdr);
		ahw->fw_dump.tmpl_hdr = NULL;
		return -EIO;
	}

	tmpl_hdr = ahw->fw_dump.tmpl_hdr;
	tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
	dev_info(&adapter->pdev->dev,
		 "Default minidump capture mask 0x%x\n",
		 tmpl_hdr->cap_mask);

	if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
		ahw->fw_dump.use_pex_dma = true;
	else
		ahw->fw_dump.use_pex_dma = false;

	qlcnic_enable_fw_dump_state(adapter);

	return 0;
}

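/* Capture a firmware minidump: walk the template entries, dispatch each to
 * its handler and collect the data; the captured size must match the sizes
 * advertised in the template. A udev event announces the completed dump.
 */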
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	__le32 *buffer;
	u32 ocm_window;
	char mesg[64];
	char *msg[] = {mesg, NULL};
	int i, k, ops_cnt, ops_index, dump_size = 0;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	struct qlcnic_dump_entry *entry;
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
	static const struct qlcnic_dump_operations *fw_dump_ops;
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_hardware_context *ahw;
	void *temp_buffer;

	ahw = adapter->ahw;

	/* Return if we don't have firmware dump template header */
	if (!tmpl_hdr)
		return -EIO;

	if (!qlcnic_check_fw_dump_state(adapter)) {
		dev_info(&adapter->pdev->dev, "Dump not enabled\n");
		return -EIO;
	}

	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			 "Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}

	netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
	/* Calculate the size for dump data area only */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & tmpl_hdr->drv_cap_mask)
			dump_size += tmpl_hdr->cap_sizes[k];
	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data)
		return -ENOMEM;

	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = tmpl_hdr->num_entries;
	entry_offset = tmpl_hdr->offset;
	tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
	tmpl_hdr->sys_info[1] = adapter->fw_version;

	if (fw_dump->use_pex_dma) {
		temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
						 &fw_dump->phys_addr,
						 GFP_KERNEL);
		if (!temp_buffer)
			fw_dump->use_pex_dma = false;
		else
			fw_dump->dma_buffer = temp_buffer;
	}

	if (qlcnic_82xx_check(adapter)) {
		ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
		fw_dump_ops = qlcnic_fw_dump_ops;
	} else {
		ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
		fw_dump_ops = qlcnic_83xx_fw_dump_ops;
		ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
		tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
		tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
	}

	for (i = 0; i < no_entries; i++) {
		entry = (void *)tmpl_hdr + entry_offset;
		if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Find the handler for this entry */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}

		if (ops_index == ops_cnt) {
			dev_info(&adapter->pdev->dev,
				 "Invalid entry type %d, exiting dump\n",
				 entry->hdr.type);
			goto error;
		}

		/* Collect dump for this entry */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		if (!qlcnic_valid_dump_entry(&adapter->pdev->dev, entry, dump))
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
	}
	if (dump_size != buf_offset) {
		dev_info(&adapter->pdev->dev,
			 "Captured(%d) and expected size(%d) do not match\n",
			 buf_offset, dump_size);
		goto error;
	} else {
		fw_dump->clr = 1;
		snprintf(mesg, sizeof(mesg), "FW_DUMP=%s",
			 adapter->netdev->name);
		dev_info(&adapter->pdev->dev, "%s: Dump data, %d bytes captured\n",
			 adapter->netdev->name, fw_dump->size);
		/* Send a udev event to notify availability of FW dump */
		kobject_uevent_env(&adapter->pdev->dev.kobj, KOBJ_CHANGE, msg);
		return 0;
	}
error:
	if (fw_dump->use_pex_dma)
		dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
				  fw_dump->dma_buffer, fw_dump->phys_addr);
	vfree(fw_dump->data);
	return -EINVAL;
}

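/* Re-fetch the minidump template when none is cached or when the firmware
 * version has increased.
 */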
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
{
	u32 prev_version, current_version;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
	struct pci_dev *pdev = adapter->pdev;

	prev_version = adapter->fw_version;
	current_version = qlcnic_83xx_get_fw_version(adapter);

	if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
		if (fw_dump->tmpl_hdr)
			vfree(fw_dump->tmpl_hdr);
		if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
			dev_info(&pdev->dev, "Supports FW dump capability\n");
	}
}