1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2013 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8 #include <linux/delay.h>
9 #include <linux/pci.h>
10 #include <linux/ratelimit.h>
11 #include <linux/vmalloc.h>
12 #include <scsi/scsi_tcq.h>
13 #include <linux/utsname.h>
14
15
16 /* QLAFX00 specific Mailbox implementation functions */
17
18 /*
19 * qlafx00_mailbox_command
20 * Issues a mailbox command and waits for completion.
21 *
22 * Input:
23 * ha = adapter block pointer.
24 * mcp = driver internal mbx struct pointer.
25 *
26 * Output:
27 * mb[MAX_MAILBOX_REGISTER_COUNT] = returned mailbox data.
28 *
29 * Returns:
30 * 0 : QLA_SUCCESS = cmd performed successfully
31 * 1 : QLA_FUNCTION_FAILED (error encountered)
32 * 6 : QLA_FUNCTION_TIMEOUT (timeout condition encountered)
33 *
34 * Context:
35 * Kernel context.
36 */
37 static int
38 qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp)
39
40 {
41 int rval;
42 unsigned long flags = 0;
43 device_reg_t __iomem *reg;
44 uint8_t abort_active;
45 uint8_t io_lock_on;
46 uint16_t command = 0;
47 uint32_t *iptr;
48 uint32_t __iomem *optr;
49 uint32_t cnt;
50 uint32_t mboxes;
51 unsigned long wait_time;
52 struct qla_hw_data *ha = vha->hw;
53 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
54
55 if (ha->pdev->error_state > pci_channel_io_frozen) {
56 ql_log(ql_log_warn, vha, 0x115c,
57 "error_state is greater than pci_channel_io_frozen, "
58 "exiting.\n");
59 return QLA_FUNCTION_TIMEOUT;
60 }
61
62 if (vha->device_flags & DFLG_DEV_FAILED) {
63 ql_log(ql_log_warn, vha, 0x115f,
64 "Device in failed state, exiting.\n");
65 return QLA_FUNCTION_TIMEOUT;
66 }
67
68 reg = ha->iobase;
69 io_lock_on = base_vha->flags.init_done;
70
71 rval = QLA_SUCCESS;
72 abort_active = test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
73
74 if (ha->flags.pci_channel_io_perm_failure) {
75 ql_log(ql_log_warn, vha, 0x1175,
76 "Perm failure on EEH timeout MBX, exiting.\n");
77 return QLA_FUNCTION_TIMEOUT;
78 }
79
80 if (ha->flags.isp82xx_fw_hung) {
81 /* Setting Link-Down error */
82 mcp->mb[0] = MBS_LINK_DOWN_ERROR;
83 ql_log(ql_log_warn, vha, 0x1176,
84 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
85 rval = QLA_FUNCTION_FAILED;
86 goto premature_exit;
87 }
88
89 /*
90 * Wait for active mailbox commands to finish by waiting at most tov
91 * seconds. This is to serialize actual issuing of mailbox cmds during
92 * non-ISP-abort time.
93 */
94 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
95 /* Timeout occurred. Return error. */
96 ql_log(ql_log_warn, vha, 0x1177,
97 "Cmd access timeout, cmd=0x%x, Exiting.\n",
98 mcp->mb[0]);
99 return QLA_FUNCTION_TIMEOUT;
100 }
101
102 ha->flags.mbox_busy = 1;
103 /* Save mailbox command for debug */
104 ha->mcp32 = mcp;
105
106 ql_dbg(ql_dbg_mbx, vha, 0x1178,
107 "Prepare to issue mbox cmd=0x%x.\n", mcp->mb[0]);
108
109 spin_lock_irqsave(&ha->hardware_lock, flags);
110
111 /* Load mailbox registers. */
112 optr = (uint32_t __iomem *)&reg->ispfx00.mailbox0;
113
114 iptr = mcp->mb;
115 command = mcp->mb[0];
116 mboxes = mcp->out_mb;
117
118 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
119 if (mboxes & BIT_0)
120 WRT_REG_DWORD(optr, *iptr);
121
122 mboxes >>= 1;
123 optr++;
124 iptr++;
125 }
126
127 /* Issue set host interrupt command to send cmd out. */
128 ha->flags.mbox_int = 0;
129 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
130
131 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1172,
132 (uint8_t *)mcp->mb, 16);
133 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1173,
134 ((uint8_t *)mcp->mb + 0x10), 16);
135 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1174,
136 ((uint8_t *)mcp->mb + 0x20), 8);
137
138 /* Unlock mbx registers and wait for interrupt */
139 ql_dbg(ql_dbg_mbx, vha, 0x1179,
140 "Going to unlock irq & waiting for interrupts. "
141 "jiffies=%lx.\n", jiffies);
142
143 /* Wait for mbx cmd completion until timeout */
144 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
145 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
146
147 QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
148 spin_unlock_irqrestore(&ha->hardware_lock, flags);
149
150 wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
151 } else {
152 ql_dbg(ql_dbg_mbx, vha, 0x112c,
153 "Cmd=%x Polling Mode.\n", command);
154
155 QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
156 spin_unlock_irqrestore(&ha->hardware_lock, flags);
157
158 wait_time = jiffies + mcp->tov * HZ; /* wait at most tov secs */
159 while (!ha->flags.mbox_int) {
160 if (time_after(jiffies, wait_time))
161 break;
162
163 /* Check for pending interrupts. */
164 qla2x00_poll(ha->rsp_q_map[0]);
165
166 if (!ha->flags.mbox_int &&
167 !(IS_QLA2200(ha) &&
168 command == MBC_LOAD_RISC_RAM_EXTENDED))
169 usleep_range(10000, 11000);
170 } /* while */
171 ql_dbg(ql_dbg_mbx, vha, 0x112d,
172 "Waited %d sec.\n",
173 (uint)((jiffies - (wait_time - (mcp->tov * HZ)))/HZ));
174 }
175
176 /* Check whether we timed out */
177 if (ha->flags.mbox_int) {
178 uint32_t *iptr2;
179
180 ql_dbg(ql_dbg_mbx, vha, 0x112e,
181 "Cmd=%x completed.\n", command);
182
183 /* Got interrupt. Clear the flag. */
184 ha->flags.mbox_int = 0;
185 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
186
187 if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
188 rval = QLA_FUNCTION_FAILED;
189
190 /* Load return mailbox registers. */
191 iptr2 = mcp->mb;
192 iptr = (uint32_t *)&ha->mailbox_out32[0];
193 mboxes = mcp->in_mb;
194 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
195 if (mboxes & BIT_0)
196 *iptr2 = *iptr;
197
198 mboxes >>= 1;
199 iptr2++;
200 iptr++;
201 }
202 } else {
203
204 rval = QLA_FUNCTION_TIMEOUT;
205 }
206
207 ha->flags.mbox_busy = 0;
208
209 /* Clean up */
210 ha->mcp32 = NULL;
211
212 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
213 ql_dbg(ql_dbg_mbx, vha, 0x113a,
214 "checking for additional resp interrupt.\n");
215
216 /* polling mode for non isp_abort commands. */
217 qla2x00_poll(ha->rsp_q_map[0]);
218 }
219
220 if (rval == QLA_FUNCTION_TIMEOUT &&
221 mcp->mb[0] != MBC_GEN_SYSTEM_ERROR) {
222 if (!io_lock_on || (mcp->flags & IOCTL_CMD) ||
223 ha->flags.eeh_busy) {
224 /* not in dpc. schedule it for dpc to take over. */
225 ql_dbg(ql_dbg_mbx, vha, 0x115d,
226 "Timeout, schedule isp_abort_needed.\n");
227
228 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
229 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
230 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
231
232 ql_log(ql_log_info, base_vha, 0x115e,
233 "Mailbox cmd timeout occurred, cmd=0x%x, "
234 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
235 "abort.\n", command, mcp->mb[0],
236 ha->flags.eeh_busy);
237 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
238 qla2xxx_wake_dpc(vha);
239 }
240 } else if (!abort_active) {
241 /* call abort directly since we are in the DPC thread */
242 ql_dbg(ql_dbg_mbx, vha, 0x1160,
243 "Timeout, calling abort_isp.\n");
244
245 if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
246 !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
247 !test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
248
249 ql_log(ql_log_info, base_vha, 0x1161,
250 "Mailbox cmd timeout occurred, cmd=0x%x, "
251 "mb[0]=0x%x. Scheduling ISP abort ",
252 command, mcp->mb[0]);
253
254 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
255 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
256 if (ha->isp_ops->abort_isp(vha)) {
257 /* Failed. retry later. */
258 set_bit(ISP_ABORT_NEEDED,
259 &vha->dpc_flags);
260 }
261 clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
262 ql_dbg(ql_dbg_mbx, vha, 0x1162,
263 "Finished abort_isp.\n");
264 }
265 }
266 }
267
268 premature_exit:
269 /* Allow next mbx cmd to come in. */
270 complete(&ha->mbx_cmd_comp);
271
272 if (rval) {
273 ql_log(ql_log_warn, base_vha, 0x1163,
274 "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, "
275 "mb[3]=%x, cmd=%x ****.\n",
276 mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
277 } else {
278 ql_dbg(ql_dbg_mbx, base_vha, 0x1164, "Done %s.\n", __func__);
279 }
280
281 return rval;
282 }
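
/*
 * Illustrative sketch (not part of the driver): callers of
 * qlafx00_mailbox_command() fill a struct mbx_cmd_32 on the stack, set the
 * out_mb/in_mb bitmasks to select which 32-bit mailbox registers are written
 * and read back, and pick a timeout in seconds, e.g.:
 *
 *	struct mbx_cmd_32 mc, *mcp = &mc;
 *
 *	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
 *	mcp->out_mb = MBX_0;
 *	mcp->in_mb = MBX_1|MBX_0;
 *	mcp->tov = MBX_TOV_SECONDS;
 *	mcp->flags = 0;
 *	rval = qlafx00_mailbox_command(vha, mcp);
 *
 * The helpers below (qlafx00_driver_shutdown(), qlafx00_get_firmware_state()
 * and friends) follow exactly this pattern.
 */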
283
284 /*
285 * qlafx00_driver_shutdown
286 * Indicate a driver shutdown to firmware.
287 *
288 * Input:
289 * ha = adapter block pointer.
290 *
291 * Returns:
292 * local function return status code.
293 *
294 * Context:
295 * Kernel context.
296 */
297 int
298 qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
299 {
300 int rval;
301 struct mbx_cmd_32 mc;
302 struct mbx_cmd_32 *mcp = &mc;
303
304 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1166,
305 "Entered %s.\n", __func__);
306
307 mcp->mb[0] = MBC_MR_DRV_SHUTDOWN;
308 mcp->out_mb = MBX_0;
309 mcp->in_mb = MBX_0;
310 if (tmo)
311 mcp->tov = tmo;
312 else
313 mcp->tov = MBX_TOV_SECONDS;
314 mcp->flags = 0;
315 rval = qlafx00_mailbox_command(vha, mcp);
316
317 if (rval != QLA_SUCCESS) {
318 ql_dbg(ql_dbg_mbx, vha, 0x1167,
319 "Failed=%x.\n", rval);
320 } else {
321 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1168,
322 "Done %s.\n", __func__);
323 }
324
325 return rval;
326 }
327
328 /*
329 * qlafx00_get_firmware_state
330 * Get adapter firmware state.
331 *
332 * Input:
333 * ha = adapter block pointer.
334 * TARGET_QUEUE_LOCK must be released.
335 * ADAPTER_STATE_LOCK must be released.
336 *
337 * Returns:
338 * qlafx00 local function return status code.
339 *
340 * Context:
341 * Kernel context.
342 */
343 static int
344 qlafx00_get_firmware_state(scsi_qla_host_t *vha, uint32_t *states)
345 {
346 int rval;
347 struct mbx_cmd_32 mc;
348 struct mbx_cmd_32 *mcp = &mc;
349
350 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1169,
351 "Entered %s.\n", __func__);
352
353 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
354 mcp->out_mb = MBX_0;
355 mcp->in_mb = MBX_1|MBX_0;
356 mcp->tov = MBX_TOV_SECONDS;
357 mcp->flags = 0;
358 rval = qlafx00_mailbox_command(vha, mcp);
359
360 /* Return firmware states. */
361 states[0] = mcp->mb[1];
362
363 if (rval != QLA_SUCCESS) {
364 ql_dbg(ql_dbg_mbx, vha, 0x116a,
365 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
366 } else {
367 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116b,
368 "Done %s.\n", __func__);
369 }
370 return rval;
371 }
372
373 /*
374 * qlafx00_init_firmware
375 * Initialize adapter firmware.
376 *
377 * Input:
378 * ha = adapter block pointer.
379 * dptr = Initialization control block pointer.
380 * size = size of initialization control block.
381 * TARGET_QUEUE_LOCK must be released.
382 * ADAPTER_STATE_LOCK must be released.
383 *
384 * Returns:
385 * qlafx00 local function return status code.
386 *
387 * Context:
388 * Kernel context.
389 */
390 int
391 qlafx00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
392 {
393 int rval;
394 struct mbx_cmd_32 mc;
395 struct mbx_cmd_32 *mcp = &mc;
396 struct qla_hw_data *ha = vha->hw;
397
398 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116c,
399 "Entered %s.\n", __func__);
400
401 mcp->mb[0] = MBC_INITIALIZE_FIRMWARE;
402
403 mcp->mb[1] = 0;
404 mcp->mb[2] = MSD(ha->init_cb_dma);
405 mcp->mb[3] = LSD(ha->init_cb_dma);
406
407 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
408 mcp->in_mb = MBX_0;
409 mcp->buf_size = size;
410 mcp->flags = MBX_DMA_OUT;
411 mcp->tov = MBX_TOV_SECONDS;
412 rval = qlafx00_mailbox_command(vha, mcp);
413
414 if (rval != QLA_SUCCESS) {
415 ql_dbg(ql_dbg_mbx, vha, 0x116d,
416 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
417 } else {
418 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116e,
419 "Done %s.\n", __func__);
420 }
421 return rval;
422 }
423
424 /*
425 * qlafx00_mbx_reg_test
426 */
427 static int
428 qlafx00_mbx_reg_test(scsi_qla_host_t *vha)
429 {
430 int rval;
431 struct mbx_cmd_32 mc;
432 struct mbx_cmd_32 *mcp = &mc;
433
434 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x116f,
435 "Entered %s.\n", __func__);
436
437
438 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
439 mcp->mb[1] = 0xAAAA;
440 mcp->mb[2] = 0x5555;
441 mcp->mb[3] = 0xAA55;
442 mcp->mb[4] = 0x55AA;
443 mcp->mb[5] = 0xA5A5;
444 mcp->mb[6] = 0x5A5A;
445 mcp->mb[7] = 0x2525;
446 mcp->mb[8] = 0xBBBB;
447 mcp->mb[9] = 0x6666;
448 mcp->mb[10] = 0xBB66;
449 mcp->mb[11] = 0x66BB;
450 mcp->mb[12] = 0xB6B6;
451 mcp->mb[13] = 0x6B6B;
452 mcp->mb[14] = 0x3636;
453 mcp->mb[15] = 0xCCCC;
454
455
456 mcp->out_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
457 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
458 mcp->in_mb = MBX_15|MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|
459 MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
460 mcp->buf_size = 0;
461 mcp->flags = MBX_DMA_OUT;
462 mcp->tov = MBX_TOV_SECONDS;
463 rval = qlafx00_mailbox_command(vha, mcp);
464 if (rval == QLA_SUCCESS) {
465 if (mcp->mb[17] != 0xAAAA || mcp->mb[18] != 0x5555 ||
466 mcp->mb[19] != 0xAA55 || mcp->mb[20] != 0x55AA)
467 rval = QLA_FUNCTION_FAILED;
468 if (mcp->mb[21] != 0xA5A5 || mcp->mb[22] != 0x5A5A ||
469 mcp->mb[23] != 0x2525 || mcp->mb[24] != 0xBBBB)
470 rval = QLA_FUNCTION_FAILED;
471 if (mcp->mb[25] != 0x6666 || mcp->mb[26] != 0xBB66 ||
472 mcp->mb[27] != 0x66BB || mcp->mb[28] != 0xB6B6)
473 rval = QLA_FUNCTION_FAILED;
474 if (mcp->mb[29] != 0x6B6B || mcp->mb[30] != 0x3636 ||
475 mcp->mb[31] != 0xCCCC)
476 rval = QLA_FUNCTION_FAILED;
477 }
478
479 if (rval != QLA_SUCCESS) {
480 ql_dbg(ql_dbg_mbx, vha, 0x1170,
481 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
482 } else {
483 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1171,
484 "Done %s.\n", __func__);
485 }
486 return rval;
487 }
488
489 /**
490 * qlafx00_pci_config() - Setup ISPFx00 PCI configuration registers.
491 * @vha: HA context
492 *
493 * Returns 0 on success.
494 */
495 int
496 qlafx00_pci_config(scsi_qla_host_t *vha)
497 {
498 uint16_t w;
499 struct qla_hw_data *ha = vha->hw;
500
501 pci_set_master(ha->pdev);
502 pci_try_set_mwi(ha->pdev);
503
504 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
505 w |= (PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
506 w &= ~PCI_COMMAND_INTX_DISABLE;
507 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
508
509 /* PCIe -- adjust Maximum Read Request Size (2048). */
510 if (pci_find_capability(ha->pdev, PCI_CAP_ID_EXP))
511 pcie_set_readrq(ha->pdev, 2048);
512
513 ha->chip_revision = ha->pdev->revision;
514
515 return QLA_SUCCESS;
516 }
517
518 /**
519 * qlafx00_soc_cpu_reset() - Perform warm reset of the iSA (CPU cores on the SOC are reset).
520 * @vha: HA context
521 *
522 */
523 static inline void
524 qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
525 {
526 unsigned long flags = 0;
527 struct qla_hw_data *ha = vha->hw;
528 int i, core;
529 uint32_t cnt;
530
531 /* Set all 4 cores in reset */
532 for (i = 0; i < 4; i++) {
533 QLAFX00_SET_HBA_SOC_REG(ha,
534 (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
535 }
536
537 /* Set all 4 core Clock gating control */
538 for (i = 0; i < 4; i++) {
539 QLAFX00_SET_HBA_SOC_REG(ha,
540 (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
541 }
542
543 /* Reset all units in Fabric */
544 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));
545
546 /* Reset all interrupt control registers */
547 for (i = 0; i < 115; i++) {
548 QLAFX00_SET_HBA_SOC_REG(ha,
549 (SOC_INTERRUPT_SOURCE_I_CONTROL_REG + 4*i), (0x0));
550 }
551
552 /* Reset Timers control registers. per core */
553 for (core = 0; core < 4; core++)
554 for (i = 0; i < 8; i++)
555 QLAFX00_SET_HBA_SOC_REG(ha,
556 (SOC_CORE_TIMER_REG + 0x100*core + 4*i), (0x0));
557
558 /* Reset per core IRQ ack register */
559 for (core = 0; core < 4; core++)
560 QLAFX00_SET_HBA_SOC_REG(ha,
561 (SOC_IRQ_ACK_REG + 0x100*core), (0x3FF));
562
563 /* Set Fabric control and config to defaults */
564 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
565 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
566
567 spin_lock_irqsave(&ha->hardware_lock, flags);
568
569 /* Kick in Fabric units */
570 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
571
572 /* Kick in Core0 to start boot process */
573 QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
574
575 /* Wait 10 secs for soft-reset to complete. */
576 for (cnt = 10; cnt; cnt--) {
577 msleep(1000);
578 barrier();
579 }
580 spin_unlock_irqrestore(&ha->hardware_lock, flags);
581 }
582
583 /**
584 * qlafx00_soft_reset() - Soft Reset ISPFx00.
585 * @vha: HA context
588 */
589 void
590 qlafx00_soft_reset(scsi_qla_host_t *vha)
591 {
592 struct qla_hw_data *ha = vha->hw;
593
594 if (unlikely(pci_channel_offline(ha->pdev) &&
595 ha->flags.pci_channel_io_perm_failure))
596 return;
597
598 ha->isp_ops->disable_intrs(ha);
599 qlafx00_soc_cpu_reset(vha);
600 ha->isp_ops->enable_intrs(ha);
601 }
602
603 /**
604 * qlafx00_chip_diag() - Test ISPFx00 for proper operation.
605 * @vha: HA context
606 *
607 * Returns 0 on success.
608 */
609 int
610 qlafx00_chip_diag(scsi_qla_host_t *vha)
611 {
612 int rval = 0;
613 struct qla_hw_data *ha = vha->hw;
614 struct req_que *req = ha->req_q_map[0];
615
616 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
617
618 rval = qlafx00_mbx_reg_test(vha);
619 if (rval) {
620 ql_log(ql_log_warn, vha, 0x1165,
621 "Failed mailbox send register test\n");
622 } else {
623 /* Flag a successful rval */
624 rval = QLA_SUCCESS;
625 }
626 return rval;
627 }
628
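/*
 * qlafx00_config_rings
 *	Program the request/response ring parameters into the initialization
 *	control block and zero the ring in/out pointers in the ISPFx00
 *	registers.
 *
 * Input:
 *	ha = adapter block pointer.
 */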
629 void
630 qlafx00_config_rings(struct scsi_qla_host *vha)
631 {
632 struct qla_hw_data *ha = vha->hw;
633 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
634 struct init_cb_fx *icb;
635 struct req_que *req = ha->req_q_map[0];
636 struct rsp_que *rsp = ha->rsp_q_map[0];
637
638 /* Setup ring parameters in initialization control block. */
639 icb = (struct init_cb_fx *)ha->init_cb;
640 icb->request_q_outpointer = __constant_cpu_to_le16(0);
641 icb->response_q_inpointer = __constant_cpu_to_le16(0);
642 icb->request_q_length = cpu_to_le16(req->length);
643 icb->response_q_length = cpu_to_le16(rsp->length);
644 icb->request_q_address[0] = cpu_to_le32(LSD(req->dma));
645 icb->request_q_address[1] = cpu_to_le32(MSD(req->dma));
646 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
647 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
648
649 WRT_REG_DWORD(&reg->req_q_in, 0);
650 WRT_REG_DWORD(&reg->req_q_out, 0);
651
652 WRT_REG_DWORD(&reg->rsp_q_in, 0);
653 WRT_REG_DWORD(&reg->rsp_q_out, 0);
654
655 /* PCI posting */
656 RD_REG_DWORD(&reg->rsp_q_out);
657 }
658
659 char *
660 qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str)
661 {
662 struct qla_hw_data *ha = vha->hw;
663 int pcie_reg;
664
665 pcie_reg = pci_find_capability(ha->pdev, PCI_CAP_ID_EXP);
666 if (pcie_reg) {
667 strcpy(str, "PCIe iSA");
668 return str;
669 }
670 return str;
671 }
672
673 char *
674 qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
675 {
676 struct qla_hw_data *ha = vha->hw;
677
678 sprintf(str, "%s", ha->mr.fw_version);
679 return str;
680 }
681
682 void
683 qlafx00_enable_intrs(struct qla_hw_data *ha)
684 {
685 unsigned long flags = 0;
686
687 spin_lock_irqsave(&ha->hardware_lock, flags);
688 ha->interrupts_on = 1;
689 QLAFX00_ENABLE_ICNTRL_REG(ha);
690 spin_unlock_irqrestore(&ha->hardware_lock, flags);
691 }
692
693 void
694 qlafx00_disable_intrs(struct qla_hw_data *ha)
695 {
696 unsigned long flags = 0;
697
698 spin_lock_irqsave(&ha->hardware_lock, flags);
699 ha->interrupts_on = 0;
700 QLAFX00_DISABLE_ICNTRL_REG(ha);
701 spin_unlock_irqrestore(&ha->hardware_lock, flags);
702 }
703
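/*
 * Task management (TMF) support: qlafx00_async_tm_cmd() allocates an srb,
 * queues an SRB_TM_CMD IOCB and sleeps on a completion that is signalled
 * from the done/timeout callbacks below.  qlafx00_abort_target() and
 * qlafx00_lun_reset() are thin wrappers that pass TCF_TARGET_RESET and
 * TCF_LUN_RESET respectively.
 */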
704 static void
705 qlafx00_tmf_iocb_timeout(void *data)
706 {
707 srb_t *sp = (srb_t *)data;
708 struct srb_iocb *tmf = &sp->u.iocb_cmd;
709
710 tmf->u.tmf.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
711 complete(&tmf->u.tmf.comp);
712 }
713
714 static void
715 qlafx00_tmf_sp_done(void *data, void *ptr, int res)
716 {
717 srb_t *sp = (srb_t *)ptr;
718 struct srb_iocb *tmf = &sp->u.iocb_cmd;
719
720 complete(&tmf->u.tmf.comp);
721 }
722
723 static int
724 qlafx00_async_tm_cmd(fc_port_t *fcport, uint32_t flags,
725 uint32_t lun, uint32_t tag)
726 {
727 scsi_qla_host_t *vha = fcport->vha;
728 struct srb_iocb *tm_iocb;
729 srb_t *sp;
730 int rval = QLA_FUNCTION_FAILED;
731
732 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
733 if (!sp)
734 goto done;
735
736 tm_iocb = &sp->u.iocb_cmd;
737 sp->type = SRB_TM_CMD;
738 sp->name = "tmf";
739 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
740 tm_iocb->u.tmf.flags = flags;
741 tm_iocb->u.tmf.lun = lun;
742 tm_iocb->u.tmf.data = tag;
743 sp->done = qlafx00_tmf_sp_done;
744 tm_iocb->timeout = qlafx00_tmf_iocb_timeout;
745 init_completion(&tm_iocb->u.tmf.comp);
746
747 rval = qla2x00_start_sp(sp);
748 if (rval != QLA_SUCCESS)
749 goto done_free_sp;
750
751 ql_dbg(ql_dbg_async, vha, 0x507b,
752 "Task management command issued target_id=%x\n",
753 fcport->tgt_id);
754
755 wait_for_completion(&tm_iocb->u.tmf.comp);
756
757 rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
758 QLA_SUCCESS : QLA_FUNCTION_FAILED;
759
760 done_free_sp:
761 sp->free(vha, sp);
762 done:
763 return rval;
764 }
765
766 int
767 qlafx00_abort_target(fc_port_t *fcport, unsigned int l, int tag)
768 {
769 return qlafx00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);
770 }
771
772 int
773 qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
774 {
775 return qlafx00_async_tm_cmd(fcport, TCF_LUN_RESET, l, tag);
776 }
777
778 int
779 qlafx00_loop_reset(scsi_qla_host_t *vha)
780 {
781 int ret;
782 struct fc_port *fcport;
783 struct qla_hw_data *ha = vha->hw;
784
785 if (ql2xtargetreset) {
786 list_for_each_entry(fcport, &vha->vp_fcports, list) {
787 if (fcport->port_type != FCT_TARGET)
788 continue;
789
790 ret = ha->isp_ops->target_reset(fcport, 0, 0);
791 if (ret != QLA_SUCCESS) {
792 ql_dbg(ql_dbg_taskm, vha, 0x803d,
793 "Bus Reset failed: Reset=%d "
794 "d_id=%x.\n", ret, fcport->d_id.b24);
795 }
796 }
797 }
798 return QLA_SUCCESS;
799 }
800
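/*
 * qlafx00_iospace_config
 *	Reserve the PCI regions and map the BAR0 (cregbase) and BAR2 (iobase)
 *	MMIO ranges for the ISPFx00.
 *
 * Returns:
 *	0 = success.
 *	-ENOMEM = a region could not be reserved or remapped.
 */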
801 int
802 qlafx00_iospace_config(struct qla_hw_data *ha)
803 {
804 if (pci_request_selected_regions(ha->pdev, ha->bars,
805 QLA2XXX_DRIVER_NAME)) {
806 ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
807 "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
808 pci_name(ha->pdev));
809 goto iospace_error_exit;
810 }
811
812 /* Use MMIO operations for all accesses. */
813 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
814 ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
815 "Invalid pci I/O region size (%s).\n",
816 pci_name(ha->pdev));
817 goto iospace_error_exit;
818 }
819 if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
820 ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
821 "Invalid PCI mem BAR0 region size (%s), aborting\n",
822 pci_name(ha->pdev));
823 goto iospace_error_exit;
824 }
825
826 ha->cregbase =
827 ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
828 if (!ha->cregbase) {
829 ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
830 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
831 goto iospace_error_exit;
832 }
833
834 if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
835 ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
836 "region #2 not an MMIO resource (%s), aborting\n",
837 pci_name(ha->pdev));
838 goto iospace_error_exit;
839 }
840 if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
841 ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
842 "Invalid PCI mem BAR2 region size (%s), aborting\n",
843 pci_name(ha->pdev));
844 goto iospace_error_exit;
845 }
846
847 ha->iobase =
848 ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
849 if (!ha->iobase) {
850 ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
851 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
852 goto iospace_error_exit;
853 }
854
855 /* Determine queue resources */
856 ha->max_req_queues = ha->max_rsp_queues = 1;
857
858 ql_log_pci(ql_log_info, ha->pdev, 0x012c,
859 "Bars 0x%x, iobase0 0x%p, iobase2 0x%p\n",
860 ha->bars, ha->cregbase, ha->iobase);
861
862 return 0;
863
864 iospace_error_exit:
865 return -ENOMEM;
866 }
867
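/*
 * qlafx00_save_queue_ptrs
 *	Preserve the driver-allocated request/response ring length, virtual
 *	address and DMA handle in the *_fx00 fields before the rings are
 *	re-pointed at the on-chip queue memory by qlafx00_config_queues().
 */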
868 static void
869 qlafx00_save_queue_ptrs(struct scsi_qla_host *vha)
870 {
871 struct qla_hw_data *ha = vha->hw;
872 struct req_que *req = ha->req_q_map[0];
873 struct rsp_que *rsp = ha->rsp_q_map[0];
874
875 req->length_fx00 = req->length;
876 req->ring_fx00 = req->ring;
877 req->dma_fx00 = req->dma;
878
879 rsp->length_fx00 = rsp->length;
880 rsp->ring_fx00 = rsp->ring;
881 rsp->dma_fx00 = rsp->dma;
882
883 ql_dbg(ql_dbg_init, vha, 0x012d,
884 "req: %p, ring_fx00: %p, length_fx00: 0x%x,"
885 "req->dma_fx00: 0x%llx\n", req, req->ring_fx00,
886 req->length_fx00, (u64)req->dma_fx00);
887
888 ql_dbg(ql_dbg_init, vha, 0x012e,
889 "rsp: %p, ring_fx00: %p, length_fx00: 0x%x,"
890 "rsp->dma_fx00: 0x%llx\n", rsp, rsp->ring_fx00,
891 rsp->length_fx00, (u64)rsp->dma_fx00);
892 }
893
894 static int
895 qlafx00_config_queues(struct scsi_qla_host *vha)
896 {
897 struct qla_hw_data *ha = vha->hw;
898 struct req_que *req = ha->req_q_map[0];
899 struct rsp_que *rsp = ha->rsp_q_map[0];
900 dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
901
902 req->length = ha->req_que_len;
903 req->ring = (void *)ha->iobase + ha->req_que_off;
904 req->dma = bar2_hdl + ha->req_que_off;
905 if ((!req->ring) || (req->length == 0)) {
906 ql_log_pci(ql_log_info, ha->pdev, 0x012f,
907 "Unable to allocate memory for req_ring\n");
908 return QLA_FUNCTION_FAILED;
909 }
910
911 ql_dbg(ql_dbg_init, vha, 0x0130,
912 "req: %p req_ring pointer %p req len 0x%x "
913 "req off 0x%x\n, req->dma: 0x%llx",
914 req, req->ring, req->length,
915 ha->req_que_off, (u64)req->dma);
916
917 rsp->length = ha->rsp_que_len;
918 rsp->ring = (void *)ha->iobase + ha->rsp_que_off;
919 rsp->dma = bar2_hdl + ha->rsp_que_off;
920 if ((!rsp->ring) || (rsp->length == 0)) {
921 ql_log_pci(ql_log_info, ha->pdev, 0x0131,
922 "Unable to allocate memory for rsp_ring\n");
923 return QLA_FUNCTION_FAILED;
924 }
925
926 ql_dbg(ql_dbg_init, vha, 0x0132,
927 "rsp: %p rsp_ring pointer %p rsp len 0x%x "
928 "rsp off 0x%x, rsp->dma: 0x%llx\n",
929 rsp, rsp->ring, rsp->length,
930 ha->rsp_que_off, (u64)rsp->dma);
931
932 return QLA_SUCCESS;
933 }
934
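/*
 * qlafx00_init_fw_ready
 *	Poll aenmailbox0 for up to 30 seconds, following the 8060 AEN
 *	handshake: pick up the interrupt codes and queue offsets/lengths the
 *	firmware publishes, issue a driver shutdown and/or soft reset when the
 *	firmware reports an error, and return once the firmware has either
 *	restarted or is waiting to be initialized by the driver.
 */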
935 static int
936 qlafx00_init_fw_ready(scsi_qla_host_t *vha)
937 {
938 int rval = 0;
939 unsigned long wtime;
940 uint16_t wait_time; /* Wait time */
941 struct qla_hw_data *ha = vha->hw;
942 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
943 uint32_t aenmbx, aenmbx7 = 0;
944 uint32_t pseudo_aen;
945 uint32_t state[5];
946 bool done = false;
947
948 /* 30 seconds wait - Adjust if required */
949 wait_time = 30;
950
951 pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
952 if (pseudo_aen == 1) {
953 aenmbx7 = RD_REG_DWORD(&reg->initval7);
954 ha->mbx_intr_code = MSW(aenmbx7);
955 ha->rqstq_intr_code = LSW(aenmbx7);
956 rval = qlafx00_driver_shutdown(vha, 10);
957 if (rval != QLA_SUCCESS)
958 qlafx00_soft_reset(vha);
959 }
960
961 /* wait time before firmware ready */
962 wtime = jiffies + (wait_time * HZ);
963 do {
964 aenmbx = RD_REG_DWORD(&reg->aenmailbox0);
965 barrier();
966 ql_dbg(ql_dbg_mbx, vha, 0x0133,
967 "aenmbx: 0x%x\n", aenmbx);
968
969 switch (aenmbx) {
970 case MBA_FW_NOT_STARTED:
971 case MBA_FW_STARTING:
972 break;
973
974 case MBA_SYSTEM_ERR:
975 case MBA_REQ_TRANSFER_ERR:
976 case MBA_RSP_TRANSFER_ERR:
977 case MBA_FW_INIT_FAILURE:
978 qlafx00_soft_reset(vha);
979 break;
980
981 case MBA_FW_RESTART_CMPLT:
982 /* Set the mbx and rqstq intr code */
983 aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
984 ha->mbx_intr_code = MSW(aenmbx7);
985 ha->rqstq_intr_code = LSW(aenmbx7);
986 ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
987 ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
988 ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
989 ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
990 WRT_REG_DWORD(&reg->aenmailbox0, 0);
991 RD_REG_DWORD_RELAXED(&reg->aenmailbox0);
992 ql_dbg(ql_dbg_init, vha, 0x0134,
993 "f/w returned mbx_intr_code: 0x%x, "
994 "rqstq_intr_code: 0x%x\n",
995 ha->mbx_intr_code, ha->rqstq_intr_code);
996 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
997 rval = QLA_SUCCESS;
998 done = true;
999 break;
1000
1001 default:
1002 /* The fw is apparently not ready. In order to continue, we
1003 * might need to issue a Mbox cmd, but the problem is
1004 * that the DoorBell vector values that come with the
1005 * 8060 AEN are most likely gone by now (and thus no
1006 * bell would be rung on the fw side when mbox cmd is
1007 * issued). We have to therefore grab the 8060 AEN
1008 * shadow regs (filled in by FW when the last 8060
1009 * AEN was being posted).
1010 * Do the following to determine what is needed in
1011 * order to get the FW ready:
1012 * 1. reload the 8060 AEN values from the shadow regs
1013 * 2. clear int status to get rid of possible pending
1014 * interrupts
1015 * 3. issue Get FW State Mbox cmd to determine fw state
1016 * Set the mbx and rqstq intr code from Shadow Regs
1017 */
1018 aenmbx7 = RD_REG_DWORD(&reg->initval7);
1019 ha->mbx_intr_code = MSW(aenmbx7);
1020 ha->rqstq_intr_code = LSW(aenmbx7);
1021 ha->req_que_off = RD_REG_DWORD(&reg->initval1);
1022 ha->rsp_que_off = RD_REG_DWORD(&reg->initval3);
1023 ha->req_que_len = RD_REG_DWORD(&reg->initval5);
1024 ha->rsp_que_len = RD_REG_DWORD(&reg->initval6);
1025 ql_dbg(ql_dbg_init, vha, 0x0135,
1026 "f/w returned mbx_intr_code: 0x%x, "
1027 "rqstq_intr_code: 0x%x\n",
1028 ha->mbx_intr_code, ha->rqstq_intr_code);
1029 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1030
1031 /* Get the FW state */
1032 rval = qlafx00_get_firmware_state(vha, state);
1033 if (rval != QLA_SUCCESS) {
1034 /* Retry if timer has not expired */
1035 break;
1036 }
1037
1038 if (state[0] == FSTATE_FX00_CONFIG_WAIT) {
1039 /* Firmware is waiting to be
1040 * initialized by driver
1041 */
1042 rval = QLA_SUCCESS;
1043 done = true;
1044 break;
1045 }
1046
1047 /* Issue driver shutdown and wait until f/w recovers.
1048 * Driver should continue to poll until 8060 AEN is
1049 * received indicating firmware recovery.
1050 */
1051 ql_dbg(ql_dbg_init, vha, 0x0136,
1052 "Sending Driver shutdown fw_state 0x%x\n",
1053 state[0]);
1054
1055 rval = qlafx00_driver_shutdown(vha, 10);
1056 if (rval != QLA_SUCCESS) {
1057 rval = QLA_FUNCTION_FAILED;
1058 break;
1059 }
1060 msleep(500);
1061
1062 wtime = jiffies + (wait_time * HZ);
1063 break;
1064 }
1065
1066 if (!done) {
1067 if (time_after_eq(jiffies, wtime)) {
1068 ql_dbg(ql_dbg_init, vha, 0x0137,
1069 "Init f/w failed: aen[7]: 0x%x\n",
1070 RD_REG_DWORD(&reg->aenmailbox7));
1071 rval = QLA_FUNCTION_FAILED;
1072 done = true;
1073 break;
1074 }
1075 /* Delay for a while */
1076 msleep(500);
1077 }
1078 } while (!done);
1079
1080 if (rval)
1081 ql_dbg(ql_dbg_init, vha, 0x0138,
1082 "%s **** FAILED ****.\n", __func__);
1083 else
1084 ql_dbg(ql_dbg_init, vha, 0x0139,
1085 "%s **** SUCCESS ****.\n", __func__);
1086
1087 return rval;
1088 }
1089
1090 /*
1091 * qlafx00_fw_ready() - Waits for firmware ready.
1092 * @vha: HA context
1093 *
1094 * Returns 0 on success.
1095 */
1096 int
1097 qlafx00_fw_ready(scsi_qla_host_t *vha)
1098 {
1099 int rval;
1100 unsigned long wtime;
1101 uint16_t wait_time; /* Wait time if loop is coming ready */
1102 uint32_t state[5];
1103
1104 rval = QLA_SUCCESS;
1105
1106 wait_time = 10;
1107
1108 /* wait time before firmware ready */
1109 wtime = jiffies + (wait_time * HZ);
1110
1111 /* Wait for ISP to finish init */
1112 if (!vha->flags.init_done)
1113 ql_dbg(ql_dbg_init, vha, 0x013a,
1114 "Waiting for init to complete...\n");
1115
1116 do {
1117 rval = qlafx00_get_firmware_state(vha, state);
1118
1119 if (rval == QLA_SUCCESS) {
1120 if (state[0] == FSTATE_FX00_INITIALIZED) {
1121 ql_dbg(ql_dbg_init, vha, 0x013b,
1122 "fw_state=%x\n", state[0]);
1123 rval = QLA_SUCCESS;
1124 break;
1125 }
1126 }
1127 rval = QLA_FUNCTION_FAILED;
1128
1129 if (time_after_eq(jiffies, wtime))
1130 break;
1131
1132 /* Delay for a while */
1133 msleep(500);
1134
1135 ql_dbg(ql_dbg_init, vha, 0x013c,
1136 "fw_state=%x curr time=%lx.\n", state[0], jiffies);
1137 } while (1);
1138
1139
1140 if (rval)
1141 ql_dbg(ql_dbg_init, vha, 0x013d,
1142 "Firmware ready **** FAILED ****.\n");
1143 else
1144 ql_dbg(ql_dbg_init, vha, 0x013e,
1145 "Firmware ready **** SUCCESS ****.\n");
1146
1147 return rval;
1148 }
1149
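/*
 * qlafx00_find_all_targets
 *	Walk the target-node bitmap returned by FXDISC_GET_TGT_NODE_LIST,
 *	query node info for each set bit and reconcile the result against the
 *	existing vp_fcports list; newly discovered remote ports are collected
 *	on new_fcports for the caller to add.
 */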
1150 static int
1151 qlafx00_find_all_targets(scsi_qla_host_t *vha,
1152 struct list_head *new_fcports)
1153 {
1154 int rval;
1155 uint16_t tgt_id;
1156 fc_port_t *fcport, *new_fcport;
1157 int found;
1158 struct qla_hw_data *ha = vha->hw;
1159
1160 rval = QLA_SUCCESS;
1161
1162 if (!test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))
1163 return QLA_FUNCTION_FAILED;
1164
1165 if ((atomic_read(&vha->loop_down_timer) ||
1166 STATE_TRANSITION(vha))) {
1167 atomic_set(&vha->loop_down_timer, 0);
1168 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1169 return QLA_FUNCTION_FAILED;
1170 }
1171
1172 ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x2088,
1173 "Listing Target bit map...\n");
1174 ql_dump_buffer(ql_dbg_disc + ql_dbg_init, vha,
1175 0x2089, (uint8_t *)ha->gid_list, 32);
1176
1177 /* Allocate temporary rmtport for any new rmtports discovered. */
1178 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1179 if (new_fcport == NULL)
1180 return QLA_MEMORY_ALLOC_FAILED;
1181
1182 for_each_set_bit(tgt_id, (void *)ha->gid_list,
1183 QLAFX00_TGT_NODE_LIST_SIZE) {
1184
1185 /* Send get target node info */
1186 new_fcport->tgt_id = tgt_id;
1187 rval = qlafx00_fx_disc(vha, new_fcport,
1188 FXDISC_GET_TGT_NODE_INFO);
1189 if (rval != QLA_SUCCESS) {
1190 ql_log(ql_log_warn, vha, 0x208a,
1191 "Target info scan failed -- assuming zero-entry "
1192 "result...\n");
1193 continue;
1194 }
1195
1196 /* Locate matching device in database. */
1197 found = 0;
1198 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1199 if (memcmp(new_fcport->port_name,
1200 fcport->port_name, WWN_SIZE))
1201 continue;
1202
1203 found++;
1204
1205 /*
1206 * If tgt_id is same and state FCS_ONLINE, nothing
1207 * changed.
1208 */
1209 if (fcport->tgt_id == new_fcport->tgt_id &&
1210 atomic_read(&fcport->state) == FCS_ONLINE)
1211 break;
1212
1213 /*
1214 * Tgt ID changed or device was marked to be updated.
1215 */
1216 ql_dbg(ql_dbg_disc + ql_dbg_init, vha, 0x208b,
1217 "TGT-ID Change(%s): Present tgt id: "
1218 "0x%x state: 0x%x "
1219 "wwnn = %llx wwpn = %llx.\n",
1220 __func__, fcport->tgt_id,
1221 atomic_read(&fcport->state),
1222 (unsigned long long)wwn_to_u64(fcport->node_name),
1223 (unsigned long long)wwn_to_u64(fcport->port_name));
1224
1225 ql_log(ql_log_info, vha, 0x208c,
1226 "TGT-ID Announce(%s): Discovered tgt "
1227 "id 0x%x wwnn = %llx "
1228 "wwpn = %llx.\n", __func__, new_fcport->tgt_id,
1229 (unsigned long long)
1230 wwn_to_u64(new_fcport->node_name),
1231 (unsigned long long)
1232 wwn_to_u64(new_fcport->port_name));
1233
1234 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1235 fcport->old_tgt_id = fcport->tgt_id;
1236 fcport->tgt_id = new_fcport->tgt_id;
1237 ql_log(ql_log_info, vha, 0x208d,
1238 "TGT-ID: New fcport Added: %p\n", fcport);
1239 qla2x00_update_fcport(vha, fcport);
1240 } else {
1241 ql_log(ql_log_info, vha, 0x208e,
1242 " Existing TGT-ID %x did not get "
1243 " offline event from firmware.\n",
1244 fcport->old_tgt_id);
1245 qla2x00_mark_device_lost(vha, fcport, 0, 0);
1246 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1247 kfree(new_fcport);
1248 return rval;
1249 }
1250 break;
1251 }
1252
1253 if (found)
1254 continue;
1255
1256 /* If device was not in our fcports list, then add it. */
1257 list_add_tail(&new_fcport->list, new_fcports);
1258
1259 /* Allocate a new replacement fcport. */
1260 new_fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1261 if (new_fcport == NULL)
1262 return QLA_MEMORY_ALLOC_FAILED;
1263 }
1264
1265 kfree(new_fcport);
1266 return rval;
1267 }
1268
1269 /*
1270 * qlafx00_configure_all_targets
1271 * Setup target devices with node IDs.
1272 *
1273 * Input:
1274 * ha = adapter block pointer.
1275 *
1276 * Returns:
1277 * 0 = success.
1278 * BIT_0 = error
1279 */
1280 static int
1281 qlafx00_configure_all_targets(scsi_qla_host_t *vha)
1282 {
1283 int rval;
1284 fc_port_t *fcport, *rmptemp;
1285 LIST_HEAD(new_fcports);
1286
1287 rval = qlafx00_fx_disc(vha, &vha->hw->mr.fcport,
1288 FXDISC_GET_TGT_NODE_LIST);
1289 if (rval != QLA_SUCCESS) {
1290 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1291 return rval;
1292 }
1293
1294 rval = qlafx00_find_all_targets(vha, &new_fcports);
1295 if (rval != QLA_SUCCESS) {
1296 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1297 return rval;
1298 }
1299
1300 /*
1301 * Delete all previous devices marked lost.
1302 */
1303 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1304 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
1305 break;
1306
1307 if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
1308 if (fcport->port_type != FCT_INITIATOR)
1309 qla2x00_mark_device_lost(vha, fcport, 0, 0);
1310 }
1311 }
1312
1313 /*
1314 * Add the new devices to our devices list.
1315 */
1316 list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
1317 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
1318 break;
1319
1320 qla2x00_update_fcport(vha, fcport);
1321 list_move_tail(&fcport->list, &vha->vp_fcports);
1322 ql_log(ql_log_info, vha, 0x208f,
1323 "Attach new target id 0x%x wwnn = %llx "
1324 "wwpn = %llx.\n",
1325 fcport->tgt_id,
1326 (unsigned long long)wwn_to_u64(fcport->node_name),
1327 (unsigned long long)wwn_to_u64(fcport->port_name));
1328 }
1329
1330 /* Free all new device structures not processed. */
1331 list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) {
1332 list_del(&fcport->list);
1333 kfree(fcport);
1334 }
1335
1336 return rval;
1337 }
1338
1339 /*
1340 * qlafx00_configure_devices
1341 * Updates Fibre Channel Device Database with what is actually on loop.
1342 *
1343 * Input:
1344 * ha = adapter block pointer.
1345 *
1346 * Returns:
1347 * 0 = success.
1348 * 1 = error.
1349 * 2 = database was full and device was not configured.
1350 */
1351 int
1352 qlafx00_configure_devices(scsi_qla_host_t *vha)
1353 {
1354 int rval;
1355 unsigned long flags, save_flags;
1356 rval = QLA_SUCCESS;
1357
1358 save_flags = flags = vha->dpc_flags;
1359
1360 ql_dbg(ql_dbg_disc, vha, 0x2090,
1361 "Configure devices -- dpc flags =0x%lx\n", flags);
1362
1363 rval = qlafx00_configure_all_targets(vha);
1364
1365 if (rval == QLA_SUCCESS) {
1366 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
1367 rval = QLA_FUNCTION_FAILED;
1368 } else {
1369 atomic_set(&vha->loop_state, LOOP_READY);
1370 ql_log(ql_log_info, vha, 0x2091,
1371 "Device Ready\n");
1372 }
1373 }
1374
1375 if (rval) {
1376 ql_dbg(ql_dbg_disc, vha, 0x2092,
1377 "%s *** FAILED ***.\n", __func__);
1378 } else {
1379 ql_dbg(ql_dbg_disc, vha, 0x2093,
1380 "%s: exiting normally.\n", __func__);
1381 }
1382 return rval;
1383 }
1384
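/*
 * qlafx00_abort_isp_cleanup
 *	Common teardown used by ISP abort and critical-temperature recovery:
 *	mark the port offline, reset the chip (unless critemp), fail or requeue
 *	outstanding commands, free IRQs and set the matching recovery dpc flag.
 */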
1385 static void
1386 qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
1387 {
1388 struct qla_hw_data *ha = vha->hw;
1389 fc_port_t *fcport;
1390
1391 vha->flags.online = 0;
1392 ha->mr.fw_hbt_en = 0;
1393
1394 if (!critemp) {
1395 ha->flags.chip_reset_done = 0;
1396 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1397 vha->qla_stats.total_isp_aborts++;
1398 ql_log(ql_log_info, vha, 0x013f,
1399 "Performing ISP error recovery - ha = %p.\n", ha);
1400 ha->isp_ops->reset_chip(vha);
1401 }
1402
1403 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
1404 atomic_set(&vha->loop_state, LOOP_DOWN);
1405 atomic_set(&vha->loop_down_timer,
1406 QLAFX00_LOOP_DOWN_TIME);
1407 } else {
1408 if (!atomic_read(&vha->loop_down_timer))
1409 atomic_set(&vha->loop_down_timer,
1410 QLAFX00_LOOP_DOWN_TIME);
1411 }
1412
1413 /* Clear all async request states across all VPs. */
1414 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1415 fcport->flags = 0;
1416 if (atomic_read(&fcport->state) == FCS_ONLINE)
1417 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
1418 }
1419
1420 if (!ha->flags.eeh_busy) {
1421 if (critemp) {
1422 qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
1423 } else {
1424 /* Requeue all commands in outstanding command list. */
1425 qla2x00_abort_all_cmds(vha, DID_RESET << 16);
1426 }
1427 }
1428
1429 qla2x00_free_irqs(vha);
1430 if (critemp)
1431 set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
1432 else
1433 set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
1434
1435 /* Clear the Interrupts */
1436 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1437
1438 ql_log(ql_log_info, vha, 0x0140,
1439 "%s Done done - ha=%p.\n", __func__, ha);
1440 }
1441
1442 /**
1443 * qlafx00_init_response_q_entries() - Initializes response queue entries.
1444 * @rsp: response queue
1445 *
1446 * Beginning of request ring has initialization control block already built
1447 * by nvram config routine.
1450 */
1451 void
1452 qlafx00_init_response_q_entries(struct rsp_que *rsp)
1453 {
1454 uint16_t cnt;
1455 response_t *pkt;
1456
1457 rsp->ring_ptr = rsp->ring;
1458 rsp->ring_index = 0;
1459 rsp->status_srb = NULL;
1460 pkt = rsp->ring_ptr;
1461 for (cnt = 0; cnt < rsp->length; cnt++) {
1462 pkt->signature = RESPONSE_PROCESSED;
1463 WRT_REG_DWORD((void __iomem *)&pkt->signature,
1464 RESPONSE_PROCESSED);
1465 pkt++;
1466 }
1467 }
1468
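/*
 * qlafx00_rescan_isp
 *	Recovery path after a firmware restart: re-request IRQs, re-read the
 *	interrupt codes and queue parameters from the AEN mailboxes,
 *	re-initialize the rings and re-register the host info with firmware.
 */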
1469 int
1470 qlafx00_rescan_isp(scsi_qla_host_t *vha)
1471 {
1472 uint32_t status = QLA_FUNCTION_FAILED;
1473 struct qla_hw_data *ha = vha->hw;
1474 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
1475 uint32_t aenmbx7;
1476
1477 qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
1478
1479 aenmbx7 = RD_REG_DWORD(&reg->aenmailbox7);
1480 ha->mbx_intr_code = MSW(aenmbx7);
1481 ha->rqstq_intr_code = LSW(aenmbx7);
1482 ha->req_que_off = RD_REG_DWORD(&reg->aenmailbox1);
1483 ha->rsp_que_off = RD_REG_DWORD(&reg->aenmailbox3);
1484 ha->req_que_len = RD_REG_DWORD(&reg->aenmailbox5);
1485 ha->rsp_que_len = RD_REG_DWORD(&reg->aenmailbox6);
1486
1487 ql_dbg(ql_dbg_disc, vha, 0x2094,
1488 "fw returned mbx_intr_code: 0x%x, rqstq_intr_code: 0x%x "
1489 " Req que offset 0x%x Rsp que offset 0x%x\n",
1490 ha->mbx_intr_code, ha->rqstq_intr_code,
1491 ha->req_que_off, ha->rsp_que_off);
1492
1493 /* Clear the Interrupts */
1494 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1495
1496 status = qla2x00_init_rings(vha);
1497 if (!status) {
1498 vha->flags.online = 1;
1499
1500 /* if no cable then assume it's good */
1501 if ((vha->device_flags & DFLG_NO_CABLE))
1502 status = 0;
1503 /* Register system information */
1504 if (qlafx00_fx_disc(vha,
1505 &vha->hw->mr.fcport, FXDISC_REG_HOST_INFO))
1506 ql_dbg(ql_dbg_disc, vha, 0x2095,
1507 "failed to register host info\n");
1508 }
1509 scsi_unblock_requests(vha->host);
1510 return status;
1511 }
1512
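/*
 * qlafx00_timer_routine
 *	Per-second timer work for ISPFx00: monitor the firmware heartbeat
 *	register, drive the FX00_RESET_RECOVERY state machine while the
 *	firmware restarts, and handle critical-temperature recovery.
 */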
1513 void
1514 qlafx00_timer_routine(scsi_qla_host_t *vha)
1515 {
1516 struct qla_hw_data *ha = vha->hw;
1517 uint32_t fw_heart_beat;
1518 uint32_t aenmbx0;
1519 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
1520 uint32_t tempc;
1521
1522 /* Check firmware health */
1523 if (ha->mr.fw_hbt_cnt)
1524 ha->mr.fw_hbt_cnt--;
1525 else {
1526 if ((!ha->flags.mr_reset_hdlr_active) &&
1527 (!test_bit(UNLOADING, &vha->dpc_flags)) &&
1528 (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
1529 (ha->mr.fw_hbt_en)) {
1530 fw_heart_beat = RD_REG_DWORD(&reg->fwheartbeat);
1531 if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
1532 ha->mr.old_fw_hbt_cnt = fw_heart_beat;
1533 ha->mr.fw_hbt_miss_cnt = 0;
1534 } else {
1535 ha->mr.fw_hbt_miss_cnt++;
1536 if (ha->mr.fw_hbt_miss_cnt ==
1537 QLAFX00_HEARTBEAT_MISS_CNT) {
1538 set_bit(ISP_ABORT_NEEDED,
1539 &vha->dpc_flags);
1540 qla2xxx_wake_dpc(vha);
1541 ha->mr.fw_hbt_miss_cnt = 0;
1542 }
1543 }
1544 }
1545 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
1546 }
1547
1548 if (test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags)) {
1549 /* Reset recovery to be performed in timer routine */
1550 aenmbx0 = RD_REG_DWORD(&reg->aenmailbox0);
1551 if (ha->mr.fw_reset_timer_exp) {
1552 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1553 qla2xxx_wake_dpc(vha);
1554 ha->mr.fw_reset_timer_exp = 0;
1555 } else if (aenmbx0 == MBA_FW_RESTART_CMPLT) {
1556 /* Wake up DPC to rescan the targets */
1557 set_bit(FX00_TARGET_SCAN, &vha->dpc_flags);
1558 clear_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
1559 qla2xxx_wake_dpc(vha);
1560 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
1561 } else if ((aenmbx0 == MBA_FW_STARTING) &&
1562 (!ha->mr.fw_hbt_en)) {
1563 ha->mr.fw_hbt_en = 1;
1564 } else if (!ha->mr.fw_reset_timer_tick) {
1565 if (aenmbx0 == ha->mr.old_aenmbx0_state)
1566 ha->mr.fw_reset_timer_exp = 1;
1567 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
1568 } else if (aenmbx0 == 0xFFFFFFFF) {
1569 uint32_t data0, data1;
1570
1571 data0 = QLAFX00_RD_REG(ha,
1572 QLAFX00_BAR1_BASE_ADDR_REG);
1573 data1 = QLAFX00_RD_REG(ha,
1574 QLAFX00_PEX0_WIN0_BASE_ADDR_REG);
1575
1576 data0 &= 0xffff0000;
1577 data1 &= 0x0000ffff;
1578
1579 QLAFX00_WR_REG(ha,
1580 QLAFX00_PEX0_WIN0_BASE_ADDR_REG,
1581 (data0 | data1));
1582 } else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
1583 ha->mr.fw_reset_timer_tick =
1584 QLAFX00_MAX_RESET_INTERVAL;
1585 } else if (aenmbx0 == MBA_FW_RESET_FCT) {
1586 ha->mr.fw_reset_timer_tick =
1587 QLAFX00_MAX_RESET_INTERVAL;
1588 }
1589 ha->mr.old_aenmbx0_state = aenmbx0;
1590 ha->mr.fw_reset_timer_tick--;
1591 }
1592 if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
1593 /*
1594 * Critical temperature recovery to be
1595 * performed in timer routine
1596 */
1597 if (ha->mr.fw_critemp_timer_tick == 0) {
1598 tempc = QLAFX00_GET_TEMPERATURE(ha);
1599 ql_dbg(ql_dbg_timer, vha, 0x6012,
1600 "ISPFx00(%s): Critical temp timer, "
1601 "current SOC temperature: %d\n",
1602 __func__, tempc);
1603 if (tempc < ha->mr.critical_temperature) {
1604 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1605 clear_bit(FX00_CRITEMP_RECOVERY,
1606 &vha->dpc_flags);
1607 qla2xxx_wake_dpc(vha);
1608 }
1609 ha->mr.fw_critemp_timer_tick =
1610 QLAFX00_CRITEMP_INTERVAL;
1611 } else {
1612 ha->mr.fw_critemp_timer_tick--;
1613 }
1614 }
1615 }
1616
1617 /*
1618 * qlafx00_reset_initialize
1619 * Re-initialize after an iSA device reset.
1620 *
1621 * Input:
1622 * ha = adapter block pointer.
1623 *
1624 * Returns:
1625 * 0 = success
1626 */
1627 int
1628 qlafx00_reset_initialize(scsi_qla_host_t *vha)
1629 {
1630 struct qla_hw_data *ha = vha->hw;
1631
1632 if (vha->device_flags & DFLG_DEV_FAILED) {
1633 ql_dbg(ql_dbg_init, vha, 0x0142,
1634 "Device in failed state\n");
1635 return QLA_SUCCESS;
1636 }
1637
1638 ha->flags.mr_reset_hdlr_active = 1;
1639
1640 if (vha->flags.online) {
1641 scsi_block_requests(vha->host);
1642 qlafx00_abort_isp_cleanup(vha, false);
1643 }
1644
1645 ql_log(ql_log_info, vha, 0x0143,
1646 "(%s): succeeded.\n", __func__);
1647 ha->flags.mr_reset_hdlr_active = 0;
1648 return QLA_SUCCESS;
1649 }
1650
1651 /*
1652 * qlafx00_abort_isp
1653 * Resets ISP and aborts all outstanding commands.
1654 *
1655 * Input:
1656 * ha = adapter block pointer.
1657 *
1658 * Returns:
1659 * 0 = success
1660 */
1661 int
1662 qlafx00_abort_isp(scsi_qla_host_t *vha)
1663 {
1664 struct qla_hw_data *ha = vha->hw;
1665
1666 if (vha->flags.online) {
1667 if (unlikely(pci_channel_offline(ha->pdev) &&
1668 ha->flags.pci_channel_io_perm_failure)) {
1669 clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
1670 return QLA_SUCCESS;
1671 }
1672
1673 scsi_block_requests(vha->host);
1674 qlafx00_abort_isp_cleanup(vha, false);
1675 } else {
1676 scsi_block_requests(vha->host);
1677 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1678 vha->qla_stats.total_isp_aborts++;
1679 ha->isp_ops->reset_chip(vha);
1680 set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
1681 /* Clear the Interrupts */
1682 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1683 }
1684
1685 ql_log(ql_log_info, vha, 0x0145,
1686 "(%s): succeeded.\n", __func__);
1687
1688 return QLA_SUCCESS;
1689 }
1690
1691 static inline fc_port_t*
1692 qlafx00_get_fcport(struct scsi_qla_host *vha, int tgt_id)
1693 {
1694 fc_port_t *fcport;
1695
1696 /* Check for matching device in remote port list. */
1697 fcport = NULL;
1698 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1699 if (fcport->tgt_id == tgt_id) {
1700 ql_dbg(ql_dbg_async, vha, 0x5072,
1701 "Matching fcport(%p) found with TGT-ID: 0x%x "
1702 "and Remote TGT_ID: 0x%x\n",
1703 fcport, fcport->tgt_id, tgt_id);
1704 break;
1705 }
1706 }
1707 return fcport;
1708 }
1709
1710 static void
1711 qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id)
1712 {
1713 fc_port_t *fcport;
1714
1715 ql_log(ql_log_info, vha, 0x5073,
1716 "Detach TGT-ID: 0x%x\n", tgt_id);
1717
1718 fcport = qlafx00_get_fcport(vha, tgt_id);
1719 if (!fcport)
1720 return;
1721
1722 qla2x00_mark_device_lost(vha, fcport, 0, 0);
1723
1724 return;
1725 }
1726
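/*
 * qlafx00_process_aen
 *	Deferred (DPC) handler for ISPFx00 asynchronous events: translate
 *	port update, link up/down and temperature events into dpc flags and
 *	FC transport events posted via fc_host_post_event().
 */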
1727 int
1728 qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
1729 {
1730 int rval = 0;
1731 uint32_t aen_code, aen_data;
1732
1733 aen_code = FCH_EVT_VENDOR_UNIQUE;
1734 aen_data = evt->u.aenfx.evtcode;
1735
1736 switch (evt->u.aenfx.evtcode) {
1737 case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
1738 if (evt->u.aenfx.mbx[1] == 0) {
1739 if (evt->u.aenfx.mbx[2] == 1) {
1740 if (!vha->flags.fw_tgt_reported)
1741 vha->flags.fw_tgt_reported = 1;
1742 atomic_set(&vha->loop_down_timer, 0);
1743 atomic_set(&vha->loop_state, LOOP_UP);
1744 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1745 qla2xxx_wake_dpc(vha);
1746 } else if (evt->u.aenfx.mbx[2] == 2) {
1747 qlafx00_tgt_detach(vha, evt->u.aenfx.mbx[3]);
1748 }
1749 } else if (evt->u.aenfx.mbx[1] == 0xffff) {
1750 if (evt->u.aenfx.mbx[2] == 1) {
1751 if (!vha->flags.fw_tgt_reported)
1752 vha->flags.fw_tgt_reported = 1;
1753 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
1754 } else if (evt->u.aenfx.mbx[2] == 2) {
1755 vha->device_flags |= DFLG_NO_CABLE;
1756 qla2x00_mark_all_devices_lost(vha, 1);
1757 }
1758 }
1759 break;
1760 case QLAFX00_MBA_LINK_UP:
1761 aen_code = FCH_EVT_LINKUP;
1762 aen_data = 0;
1763 break;
1764 case QLAFX00_MBA_LINK_DOWN:
1765 aen_code = FCH_EVT_LINKDOWN;
1766 aen_data = 0;
1767 break;
1768 case QLAFX00_MBA_TEMP_OVER:
1769 case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
1770 ql_log(ql_log_info, vha, 0x5082,
1771 "Process critical temperature event "
1772 "aenmb[0]: %x\n",
1773 evt->u.aenfx.evtcode);
1774 scsi_block_requests(vha->host);
1775 qlafx00_abort_isp_cleanup(vha, true);
1776 scsi_unblock_requests(vha->host);
1777 break;
1778 }
1779
1780 fc_host_post_event(vha->host, fc_get_event_number(),
1781 aen_code, aen_data);
1782
1783 return rval;
1784 }
1785
1786 static void
1787 qlafx00_update_host_attr(scsi_qla_host_t *vha, struct port_info_data *pinfo)
1788 {
1789 u64 port_name = 0, node_name = 0;
1790
1791 port_name = (unsigned long long)wwn_to_u64(pinfo->port_name);
1792 node_name = (unsigned long long)wwn_to_u64(pinfo->node_name);
1793
1794 fc_host_node_name(vha->host) = node_name;
1795 fc_host_port_name(vha->host) = port_name;
1796 if (!pinfo->port_type)
1797 vha->hw->current_topology = ISP_CFG_F;
1798 if (pinfo->link_status == QLAFX00_LINK_STATUS_UP)
1799 atomic_set(&vha->loop_state, LOOP_READY);
1800 else if (pinfo->link_status == QLAFX00_LINK_STATUS_DOWN)
1801 atomic_set(&vha->loop_state, LOOP_DOWN);
1802 vha->hw->link_data_rate = (uint16_t)pinfo->link_config;
1803 }
1804
1805 static void
1806 qla2x00_fxdisc_iocb_timeout(void *data)
1807 {
1808 srb_t *sp = (srb_t *)data;
1809 struct srb_iocb *lio = &sp->u.iocb_cmd;
1810
1811 complete(&lio->u.fxiocb.fxiocb_comp);
1812 }
1813
1814 static void
1815 qla2x00_fxdisc_sp_done(void *data, void *ptr, int res)
1816 {
1817 srb_t *sp = (srb_t *)ptr;
1818 struct srb_iocb *lio = &sp->u.iocb_cmd;
1819
1820 complete(&lio->u.fxiocb.fxiocb_comp);
1821 }
1822
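/*
 * qlafx00_fx_disc
 *	Issue an FX00 discovery IOCB (SRB_FXIOCB_DCMD).  Depending on fx_type
 *	a DMA request and/or response buffer is allocated, the IOCB is started
 *	and the caller sleeps until the completion callback fires; the response
 *	is then copied into the driver's ha->mr / vha / fcport state.
 *
 * Illustrative call (matching the callers elsewhere in this file):
 *	qlafx00_fx_disc(vha, &vha->hw->mr.fcport, FXDISC_GET_TGT_NODE_LIST);
 */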
1823 int
1824 qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
1825 {
1826 srb_t *sp;
1827 struct srb_iocb *fdisc;
1828 int rval = QLA_FUNCTION_FAILED;
1829 struct qla_hw_data *ha = vha->hw;
1830 struct host_system_info *phost_info;
1831 struct register_host_info *preg_hsi;
1832 struct new_utsname *p_sysid = NULL;
1833 struct timeval tv;
1834
1835 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1836 if (!sp)
1837 goto done;
1838
1839 fdisc = &sp->u.iocb_cmd;
1840 switch (fx_type) {
1841 case FXDISC_GET_CONFIG_INFO:
1842 fdisc->u.fxiocb.flags =
1843 SRB_FXDISC_RESP_DMA_VALID;
1844 fdisc->u.fxiocb.rsp_len = sizeof(struct config_info_data);
1845 break;
1846 case FXDISC_GET_PORT_INFO:
1847 fdisc->u.fxiocb.flags =
1848 SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
1849 fdisc->u.fxiocb.rsp_len = QLAFX00_PORT_DATA_INFO;
1850 fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->port_id);
1851 break;
1852 case FXDISC_GET_TGT_NODE_INFO:
1853 fdisc->u.fxiocb.flags =
1854 SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
1855 fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_INFO;
1856 fdisc->u.fxiocb.req_data = cpu_to_le32(fcport->tgt_id);
1857 break;
1858 case FXDISC_GET_TGT_NODE_LIST:
1859 fdisc->u.fxiocb.flags =
1860 SRB_FXDISC_RESP_DMA_VALID | SRB_FXDISC_REQ_DWRD_VALID;
1861 fdisc->u.fxiocb.rsp_len = QLAFX00_TGT_NODE_LIST_SIZE;
1862 break;
1863 case FXDISC_REG_HOST_INFO:
1864 fdisc->u.fxiocb.flags = SRB_FXDISC_REQ_DMA_VALID;
1865 fdisc->u.fxiocb.req_len = sizeof(struct register_host_info);
1866 p_sysid = utsname();
1867 if (!p_sysid) {
1868 ql_log(ql_log_warn, vha, 0x303c,
1869 "Not able to get the system informtion\n");
1870 goto done_free_sp;
1871 }
1872 break;
1873 default:
1874 break;
1875 }
1876
1877 if (fdisc->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
1878 fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
1879 fdisc->u.fxiocb.req_len,
1880 &fdisc->u.fxiocb.req_dma_handle, GFP_KERNEL);
1881 if (!fdisc->u.fxiocb.req_addr)
1882 goto done_free_sp;
1883
1884 if (fx_type == FXDISC_REG_HOST_INFO) {
1885 preg_hsi = (struct register_host_info *)
1886 fdisc->u.fxiocb.req_addr;
1887 phost_info = &preg_hsi->hsi;
1888 memset(preg_hsi, 0, sizeof(struct register_host_info));
1889 phost_info->os_type = OS_TYPE_LINUX;
1890 strncpy(phost_info->sysname,
1891 p_sysid->sysname, SYSNAME_LENGTH);
1892 strncpy(phost_info->nodename,
1893 p_sysid->nodename, NODENAME_LENGTH);
1894 strncpy(phost_info->release,
1895 p_sysid->release, RELEASE_LENGTH);
1896 strncpy(phost_info->version,
1897 p_sysid->version, VERSION_LENGTH);
1898 strncpy(phost_info->machine,
1899 p_sysid->machine, MACHINE_LENGTH);
1900 strncpy(phost_info->domainname,
1901 p_sysid->domainname, DOMNAME_LENGTH);
1902 strncpy(phost_info->hostdriver,
1903 QLA2XXX_VERSION, VERSION_LENGTH);
1904 do_gettimeofday(&tv);
1905 preg_hsi->utc = (uint64_t)tv.tv_sec;
1906 ql_dbg(ql_dbg_init, vha, 0x0149,
1907 "ISP%04X: Host registration with firmware\n",
1908 ha->pdev->device);
1909 ql_dbg(ql_dbg_init, vha, 0x014a,
1910 "os_type = '%d', sysname = '%s', nodname = '%s'\n",
1911 phost_info->os_type,
1912 phost_info->sysname,
1913 phost_info->nodename);
1914 ql_dbg(ql_dbg_init, vha, 0x014b,
1915 "release = '%s', version = '%s'\n",
1916 phost_info->release,
1917 phost_info->version);
1918 ql_dbg(ql_dbg_init, vha, 0x014c,
1919 "machine = '%s' "
1920 "domainname = '%s', hostdriver = '%s'\n",
1921 phost_info->machine,
1922 phost_info->domainname,
1923 phost_info->hostdriver);
1924 ql_dump_buffer(ql_dbg_init + ql_dbg_disc, vha, 0x014d,
1925 (uint8_t *)phost_info,
1926 sizeof(struct host_system_info));
1927 }
1928 }
1929
1930 if (fdisc->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
1931 fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
1932 fdisc->u.fxiocb.rsp_len,
1933 &fdisc->u.fxiocb.rsp_dma_handle, GFP_KERNEL);
1934 if (!fdisc->u.fxiocb.rsp_addr)
1935 goto done_unmap_req;
1936 }
1937
1938 sp->type = SRB_FXIOCB_DCMD;
1939 sp->name = "fxdisc";
1940 qla2x00_init_timer(sp, FXDISC_TIMEOUT);
1941 fdisc->timeout = qla2x00_fxdisc_iocb_timeout;
1942 fdisc->u.fxiocb.req_func_type = cpu_to_le16(fx_type);
1943 sp->done = qla2x00_fxdisc_sp_done;
1944
1945 rval = qla2x00_start_sp(sp);
1946 if (rval != QLA_SUCCESS)
1947 goto done_unmap_dma;
1948
1949 wait_for_completion(&fdisc->u.fxiocb.fxiocb_comp);
1950
1951 if (fx_type == FXDISC_GET_CONFIG_INFO) {
1952 struct config_info_data *pinfo =
1953 (struct config_info_data *) fdisc->u.fxiocb.rsp_addr;
1954 memcpy(&vha->hw->mr.product_name, pinfo->product_name,
1955 sizeof(vha->hw->mr.product_name));
1956 memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name,
1957 sizeof(vha->hw->mr.symbolic_name));
1958 memcpy(&vha->hw->mr.serial_num, pinfo->serial_num,
1959 sizeof(vha->hw->mr.serial_num));
1960 memcpy(&vha->hw->mr.hw_version, pinfo->hw_version,
1961 sizeof(vha->hw->mr.hw_version));
1962 memcpy(&vha->hw->mr.fw_version, pinfo->fw_version,
1963 sizeof(vha->hw->mr.fw_version));
1964 strim(vha->hw->mr.fw_version);
1965 memcpy(&vha->hw->mr.uboot_version, pinfo->uboot_version,
1966 sizeof(vha->hw->mr.uboot_version));
1967 memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
1968 sizeof(vha->hw->mr.fru_serial_num));
1969 vha->hw->mr.critical_temperature = pinfo->nominal_temp_value;
1970 ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
1971 QLAFX00_EXTENDED_IO_EN_MASK) != 0;
1972 } else if (fx_type == FXDISC_GET_PORT_INFO) {
1973 struct port_info_data *pinfo =
1974 (struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
1975 memcpy(vha->node_name, pinfo->node_name, WWN_SIZE);
1976 memcpy(vha->port_name, pinfo->port_name, WWN_SIZE);
1977 vha->d_id.b.domain = pinfo->port_id[0];
1978 vha->d_id.b.area = pinfo->port_id[1];
1979 vha->d_id.b.al_pa = pinfo->port_id[2];
1980 qlafx00_update_host_attr(vha, pinfo);
1981 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0141,
1982 (uint8_t *)pinfo, 16);
1983 } else if (fx_type == FXDISC_GET_TGT_NODE_INFO) {
1984 struct qlafx00_tgt_node_info *pinfo =
1985 (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
1986 memcpy(fcport->node_name, pinfo->tgt_node_wwnn, WWN_SIZE);
1987 memcpy(fcport->port_name, pinfo->tgt_node_wwpn, WWN_SIZE);
1988 fcport->port_type = FCT_TARGET;
1989 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0144,
1990 (uint8_t *)pinfo, 16);
1991 } else if (fx_type == FXDISC_GET_TGT_NODE_LIST) {
1992 struct qlafx00_tgt_node_info *pinfo =
1993 (struct qlafx00_tgt_node_info *) fdisc->u.fxiocb.rsp_addr;
1994 ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0146,
1995 (uint8_t *)pinfo, 16);
1996 memcpy(vha->hw->gid_list, pinfo, QLAFX00_TGT_NODE_LIST_SIZE);
1997 }
1998 rval = le32_to_cpu(fdisc->u.fxiocb.result);
1999
2000 done_unmap_dma:
2001 if (fdisc->u.fxiocb.rsp_addr)
2002 dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
2003 fdisc->u.fxiocb.rsp_addr, fdisc->u.fxiocb.rsp_dma_handle);
2004
2005 done_unmap_req:
2006 if (fdisc->u.fxiocb.req_addr)
2007 dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
2008 fdisc->u.fxiocb.req_addr, fdisc->u.fxiocb.req_dma_handle);
2009 done_free_sp:
2010 sp->free(vha, sp);
2011 done:
2012 return rval;
2013 }
2014
2015 static void
2016 qlafx00_abort_iocb_timeout(void *data)
2017 {
2018 srb_t *sp = (srb_t *)data;
2019 struct srb_iocb *abt = &sp->u.iocb_cmd;
2020
2021 abt->u.abt.comp_status = cpu_to_le16((uint16_t)CS_TIMEOUT);
2022 complete(&abt->u.abt.comp);
2023 }
2024
2025 static void
2026 qlafx00_abort_sp_done(void *data, void *ptr, int res)
2027 {
2028 srb_t *sp = (srb_t *)ptr;
2029 struct srb_iocb *abt = &sp->u.iocb_cmd;
2030
2031 complete(&abt->u.abt.comp);
2032 }
2033
2034 static int
2035 qlafx00_async_abt_cmd(srb_t *cmd_sp)
2036 {
2037 scsi_qla_host_t *vha = cmd_sp->fcport->vha;
2038 fc_port_t *fcport = cmd_sp->fcport;
2039 struct srb_iocb *abt_iocb;
2040 srb_t *sp;
2041 int rval = QLA_FUNCTION_FAILED;
2042
2043 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
2044 if (!sp)
2045 goto done;
2046
2047 abt_iocb = &sp->u.iocb_cmd;
2048 sp->type = SRB_ABT_CMD;
2049 sp->name = "abort";
2050 qla2x00_init_timer(sp, FXDISC_TIMEOUT);
2051 abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
2052 sp->done = qlafx00_abort_sp_done;
2053 abt_iocb->timeout = qlafx00_abort_iocb_timeout;
2054 init_completion(&abt_iocb->u.abt.comp);
2055
2056 rval = qla2x00_start_sp(sp);
2057 if (rval != QLA_SUCCESS)
2058 goto done_free_sp;
2059
2060 ql_dbg(ql_dbg_async, vha, 0x507c,
2061 "Abort command issued - hdl=%x, target_id=%x\n",
2062 cmd_sp->handle, fcport->tgt_id);
2063
2064 wait_for_completion(&abt_iocb->u.abt.comp);
2065
2066 rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
2067 QLA_SUCCESS : QLA_FUNCTION_FAILED;
2068
2069 done_free_sp:
2070 sp->free(vha, sp);
2071 done:
2072 return rval;
2073 }
2074
2075 int
2076 qlafx00_abort_command(srb_t *sp)
2077 {
2078 unsigned long flags = 0;
2079
2080 uint32_t handle;
2081 fc_port_t *fcport = sp->fcport;
2082 struct scsi_qla_host *vha = fcport->vha;
2083 struct qla_hw_data *ha = vha->hw;
2084 struct req_que *req = vha->req;
2085
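	/* Locate this SRB's handle in the outstanding command array before aborting. */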
2086 spin_lock_irqsave(&ha->hardware_lock, flags);
2087 for (handle = 1; handle < DEFAULT_OUTSTANDING_COMMANDS; handle++) {
2088 if (req->outstanding_cmds[handle] == sp)
2089 break;
2090 }
2091 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2092 if (handle == DEFAULT_OUTSTANDING_COMMANDS) {
2093 /* Command not found. */
2094 return QLA_FUNCTION_FAILED;
2095 }
2096 return qlafx00_async_abt_cmd(sp);
2097 }
2098
2099 /*
2100 * qlafx00_initialize_adapter
2101 * Initialize board.
2102 *
2103 * Input:
2104 * ha = adapter block pointer.
2105 *
2106 * Returns:
2107 * 0 = success
2108 */
2109 int
2110 qlafx00_initialize_adapter(scsi_qla_host_t *vha)
2111 {
2112 int rval;
2113 struct qla_hw_data *ha = vha->hw;
2114 uint32_t tempc;
2115
2116 /* Clear adapter flags. */
2117 vha->flags.online = 0;
2118 ha->flags.chip_reset_done = 0;
2119 vha->flags.reset_active = 0;
2120 ha->flags.pci_channel_io_perm_failure = 0;
2121 ha->flags.eeh_busy = 0;
2122 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
2123 atomic_set(&vha->loop_state, LOOP_DOWN);
2124 vha->device_flags = DFLG_NO_CABLE;
2125 vha->dpc_flags = 0;
2126 vha->flags.management_server_logged_in = 0;
2127 vha->marker_needed = 0;
2128 ha->isp_abort_cnt = 0;
2129 ha->beacon_blink_led = 0;
2130
2131 set_bit(0, ha->req_qid_map);
2132 set_bit(0, ha->rsp_qid_map);
2133
2134 ql_dbg(ql_dbg_init, vha, 0x0147,
2135 "Configuring PCI space...\n");
2136
2137 rval = ha->isp_ops->pci_config(vha);
2138 if (rval) {
2139 ql_log(ql_log_warn, vha, 0x0148,
2140 "Unable to configure PCI space.\n");
2141 return rval;
2142 }
2143
2144 rval = qlafx00_init_fw_ready(vha);
2145 if (rval != QLA_SUCCESS)
2146 return rval;
2147
2148 qlafx00_save_queue_ptrs(vha);
2149
2150 rval = qlafx00_config_queues(vha);
2151 if (rval != QLA_SUCCESS)
2152 return rval;
2153
2154 /*
2155 * Allocate the array of outstanding commands
2156 * now that we know the firmware resources.
2157 */
2158 rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
2159 if (rval != QLA_SUCCESS)
2160 return rval;
2161
2162 rval = qla2x00_init_rings(vha);
2163 ha->flags.chip_reset_done = 1;
2164
2165 tempc = QLAFX00_GET_TEMPERATURE(ha);
2166 ql_dbg(ql_dbg_init, vha, 0x0152,
2167 "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
2168 __func__, tempc);
2169
2170 return rval;
2171 }
2172
2173 uint32_t
2174 qlafx00_fw_state_show(struct device *dev, struct device_attribute *attr,
2175 char *buf)
2176 {
2177 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
2178 int rval = QLA_FUNCTION_FAILED;
2179 uint32_t state[1];
2180
2181 if (qla2x00_reset_active(vha))
2182 ql_log(ql_log_warn, vha, 0x70ce,
2183 "ISP reset active.\n");
2184 else if (!vha->hw->flags.eeh_busy) {
2185 rval = qlafx00_get_firmware_state(vha, state);
2186 }
2187 if (rval != QLA_SUCCESS)
2188 memset(state, -1, sizeof(state));
2189
2190 return state[0];
2191 }
2192
2193 void
2194 qlafx00_get_host_speed(struct Scsi_Host *shost)
2195 {
2196 struct qla_hw_data *ha = ((struct scsi_qla_host *)
2197 (shost_priv(shost)))->hw;
2198 u32 speed = FC_PORTSPEED_UNKNOWN;
2199
2200 switch (ha->link_data_rate) {
2201 case QLAFX00_PORT_SPEED_2G:
2202 speed = FC_PORTSPEED_2GBIT;
2203 break;
2204 case QLAFX00_PORT_SPEED_4G:
2205 speed = FC_PORTSPEED_4GBIT;
2206 break;
2207 case QLAFX00_PORT_SPEED_8G:
2208 speed = FC_PORTSPEED_8GBIT;
2209 break;
2210 case QLAFX00_PORT_SPEED_10G:
2211 speed = FC_PORTSPEED_10GBIT;
2212 break;
2213 }
2214 fc_host_speed(shost) = speed;
2215 }
2216
2217 /** QLAFX00 specific ISR implementation functions */
2218
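/*
 * Copy sense data into the midlayer sense buffer and record how much
 * firmware sense data is still expected via status continuation entries.
 */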
2219 static inline void
2220 qlafx00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
2221 uint32_t sense_len, struct rsp_que *rsp, int res)
2222 {
2223 struct scsi_qla_host *vha = sp->fcport->vha;
2224 struct scsi_cmnd *cp = GET_CMD_SP(sp);
2225 uint32_t track_sense_len;
2226
2227 SET_FW_SENSE_LEN(sp, sense_len);
2228
2229 if (sense_len >= SCSI_SENSE_BUFFERSIZE)
2230 sense_len = SCSI_SENSE_BUFFERSIZE;
2231
2232 SET_CMD_SENSE_LEN(sp, sense_len);
2233 SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
2234 track_sense_len = sense_len;
2235
2236 if (sense_len > par_sense_len)
2237 sense_len = par_sense_len;
2238
2239 memcpy(cp->sense_buffer, sense_data, sense_len);
2240
2241 SET_FW_SENSE_LEN(sp, GET_FW_SENSE_LEN(sp) - sense_len);
2242
2243 SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
2244 track_sense_len -= sense_len;
2245 SET_CMD_SENSE_LEN(sp, track_sense_len);
2246
2247 ql_dbg(ql_dbg_io, vha, 0x304d,
2248 "sense_len=0x%x par_sense_len=0x%x track_sense_len=0x%x.\n",
2249 sense_len, par_sense_len, track_sense_len);
2250 if (GET_FW_SENSE_LEN(sp) > 0) {
2251 rsp->status_srb = sp;
2252 cp->result = res;
2253 }
2254
2255 if (sense_len) {
2256 ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3039,
2257 "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
2258 sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
2259 cp);
2260 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3049,
2261 cp->sense_buffer, sense_len);
2262 }
2263 }
2264
2265 static void
2266 qlafx00_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2267 struct tsk_mgmt_entry_fx00 *pkt, srb_t *sp,
2268 __le16 sstatus, __le16 cpstatus)
2269 {
2270 struct srb_iocb *tmf;
2271
2272 tmf = &sp->u.iocb_cmd;
2273 if (cpstatus != cpu_to_le16((uint16_t)CS_COMPLETE) ||
2274 (sstatus & cpu_to_le16((uint16_t)SS_RESPONSE_INFO_LEN_VALID)))
2275 cpstatus = cpu_to_le16((uint16_t)CS_INCOMPLETE);
2276 tmf->u.tmf.comp_status = cpstatus;
2277 sp->done(vha, sp, 0);
2278 }
2279
2280 static void
2281 qlafx00_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
2282 struct abort_iocb_entry_fx00 *pkt)
2283 {
2284 const char func[] = "ABT_IOCB";
2285 srb_t *sp;
2286 struct srb_iocb *abt;
2287
2288 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2289 if (!sp)
2290 return;
2291
2292 abt = &sp->u.iocb_cmd;
2293 abt->u.abt.comp_status = pkt->tgt_id_sts;
2294 sp->done(vha, sp, 0);
2295 }
2296
2297 static void
2298 qlafx00_ioctl_iosb_entry(scsi_qla_host_t *vha, struct req_que *req,
2299 struct ioctl_iocb_entry_fx00 *pkt)
2300 {
2301 const char func[] = "IOSB_IOCB";
2302 srb_t *sp;
2303 struct fc_bsg_job *bsg_job;
2304 struct srb_iocb *iocb_job;
2305 int res;
2306 struct qla_mt_iocb_rsp_fx00 fstatus;
2307 uint8_t *fw_sts_ptr;
2308
2309 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2310 if (!sp)
2311 return;
2312
2313 if (sp->type == SRB_FXIOCB_DCMD) {
2314 iocb_job = &sp->u.iocb_cmd;
2315 iocb_job->u.fxiocb.seq_number = pkt->seq_no;
2316 iocb_job->u.fxiocb.fw_flags = pkt->fw_iotcl_flags;
2317 iocb_job->u.fxiocb.result = pkt->status;
2318 if (iocb_job->u.fxiocb.flags & SRB_FXDISC_RSP_DWRD_VALID)
2319 iocb_job->u.fxiocb.req_data =
2320 pkt->dataword_r;
2321 } else {
2322 bsg_job = sp->u.bsg_job;
2323
2324 memset(&fstatus, 0, sizeof(struct qla_mt_iocb_rsp_fx00));
2325
2326 fstatus.reserved_1 = pkt->reserved_0;
2327 fstatus.func_type = pkt->comp_func_num;
2328 fstatus.ioctl_flags = pkt->fw_iotcl_flags;
2329 fstatus.ioctl_data = pkt->dataword_r;
2330 fstatus.adapid = pkt->adapid;
2331 fstatus.adapid_hi = pkt->adapid_hi;
2332 fstatus.reserved_2 = pkt->reserved_1;
2333 fstatus.res_count = pkt->residuallen;
2334 fstatus.status = pkt->status;
2335 fstatus.seq_number = pkt->seq_no;
2336 memcpy(fstatus.reserved_3,
2337 pkt->reserved_2, 20 * sizeof(uint8_t));
2338
2339 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
2340 sizeof(struct fc_bsg_reply);
2341
2342 memcpy(fw_sts_ptr, (uint8_t *)&fstatus,
2343 sizeof(struct qla_mt_iocb_rsp_fx00));
2344 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
2345 sizeof(struct qla_mt_iocb_rsp_fx00) + sizeof(uint8_t);
2346
2347 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
2348 sp->fcport->vha, 0x5080,
2349 (uint8_t *)pkt, sizeof(struct ioctl_iocb_entry_fx00));
2350
2351 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
2352 sp->fcport->vha, 0x5074,
2353 (uint8_t *)fw_sts_ptr, sizeof(struct qla_mt_iocb_rsp_fx00));
2354
2355 res = bsg_job->reply->result = DID_OK << 16;
2356 bsg_job->reply->reply_payload_rcv_len =
2357 bsg_job->reply_payload.payload_len;
2358 }
2359 sp->done(vha, sp, res);
2360 }
2361
2362 /**
2363 * qlafx00_status_entry() - Process a Status IOCB entry.
2364 * @ha: SCSI driver HA context
2365 * @pkt: Entry pointer
2366 */
2367 static void
2368 qlafx00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
2369 {
2370 srb_t *sp;
2371 fc_port_t *fcport;
2372 struct scsi_cmnd *cp;
2373 struct sts_entry_fx00 *sts;
2374 __le16 comp_status;
2375 __le16 scsi_status;
2376 uint16_t ox_id;
2377 __le16 lscsi_status;
2378 int32_t resid;
2379 uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
2380 fw_resid_len;
2381 uint8_t *rsp_info = NULL, *sense_data = NULL;
2382 struct qla_hw_data *ha = vha->hw;
2383 uint32_t hindex, handle;
2384 uint16_t que;
2385 struct req_que *req;
2386 int logit = 1;
2387 int res = 0;
2388
2389 sts = (struct sts_entry_fx00 *) pkt;
2390
2391 comp_status = sts->comp_status;
2392 scsi_status = sts->scsi_status & cpu_to_le16((uint16_t)SS_MASK);
2393 hindex = sts->handle;
2394 handle = LSW(hindex);
2395
2396 que = MSW(hindex);
2397 req = ha->req_q_map[que];
2398
2399 /* Validate handle. */
2400 if (handle < req->num_outstanding_cmds)
2401 sp = req->outstanding_cmds[handle];
2402 else
2403 sp = NULL;
2404
2405 if (sp == NULL) {
2406 ql_dbg(ql_dbg_io, vha, 0x3034,
2407 "Invalid status handle (0x%x).\n", handle);
2408
2409 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2410 qla2xxx_wake_dpc(vha);
2411 return;
2412 }
2413
2414 if (sp->type == SRB_TM_CMD) {
2415 req->outstanding_cmds[handle] = NULL;
2416 qlafx00_tm_iocb_entry(vha, req, pkt, sp,
2417 scsi_status, comp_status);
2418 return;
2419 }
2420
2421 /* Fast path completion. */
2422 if (comp_status == CS_COMPLETE && scsi_status == 0) {
2423 qla2x00_do_host_ramp_up(vha);
2424 qla2x00_process_completed_request(vha, req, handle);
2425 return;
2426 }
2427
2428 req->outstanding_cmds[handle] = NULL;
2429 cp = GET_CMD_SP(sp);
2430 if (cp == NULL) {
2431 ql_dbg(ql_dbg_io, vha, 0x3048,
2432 "Command already returned (0x%x/%p).\n",
2433 handle, sp);
2434
2435 return;
2436 }
2437
2438 lscsi_status = scsi_status & cpu_to_le16((uint16_t)STATUS_MASK);
2439
2440 fcport = sp->fcport;
2441
2442 ox_id = 0;
2443 sense_len = par_sense_len = rsp_info_len = resid_len =
2444 fw_resid_len = 0;
2445 if (scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID))
2446 sense_len = sts->sense_len;
2447 if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
2448 | (uint16_t)SS_RESIDUAL_OVER)))
2449 resid_len = le32_to_cpu(sts->residual_len);
2450 if (comp_status == cpu_to_le16((uint16_t)CS_DATA_UNDERRUN))
2451 fw_resid_len = le32_to_cpu(sts->residual_len);
2452 rsp_info = sense_data = sts->data;
2453 par_sense_len = sizeof(sts->data);
2454
2455 /* Check for overrun. */
2456 if (comp_status == CS_COMPLETE &&
2457 scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_OVER))
2458 comp_status = cpu_to_le16((uint16_t)CS_DATA_OVERRUN);
2459
2460 /*
2461 	 * Based on the host and SCSI status, generate the Linux status code.
2462 */
2463 switch (le16_to_cpu(comp_status)) {
2464 case CS_COMPLETE:
2465 case CS_QUEUE_FULL:
2466 if (scsi_status == 0) {
2467 res = DID_OK << 16;
2468 break;
2469 }
2470 if (scsi_status & cpu_to_le16(((uint16_t)SS_RESIDUAL_UNDER
2471 | (uint16_t)SS_RESIDUAL_OVER))) {
2472 resid = resid_len;
2473 scsi_set_resid(cp, resid);
2474
2475 if (!lscsi_status &&
2476 ((unsigned)(scsi_bufflen(cp) - resid) <
2477 cp->underflow)) {
2478 ql_dbg(ql_dbg_io, fcport->vha, 0x3050,
2479 "Mid-layer underflow "
2480 "detected (0x%x of 0x%x bytes).\n",
2481 resid, scsi_bufflen(cp));
2482
2483 res = DID_ERROR << 16;
2484 break;
2485 }
2486 }
2487 res = DID_OK << 16 | le16_to_cpu(lscsi_status);
2488
2489 if (lscsi_status ==
2490 cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
2491 ql_dbg(ql_dbg_io, fcport->vha, 0x3051,
2492 "QUEUE FULL detected.\n");
2493 break;
2494 }
2495 logit = 0;
2496 if (lscsi_status != cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
2497 break;
2498
2499 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2500 if (!(scsi_status & cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
2501 break;
2502
2503 qlafx00_handle_sense(sp, sense_data, par_sense_len, sense_len,
2504 rsp, res);
2505 break;
2506
2507 case CS_DATA_UNDERRUN:
2508 /* Use F/W calculated residual length. */
2509 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
2510 resid = fw_resid_len;
2511 else
2512 resid = resid_len;
2513 scsi_set_resid(cp, resid);
2514 if (scsi_status & cpu_to_le16((uint16_t)SS_RESIDUAL_UNDER)) {
2515 if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
2516 && fw_resid_len != resid_len) {
2517 ql_dbg(ql_dbg_io, fcport->vha, 0x3052,
2518 "Dropped frame(s) detected "
2519 "(0x%x of 0x%x bytes).\n",
2520 resid, scsi_bufflen(cp));
2521
2522 res = DID_ERROR << 16 |
2523 le16_to_cpu(lscsi_status);
2524 goto check_scsi_status;
2525 }
2526
2527 if (!lscsi_status &&
2528 ((unsigned)(scsi_bufflen(cp) - resid) <
2529 cp->underflow)) {
2530 ql_dbg(ql_dbg_io, fcport->vha, 0x3053,
2531 "Mid-layer underflow "
2532 "detected (0x%x of 0x%x bytes, "
2533 "cp->underflow: 0x%x).\n",
2534 resid, scsi_bufflen(cp), cp->underflow);
2535
2536 res = DID_ERROR << 16;
2537 break;
2538 }
2539 } else if (lscsi_status !=
2540 cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL) &&
2541 lscsi_status != cpu_to_le16((uint16_t)SAM_STAT_BUSY)) {
2542 /*
2543 			 * SCSI statuses TASK SET FULL and BUSY are treated as
2544 			 * the task not having completed.
2545 */
2546
2547 ql_dbg(ql_dbg_io, fcport->vha, 0x3054,
2548 "Dropped frame(s) detected (0x%x "
2549 "of 0x%x bytes).\n", resid,
2550 scsi_bufflen(cp));
2551
2552 res = DID_ERROR << 16 | le16_to_cpu(lscsi_status);
2553 goto check_scsi_status;
2554 } else {
2555 ql_dbg(ql_dbg_io, fcport->vha, 0x3055,
2556 "scsi_status: 0x%x, lscsi_status: 0x%x\n",
2557 scsi_status, lscsi_status);
2558 }
2559
2560 res = DID_OK << 16 | le16_to_cpu(lscsi_status);
2561 logit = 0;
2562
2563 check_scsi_status:
2564 /*
2565 * Check to see if SCSI Status is non zero. If so report SCSI
2566 * Status.
2567 */
2568 if (lscsi_status != 0) {
2569 if (lscsi_status ==
2570 cpu_to_le16((uint16_t)SAM_STAT_TASK_SET_FULL)) {
2571 ql_dbg(ql_dbg_io, fcport->vha, 0x3056,
2572 "QUEUE FULL detected.\n");
2573 logit = 1;
2574 break;
2575 }
2576 if (lscsi_status !=
2577 cpu_to_le16((uint16_t)SS_CHECK_CONDITION))
2578 break;
2579
2580 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2581 if (!(scsi_status &
2582 cpu_to_le16((uint16_t)SS_SENSE_LEN_VALID)))
2583 break;
2584
2585 qlafx00_handle_sense(sp, sense_data, par_sense_len,
2586 sense_len, rsp, res);
2587 }
2588 break;
2589
2590 case CS_PORT_LOGGED_OUT:
2591 case CS_PORT_CONFIG_CHG:
2592 case CS_PORT_BUSY:
2593 case CS_INCOMPLETE:
2594 case CS_PORT_UNAVAILABLE:
2595 case CS_TIMEOUT:
2596 case CS_RESET:
2597
2598 /*
2599 * We are going to have the fc class block the rport
2600 * while we try to recover so instruct the mid layer
2601 * to requeue until the class decides how to handle this.
2602 */
2603 res = DID_TRANSPORT_DISRUPTED << 16;
2604
2605 ql_dbg(ql_dbg_io, fcport->vha, 0x3057,
2606 "Port down status: port-state=0x%x.\n",
2607 atomic_read(&fcport->state));
2608
2609 if (atomic_read(&fcport->state) == FCS_ONLINE)
2610 qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
2611 break;
2612
2613 case CS_ABORTED:
2614 res = DID_RESET << 16;
2615 break;
2616
2617 default:
2618 res = DID_ERROR << 16;
2619 break;
2620 }
2621
2622 if (logit)
2623 ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
2624 "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
2625 "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
2626 "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
2627 "par_sense_len=0x%x, rsp_info_len=0x%x\n",
2628 comp_status, scsi_status, res, vha->host_no,
2629 cp->device->id, cp->device->lun, fcport->tgt_id,
2630 lscsi_status, cp->cmnd, scsi_bufflen(cp),
2631 rsp_info_len, resid_len, fw_resid_len, sense_len,
2632 par_sense_len, rsp_info_len);
2633
2634 if (!res)
2635 qla2x00_do_host_ramp_up(vha);
2636
2637 if (rsp->status_srb == NULL)
2638 sp->done(ha, sp, res);
2639 }
2640
2641 /**
2642 * qlafx00_status_cont_entry() - Process a Status Continuations entry.
2643 * @rsp: response queue pointer
2644 * @pkt: Entry pointer
2645 *
2646 * Extended sense data.
2647 */
2648 static void
2649 qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
2650 {
2651 uint8_t sense_sz = 0;
2652 struct qla_hw_data *ha = rsp->hw;
2653 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2654 srb_t *sp = rsp->status_srb;
2655 struct scsi_cmnd *cp;
2656 uint32_t sense_len;
2657 uint8_t *sense_ptr;
2658
2659 if (!sp) {
2660 ql_dbg(ql_dbg_io, vha, 0x3037,
2661 "no SP, sp = %p\n", sp);
2662 return;
2663 }
2664
2665 if (!GET_FW_SENSE_LEN(sp)) {
2666 ql_dbg(ql_dbg_io, vha, 0x304b,
2667 "no fw sense data, sp = %p\n", sp);
2668 return;
2669 }
2670 cp = GET_CMD_SP(sp);
2671 if (cp == NULL) {
2672 ql_log(ql_log_warn, vha, 0x303b,
2673 "cmd is NULL: already returned to OS (sp=%p).\n", sp);
2674
2675 rsp->status_srb = NULL;
2676 return;
2677 }
2678
2679 if (!GET_CMD_SENSE_LEN(sp)) {
2680 ql_dbg(ql_dbg_io, vha, 0x304c,
2681 "no sense data, sp = %p\n", sp);
2682 } else {
2683 sense_len = GET_CMD_SENSE_LEN(sp);
2684 sense_ptr = GET_CMD_SENSE_PTR(sp);
2685 ql_dbg(ql_dbg_io, vha, 0x304f,
2686 "sp=%p sense_len=0x%x sense_ptr=%p.\n",
2687 sp, sense_len, sense_ptr);
2688
2689 if (sense_len > sizeof(pkt->data))
2690 sense_sz = sizeof(pkt->data);
2691 else
2692 sense_sz = sense_len;
2693
2694 /* Move sense data. */
2695 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304e,
2696 (uint8_t *)pkt, sizeof(sts_cont_entry_t));
2697 memcpy(sense_ptr, pkt->data, sense_sz);
2698 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x304a,
2699 sense_ptr, sense_sz);
2700
2701 sense_len -= sense_sz;
2702 sense_ptr += sense_sz;
2703
2704 SET_CMD_SENSE_PTR(sp, sense_ptr);
2705 SET_CMD_SENSE_LEN(sp, sense_len);
2706 }
2707 sense_len = GET_FW_SENSE_LEN(sp);
2708 sense_len = (sense_len > sizeof(pkt->data)) ?
2709 (sense_len - sizeof(pkt->data)) : 0;
2710 SET_FW_SENSE_LEN(sp, sense_len);
2711
2712 /* Place command on done queue. */
2713 if (sense_len == 0) {
2714 rsp->status_srb = NULL;
2715 sp->done(ha, sp, cp->result);
2716 }
2717 }
2718
2719 /**
2720 * qlafx00_multistatus_entry() - Process Multi response queue entries.
2721 * @ha: SCSI driver HA context
2722 */
2723 static void
2724 qlafx00_multistatus_entry(struct scsi_qla_host *vha,
2725 struct rsp_que *rsp, void *pkt)
2726 {
2727 srb_t *sp;
2728 struct multi_sts_entry_fx00 *stsmfx;
2729 struct qla_hw_data *ha = vha->hw;
2730 uint32_t handle, hindex, handle_count, i;
2731 uint16_t que;
2732 struct req_que *req;
2733 __le32 *handle_ptr;
2734
2735 stsmfx = (struct multi_sts_entry_fx00 *) pkt;
2736
2737 handle_count = stsmfx->handle_count;
2738
2739 if (handle_count > MAX_HANDLE_COUNT) {
2740 ql_dbg(ql_dbg_io, vha, 0x3035,
2741 "Invalid handle count (0x%x).\n", handle_count);
2742 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2743 qla2xxx_wake_dpc(vha);
2744 return;
2745 }
2746
2747 handle_ptr = &stsmfx->handles[0];
2748
2749 for (i = 0; i < handle_count; i++) {
2750 hindex = le32_to_cpu(*handle_ptr);
2751 handle = LSW(hindex);
2752 que = MSW(hindex);
2753 req = ha->req_q_map[que];
2754
2755 /* Validate handle. */
2756 if (handle < req->num_outstanding_cmds)
2757 sp = req->outstanding_cmds[handle];
2758 else
2759 sp = NULL;
2760
2761 if (sp == NULL) {
2762 ql_dbg(ql_dbg_io, vha, 0x3044,
2763 "Invalid status handle (0x%x).\n", handle);
2764 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2765 qla2xxx_wake_dpc(vha);
2766 return;
2767 }
2768 qla2x00_process_completed_request(vha, req, handle);
2769 handle_ptr++;
2770 }
2771 }
2772
2773 /**
2774 * qlafx00_error_entry() - Process an error entry.
2775 * @ha: SCSI driver HA context
2776 * @pkt: Entry pointer
2777 */
2778 static void
2779 qlafx00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp,
2780 struct sts_entry_fx00 *pkt, uint8_t estatus, uint8_t etype)
2781 {
2782 srb_t *sp;
2783 struct qla_hw_data *ha = vha->hw;
2784 const char func[] = "ERROR-IOCB";
2785 uint16_t que = MSW(pkt->handle);
2786 struct req_que *req = NULL;
2787 int res = DID_ERROR << 16;
2788
2789 ql_dbg(ql_dbg_async, vha, 0x507f,
2790 "type of error status in response: 0x%x\n", estatus);
2791
2792 req = ha->req_q_map[que];
2793
2794 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
2795 if (sp) {
2796 sp->done(ha, sp, res);
2797 return;
2798 }
2799
2800 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2801 qla2xxx_wake_dpc(vha);
2802 }
2803
2804 /**
2805 * qlafx00_process_response_queue() - Process response queue entries.
2806 * @ha: SCSI driver HA context
2807 */
2808 static void
2809 qlafx00_process_response_queue(struct scsi_qla_host *vha,
2810 struct rsp_que *rsp)
2811 {
2812 struct sts_entry_fx00 *pkt;
2813 response_t *lptr;
2814
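	/* Consume ring entries until one already marked RESPONSE_PROCESSED is reached. */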
2815 while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
2816 RESPONSE_PROCESSED) {
2817 lptr = rsp->ring_ptr;
2818 memcpy_fromio(rsp->rsp_pkt, (void __iomem *)lptr,
2819 sizeof(rsp->rsp_pkt));
2820 pkt = (struct sts_entry_fx00 *)rsp->rsp_pkt;
2821
2822 rsp->ring_index++;
2823 if (rsp->ring_index == rsp->length) {
2824 rsp->ring_index = 0;
2825 rsp->ring_ptr = rsp->ring;
2826 } else {
2827 rsp->ring_ptr++;
2828 }
2829
2830 if (pkt->entry_status != 0 &&
2831 pkt->entry_type != IOCTL_IOSB_TYPE_FX00) {
2832 qlafx00_error_entry(vha, rsp,
2833 (struct sts_entry_fx00 *)pkt, pkt->entry_status,
2834 pkt->entry_type);
2835 goto next_iter;
2837 }
2838
2839 switch (pkt->entry_type) {
2840 case STATUS_TYPE_FX00:
2841 qlafx00_status_entry(vha, rsp, pkt);
2842 break;
2843
2844 case STATUS_CONT_TYPE_FX00:
2845 qlafx00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
2846 break;
2847
2848 case MULTI_STATUS_TYPE_FX00:
2849 qlafx00_multistatus_entry(vha, rsp, pkt);
2850 break;
2851
2852 case ABORT_IOCB_TYPE_FX00:
2853 qlafx00_abort_iocb_entry(vha, rsp->req,
2854 (struct abort_iocb_entry_fx00 *)pkt);
2855 break;
2856
2857 case IOCTL_IOSB_TYPE_FX00:
2858 qlafx00_ioctl_iosb_entry(vha, rsp->req,
2859 (struct ioctl_iocb_entry_fx00 *)pkt);
2860 break;
2861 default:
2862 /* Type Not Supported. */
2863 ql_dbg(ql_dbg_async, vha, 0x5081,
2864 "Received unknown response pkt type %x "
2865 "entry status=%x.\n",
2866 pkt->entry_type, pkt->entry_status);
2867 break;
2868 }
2869 next_iter:
2870 WRT_REG_DWORD((void __iomem *)&lptr->signature,
2871 RESPONSE_PROCESSED);
2872 wmb();
2873 }
2874
2875 /* Adjust ring index */
2876 WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
2877 }
2878
2879 /**
2880 * qlafx00_async_event() - Process asynchronous events.
2881 * @ha: SCSI driver HA context
2882 */
2883 static void
2884 qlafx00_async_event(scsi_qla_host_t *vha)
2885 {
2886 struct qla_hw_data *ha = vha->hw;
2887 struct device_reg_fx00 __iomem *reg;
2888 int data_size = 1;
2889
2890 reg = &ha->iobase->ispfx00;
2891 	/* Dispatch the asynchronous event reported in AEN mailbox 0. */
2892 switch (ha->aenmb[0]) {
2893 case QLAFX00_MBA_SYSTEM_ERR: /* System Error */
2894 ql_log(ql_log_warn, vha, 0x5079,
2895 "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
2896 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2897 break;
2898
2899 case QLAFX00_MBA_SHUTDOWN_RQSTD: /* Shutdown requested */
2900 ql_dbg(ql_dbg_async, vha, 0x5076,
2901 "Asynchronous FW shutdown requested.\n");
2902 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2903 qla2xxx_wake_dpc(vha);
2904 break;
2905
2906 case QLAFX00_MBA_PORT_UPDATE: /* Port database update */
2907 ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
2908 ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
2909 ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
2910 ql_dbg(ql_dbg_async, vha, 0x5077,
2911 "Asynchronous port Update received "
2912 "aenmb[0]: %x, aenmb[1]: %x, aenmb[2]: %x, aenmb[3]: %x\n",
2913 ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
2914 data_size = 4;
2915 break;
2916
2917 case QLAFX00_MBA_TEMP_OVER: /* Over temperature event */
2918 case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
2919 ql_log(ql_log_info, vha, 0x5083,
2920 "Asynchronous critical temperature event received "
2921 "aenmb[0]: %x\n",
2922 ha->aenmb[0]);
2923 qlafx00_post_aenfx_work(vha, ha->aenmb[0],
2924 (uint32_t *)ha->aenmb, 1);
2925 break;
2926
2927 default:
2928 ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
2929 ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
2930 ha->aenmb[3] = RD_REG_WORD(&reg->aenmailbox3);
2931 ha->aenmb[4] = RD_REG_WORD(&reg->aenmailbox4);
2932 ha->aenmb[5] = RD_REG_WORD(&reg->aenmailbox5);
2933 ha->aenmb[6] = RD_REG_WORD(&reg->aenmailbox6);
2934 ha->aenmb[7] = RD_REG_WORD(&reg->aenmailbox7);
2935 ql_dbg(ql_dbg_async, vha, 0x5078,
2936 "AEN:%04x %04x %04x %04x :%04x %04x %04x %04x\n",
2937 ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
2938 ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
2939 break;
2940 }
2941 qlafx00_post_aenfx_work(vha, ha->aenmb[0],
2942 (uint32_t *)ha->aenmb, data_size);
2943 }
2944
2945 /**
2946 *
2947 * qlafx00_mbx_completion() - Process mailbox command completions.
2948 * @vha: SCSI driver HA context
2949 * @mb0: value read from mailbox register 0 (completion status)
2950 */
2951 static void
2952 qlafx00_mbx_completion(scsi_qla_host_t *vha, uint32_t mb0)
2953 {
2954 uint16_t cnt;
2955 uint16_t __iomem *wptr;
2956 struct qla_hw_data *ha = vha->hw;
2957 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
2958
2959 if (!ha->mcp32)
2960 ql_dbg(ql_dbg_async, vha, 0x507e, "MBX pointer ERROR.\n");
2961
2962 /* Load return mailbox registers. */
2963 ha->flags.mbox_int = 1;
2964 ha->mailbox_out32[0] = mb0;
2965 wptr = (uint16_t __iomem *)&reg->mailbox17;
2966
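	/* The remaining completion status words are read starting at the mailbox17 register. */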
2967 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2968 ha->mailbox_out32[cnt] = RD_REG_WORD(wptr);
2969 wptr++;
2970 }
2971 }
2972
2973 /**
2974 * qlafx00_intr_handler() - Process interrupts for the ISPFX00.
2975 * @irq: interrupt number
2976 * @dev_id: SCSI driver HA context
2977 *
2978 * Called by system whenever the host adapter generates an interrupt.
2979 *
2980 * Returns handled flag.
2981 */
2982 irqreturn_t
2983 qlafx00_intr_handler(int irq, void *dev_id)
2984 {
2985 scsi_qla_host_t *vha;
2986 struct qla_hw_data *ha;
2987 struct device_reg_fx00 __iomem *reg;
2988 int status;
2989 unsigned long iter;
2990 uint32_t stat;
2991 uint32_t mb[8];
2992 struct rsp_que *rsp;
2993 unsigned long flags;
2994 uint32_t clr_intr = 0;
2995
2996 rsp = (struct rsp_que *) dev_id;
2997 if (!rsp) {
2998 ql_log(ql_log_info, NULL, 0x507d,
2999 "%s: NULL response queue pointer.\n", __func__);
3000 return IRQ_NONE;
3001 }
3002
3003 ha = rsp->hw;
3004 reg = &ha->iobase->ispfx00;
3005 status = 0;
3006
3007 if (unlikely(pci_channel_offline(ha->pdev)))
3008 return IRQ_HANDLED;
3009
3010 spin_lock_irqsave(&ha->hardware_lock, flags);
3011 vha = pci_get_drvdata(ha->pdev);
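	/* Service at most 50 interrupt status indications before releasing the lock. */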
3012 for (iter = 50; iter--; clr_intr = 0) {
3013 stat = QLAFX00_RD_INTR_REG(ha);
3014 if ((stat & QLAFX00_HST_INT_STS_BITS) == 0)
3015 break;
3016
3017 switch (stat & QLAFX00_HST_INT_STS_BITS) {
3018 case QLAFX00_INTR_MB_CMPLT:
3019 case QLAFX00_INTR_MB_RSP_CMPLT:
3020 case QLAFX00_INTR_MB_ASYNC_CMPLT:
3021 case QLAFX00_INTR_ALL_CMPLT:
3022 mb[0] = RD_REG_WORD(&reg->mailbox16);
3023 qlafx00_mbx_completion(vha, mb[0]);
3024 status |= MBX_INTERRUPT;
3025 clr_intr |= QLAFX00_INTR_MB_CMPLT;
3026 break;
3027 case QLAFX00_INTR_ASYNC_CMPLT:
3028 case QLAFX00_INTR_RSP_ASYNC_CMPLT:
3029 ha->aenmb[0] = RD_REG_WORD(&reg->aenmailbox0);
3030 qlafx00_async_event(vha);
3031 clr_intr |= QLAFX00_INTR_ASYNC_CMPLT;
3032 break;
3033 case QLAFX00_INTR_RSP_CMPLT:
3034 qlafx00_process_response_queue(vha, rsp);
3035 clr_intr |= QLAFX00_INTR_RSP_CMPLT;
3036 break;
3037 default:
3038 ql_dbg(ql_dbg_async, vha, 0x507a,
3039 "Unrecognized interrupt type (%d).\n", stat);
3040 break;
3041 }
3042 QLAFX00_CLR_INTR_REG(ha, clr_intr);
3043 QLAFX00_RD_INTR_REG(ha);
3044 }
3045
3046 qla2x00_handle_mbx_completion(ha, status);
3047 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3048
3049 return IRQ_HANDLED;
3050 }
3051
3052 /** QLAFX00 specific IOCB implementation functions */
3053
3054 static inline cont_a64_entry_t *
3055 qlafx00_prep_cont_type1_iocb(struct req_que *req,
3056 cont_a64_entry_t *lcont_pkt)
3057 {
3058 cont_a64_entry_t *cont_pkt;
3059
3060 /* Adjust ring index. */
3061 req->ring_index++;
3062 if (req->ring_index == req->length) {
3063 req->ring_index = 0;
3064 req->ring_ptr = req->ring;
3065 } else {
3066 req->ring_ptr++;
3067 }
3068
3069 cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
3070
3071 /* Load packet defaults. */
3072 lcont_pkt->entry_type = CONTINUE_A64_TYPE_FX00;
3073
3074 return cont_pkt;
3075 }
3076
3077 static inline void
3078 qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt,
3079 uint16_t tot_dsds, struct cmd_type_7_fx00 *lcmd_pkt)
3080 {
3081 uint16_t avail_dsds;
3082 __le32 *cur_dsd;
3083 scsi_qla_host_t *vha;
3084 struct scsi_cmnd *cmd;
3085 struct scatterlist *sg;
3086 int i, cont;
3087 struct req_que *req;
3088 cont_a64_entry_t lcont_pkt;
3089 cont_a64_entry_t *cont_pkt;
3090
3091 vha = sp->fcport->vha;
3092 req = vha->req;
3093
3094 cmd = GET_CMD_SP(sp);
3095 cont = 0;
3096 cont_pkt = NULL;
3097
3098 	/* Update entry type to indicate Command Type 7 IOCB */
3099 lcmd_pkt->entry_type = FX00_COMMAND_TYPE_7;
3100
3101 /* No data transfer */
3102 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
3103 lcmd_pkt->byte_count = __constant_cpu_to_le32(0);
3104 return;
3105 }
3106
3107 /* Set transfer direction */
3108 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
3109 lcmd_pkt->cntrl_flags = TMF_WRITE_DATA;
3110 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
3111 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
3112 lcmd_pkt->cntrl_flags = TMF_READ_DATA;
3113 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
3114 }
3115
3116 	/* One DSD is available in the Command Type 7 IOCB */
3117 avail_dsds = 1;
3118 cur_dsd = (__le32 *)&lcmd_pkt->dseg_0_address;
3119
3120 /* Load data segments */
3121 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
3122 dma_addr_t sle_dma;
3123
3124 /* Allocate additional continuation packets? */
3125 if (avail_dsds == 0) {
3126 /*
3127 * Five DSDs are available in the Continuation
3128 * Type 1 IOCB.
3129 */
3130 memset(&lcont_pkt, 0, REQUEST_ENTRY_SIZE);
3131 cont_pkt =
3132 qlafx00_prep_cont_type1_iocb(req, &lcont_pkt);
3133 cur_dsd = (__le32 *)lcont_pkt.dseg_0_address;
3134 avail_dsds = 5;
3135 cont = 1;
3136 }
3137
3138 sle_dma = sg_dma_address(sg);
3139 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3140 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3141 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3142 avail_dsds--;
3143 if (avail_dsds == 0 && cont == 1) {
3144 cont = 0;
3145 memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
3146 REQUEST_ENTRY_SIZE);
3147 }
3148
3149 }
3150 if (avail_dsds != 0 && cont == 1) {
3151 memcpy_toio((void __iomem *)cont_pkt, &lcont_pkt,
3152 REQUEST_ENTRY_SIZE);
3153 }
3154 }
3155
3156 /**
3157 * qlafx00_start_scsi() - Send a SCSI command to the ISP
3158 * @sp: command to send to the ISP
3159 *
3160 * Returns non-zero if a failure occurred, else zero.
3161 */
3162 int
3163 qlafx00_start_scsi(srb_t *sp)
3164 {
3165 int ret, nseg;
3166 unsigned long flags;
3167 uint32_t index;
3168 uint32_t handle;
3169 uint16_t cnt;
3170 uint16_t req_cnt;
3171 uint16_t tot_dsds;
3172 struct req_que *req = NULL;
3173 struct rsp_que *rsp = NULL;
3174 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
3175 struct scsi_qla_host *vha = sp->fcport->vha;
3176 struct qla_hw_data *ha = vha->hw;
3177 struct cmd_type_7_fx00 *cmd_pkt;
3178 struct cmd_type_7_fx00 lcmd_pkt;
3179 struct scsi_lun llun;
3180 char tag[2];
3181
3182 /* Setup device pointers. */
3183 ret = 0;
3184
3185 rsp = ha->rsp_q_map[0];
3186 req = vha->req;
3187
3188 /* So we know we haven't pci_map'ed anything yet */
3189 tot_dsds = 0;
3190
3191 	/* Markers are not needed on ISPFX00; clear the flag so none is sent. */
3192 vha->marker_needed = 0;
3193
3194 /* Send marker if required */
3195 if (vha->marker_needed != 0) {
3196 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
3197 QLA_SUCCESS)
3198 return QLA_FUNCTION_FAILED;
3199 vha->marker_needed = 0;
3200 }
3201
3202 /* Acquire ring specific lock */
3203 spin_lock_irqsave(&ha->hardware_lock, flags);
3204
3205 /* Check for room in outstanding command list. */
3206 handle = req->current_outstanding_cmd;
3207 for (index = 1; index < req->num_outstanding_cmds; index++) {
3208 handle++;
3209 if (handle == req->num_outstanding_cmds)
3210 handle = 1;
3211 if (!req->outstanding_cmds[handle])
3212 break;
3213 }
3214 if (index == req->num_outstanding_cmds)
3215 goto queuing_error;
3216
3217 /* Map the sg table so we have an accurate count of sg entries needed */
3218 if (scsi_sg_count(cmd)) {
3219 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3220 scsi_sg_count(cmd), cmd->sc_data_direction);
3221 if (unlikely(!nseg))
3222 goto queuing_error;
3223 } else
3224 nseg = 0;
3225
3226 tot_dsds = nseg;
3227 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
3228 if (req->cnt < (req_cnt + 2)) {
3229 cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
3230
3231 if (req->ring_index < cnt)
3232 req->cnt = cnt - req->ring_index;
3233 else
3234 req->cnt = req->length -
3235 (req->ring_index - cnt);
3236 if (req->cnt < (req_cnt + 2))
3237 goto queuing_error;
3238 }
3239
3240 /* Build command packet. */
3241 req->current_outstanding_cmd = handle;
3242 req->outstanding_cmds[handle] = sp;
3243 sp->handle = handle;
3244 cmd->host_scribble = (unsigned char *)(unsigned long)handle;
3245 req->cnt -= req_cnt;
3246
3247 cmd_pkt = (struct cmd_type_7_fx00 *)req->ring_ptr;
3248
3249 memset(&lcmd_pkt, 0, REQUEST_ENTRY_SIZE);
3250
3251 lcmd_pkt.handle = MAKE_HANDLE(req->id, sp->handle);
3252 lcmd_pkt.handle_hi = 0;
3253 lcmd_pkt.dseg_count = cpu_to_le16(tot_dsds);
3254 lcmd_pkt.tgt_idx = cpu_to_le16(sp->fcport->tgt_id);
3255
3256 int_to_scsilun(cmd->device->lun, &llun);
3257 host_to_adap((uint8_t *)&llun, (uint8_t *)&lcmd_pkt.lun,
3258 sizeof(lcmd_pkt.lun));
3259
3260 /* Update tagged queuing modifier -- default is TSK_SIMPLE (0). */
3261 if (scsi_populate_tag_msg(cmd, tag)) {
3262 switch (tag[0]) {
3263 case HEAD_OF_QUEUE_TAG:
3264 lcmd_pkt.task = TSK_HEAD_OF_QUEUE;
3265 break;
3266 case ORDERED_QUEUE_TAG:
3267 lcmd_pkt.task = TSK_ORDERED;
3268 break;
3269 }
3270 }
3271
3272 /* Load SCSI command packet. */
3273 host_to_adap(cmd->cmnd, lcmd_pkt.fcp_cdb, sizeof(lcmd_pkt.fcp_cdb));
3274 lcmd_pkt.byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
3275
3276 /* Build IOCB segments */
3277 qlafx00_build_scsi_iocbs(sp, cmd_pkt, tot_dsds, &lcmd_pkt);
3278
3279 /* Set total data segment count. */
3280 lcmd_pkt.entry_count = (uint8_t)req_cnt;
3281
3282 /* Specify response queue number where completion should happen */
3283 lcmd_pkt.entry_status = (uint8_t) rsp->id;
3284
3285 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
3286 (uint8_t *)cmd->cmnd, cmd->cmd_len);
3287 ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x3032,
3288 (uint8_t *)&lcmd_pkt, REQUEST_ENTRY_SIZE);
3289
3290 memcpy_toio((void __iomem *)cmd_pkt, &lcmd_pkt, REQUEST_ENTRY_SIZE);
3291 wmb();
3292
3293 /* Adjust ring index. */
3294 req->ring_index++;
3295 if (req->ring_index == req->length) {
3296 req->ring_index = 0;
3297 req->ring_ptr = req->ring;
3298 } else
3299 req->ring_ptr++;
3300
3301 sp->flags |= SRB_DMA_VALID;
3302
3303 /* Set chip new ring index. */
3304 WRT_REG_DWORD(req->req_q_in, req->ring_index);
3305 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
3306
3307 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3308 return QLA_SUCCESS;
3309
3310 queuing_error:
3311 if (tot_dsds)
3312 scsi_dma_unmap(cmd);
3313
3314 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3315
3316 return QLA_FUNCTION_FAILED;
3317 }
3318
3319 void
3320 qlafx00_tm_iocb(srb_t *sp, struct tsk_mgmt_entry_fx00 *ptm_iocb)
3321 {
3322 struct srb_iocb *fxio = &sp->u.iocb_cmd;
3323 scsi_qla_host_t *vha = sp->fcport->vha;
3324 struct req_que *req = vha->req;
3325 struct tsk_mgmt_entry_fx00 tm_iocb;
3326 struct scsi_lun llun;
3327
3328 memset(&tm_iocb, 0, sizeof(struct tsk_mgmt_entry_fx00));
3329 tm_iocb.entry_type = TSK_MGMT_IOCB_TYPE_FX00;
3330 tm_iocb.entry_count = 1;
3331 tm_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3332 tm_iocb.handle_hi = 0;
3333 tm_iocb.timeout = cpu_to_le16(qla2x00_get_async_timeout(vha) + 2);
3334 tm_iocb.tgt_id = cpu_to_le16(sp->fcport->tgt_id);
3335 tm_iocb.control_flags = cpu_to_le32(fxio->u.tmf.flags);
3336 if (tm_iocb.control_flags == cpu_to_le32((uint32_t)TCF_LUN_RESET)) {
3337 int_to_scsilun(fxio->u.tmf.lun, &llun);
3338 host_to_adap((uint8_t *)&llun, (uint8_t *)&tm_iocb.lun,
3339 sizeof(struct scsi_lun));
3340 }
3341
3342 memcpy((void *)ptm_iocb, &tm_iocb,
3343 sizeof(struct tsk_mgmt_entry_fx00));
3344 wmb();
3345 }
3346
3347 void
3348 qlafx00_abort_iocb(srb_t *sp, struct abort_iocb_entry_fx00 *pabt_iocb)
3349 {
3350 struct srb_iocb *fxio = &sp->u.iocb_cmd;
3351 scsi_qla_host_t *vha = sp->fcport->vha;
3352 struct req_que *req = vha->req;
3353 struct abort_iocb_entry_fx00 abt_iocb;
3354
3355 memset(&abt_iocb, 0, sizeof(struct abort_iocb_entry_fx00));
3356 abt_iocb.entry_type = ABORT_IOCB_TYPE_FX00;
3357 abt_iocb.entry_count = 1;
3358 abt_iocb.handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
3359 abt_iocb.abort_handle =
3360 cpu_to_le32(MAKE_HANDLE(req->id, fxio->u.abt.cmd_hndl));
3361 abt_iocb.tgt_id_sts = cpu_to_le16(sp->fcport->tgt_id);
3362 abt_iocb.req_que_no = cpu_to_le16(req->id);
3363
3364 memcpy((void *)pabt_iocb, &abt_iocb,
3365 sizeof(struct abort_iocb_entry_fx00));
3366 wmb();
3367 }
3368
3369 void
3370 qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
3371 {
3372 struct srb_iocb *fxio = &sp->u.iocb_cmd;
3373 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
3374 struct fc_bsg_job *bsg_job;
3375 struct fxdisc_entry_fx00 fx_iocb;
3376 uint8_t entry_cnt = 1;
3377
3378 memset(&fx_iocb, 0, sizeof(struct fxdisc_entry_fx00));
3379 fx_iocb.entry_type = FX00_IOCB_TYPE;
3380 fx_iocb.handle = cpu_to_le32(sp->handle);
3381 fx_iocb.entry_count = entry_cnt;
3382
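	/*
	 * Driver-internal FXDISC requests carry their parameters in the
	 * srb_iocb; BSG pass-through requests are built from the bsg_job
	 * payload in the else branch below.
	 */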
3383 if (sp->type == SRB_FXIOCB_DCMD) {
3384 fx_iocb.func_num =
3385 sp->u.iocb_cmd.u.fxiocb.req_func_type;
3386 fx_iocb.adapid = fxio->u.fxiocb.adapter_id;
3387 fx_iocb.adapid_hi = fxio->u.fxiocb.adapter_id_hi;
3388 fx_iocb.reserved_0 = fxio->u.fxiocb.reserved_0;
3389 fx_iocb.reserved_1 = fxio->u.fxiocb.reserved_1;
3390 fx_iocb.dataword_extra = fxio->u.fxiocb.req_data_extra;
3391
3392 if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DMA_VALID) {
3393 fx_iocb.req_dsdcnt = cpu_to_le16(1);
3394 fx_iocb.req_xfrcnt =
3395 cpu_to_le16(fxio->u.fxiocb.req_len);
3396 fx_iocb.dseg_rq_address[0] =
3397 cpu_to_le32(LSD(fxio->u.fxiocb.req_dma_handle));
3398 fx_iocb.dseg_rq_address[1] =
3399 cpu_to_le32(MSD(fxio->u.fxiocb.req_dma_handle));
3400 fx_iocb.dseg_rq_len =
3401 cpu_to_le32(fxio->u.fxiocb.req_len);
3402 }
3403
3404 if (fxio->u.fxiocb.flags & SRB_FXDISC_RESP_DMA_VALID) {
3405 fx_iocb.rsp_dsdcnt = cpu_to_le16(1);
3406 fx_iocb.rsp_xfrcnt =
3407 cpu_to_le16(fxio->u.fxiocb.rsp_len);
3408 fx_iocb.dseg_rsp_address[0] =
3409 cpu_to_le32(LSD(fxio->u.fxiocb.rsp_dma_handle));
3410 fx_iocb.dseg_rsp_address[1] =
3411 cpu_to_le32(MSD(fxio->u.fxiocb.rsp_dma_handle));
3412 fx_iocb.dseg_rsp_len =
3413 cpu_to_le32(fxio->u.fxiocb.rsp_len);
3414 }
3415
3416 if (fxio->u.fxiocb.flags & SRB_FXDISC_REQ_DWRD_VALID) {
3417 fx_iocb.dataword = fxio->u.fxiocb.req_data;
3418 }
3419 fx_iocb.flags = fxio->u.fxiocb.flags;
3420 } else {
3421 struct scatterlist *sg;
3422 bsg_job = sp->u.bsg_job;
3423 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
3424 &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
3425
3426 fx_iocb.func_num = piocb_rqst->func_type;
3427 fx_iocb.adapid = piocb_rqst->adapid;
3428 fx_iocb.adapid_hi = piocb_rqst->adapid_hi;
3429 fx_iocb.reserved_0 = piocb_rqst->reserved_0;
3430 fx_iocb.reserved_1 = piocb_rqst->reserved_1;
3431 fx_iocb.dataword_extra = piocb_rqst->dataword_extra;
3432 fx_iocb.dataword = piocb_rqst->dataword;
3433 fx_iocb.req_xfrcnt = piocb_rqst->req_len;
3434 fx_iocb.rsp_xfrcnt = piocb_rqst->rsp_len;
3435
3436 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
3437 int avail_dsds, tot_dsds;
3438 cont_a64_entry_t lcont_pkt;
3439 cont_a64_entry_t *cont_pkt = NULL;
3440 __le32 *cur_dsd;
3441 int index = 0, cont = 0;
3442
3443 fx_iocb.req_dsdcnt =
3444 cpu_to_le16(bsg_job->request_payload.sg_cnt);
3445 tot_dsds =
3446 bsg_job->request_payload.sg_cnt;
3447 cur_dsd = (__le32 *)&fx_iocb.dseg_rq_address[0];
3448 avail_dsds = 1;
3449 for_each_sg(bsg_job->request_payload.sg_list, sg,
3450 tot_dsds, index) {
3451 dma_addr_t sle_dma;
3452
3453 /* Allocate additional continuation packets? */
3454 if (avail_dsds == 0) {
3455 /*
3456 * Five DSDs are available in the Cont.
3457 * Type 1 IOCB.
3458 */
3459 memset(&lcont_pkt, 0,
3460 REQUEST_ENTRY_SIZE);
3461 cont_pkt =
3462 qlafx00_prep_cont_type1_iocb(
3463 sp->fcport->vha->req,
3464 &lcont_pkt);
3465 cur_dsd = (__le32 *)
3466 lcont_pkt.dseg_0_address;
3467 avail_dsds = 5;
3468 cont = 1;
3469 entry_cnt++;
3470 }
3471
3472 sle_dma = sg_dma_address(sg);
3473 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3474 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3475 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3476 avail_dsds--;
3477
3478 if (avail_dsds == 0 && cont == 1) {
3479 cont = 0;
3480 memcpy_toio(
3481 (void __iomem *)cont_pkt,
3482 &lcont_pkt, REQUEST_ENTRY_SIZE);
3483 ql_dump_buffer(
3484 ql_dbg_user + ql_dbg_verbose,
3485 sp->fcport->vha, 0x3042,
3486 (uint8_t *)&lcont_pkt,
3487 REQUEST_ENTRY_SIZE);
3488 }
3489 }
3490 if (avail_dsds != 0 && cont == 1) {
3491 memcpy_toio((void __iomem *)cont_pkt,
3492 &lcont_pkt, REQUEST_ENTRY_SIZE);
3493 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
3494 sp->fcport->vha, 0x3043,
3495 (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
3496 }
3497 }
3498
3499 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
3500 int avail_dsds, tot_dsds;
3501 cont_a64_entry_t lcont_pkt;
3502 cont_a64_entry_t *cont_pkt = NULL;
3503 __le32 *cur_dsd;
3504 int index = 0, cont = 0;
3505
3506 fx_iocb.rsp_dsdcnt =
3507 cpu_to_le16(bsg_job->reply_payload.sg_cnt);
3508 tot_dsds = bsg_job->reply_payload.sg_cnt;
3509 cur_dsd = (__le32 *)&fx_iocb.dseg_rsp_address[0];
3510 avail_dsds = 1;
3511
3512 for_each_sg(bsg_job->reply_payload.sg_list, sg,
3513 tot_dsds, index) {
3514 dma_addr_t sle_dma;
3515
3516 /* Allocate additional continuation packets? */
3517 if (avail_dsds == 0) {
3518 /*
3519 * Five DSDs are available in the Cont.
3520 * Type 1 IOCB.
3521 */
3522 memset(&lcont_pkt, 0,
3523 REQUEST_ENTRY_SIZE);
3524 cont_pkt =
3525 qlafx00_prep_cont_type1_iocb(
3526 sp->fcport->vha->req,
3527 &lcont_pkt);
3528 cur_dsd = (__le32 *)
3529 lcont_pkt.dseg_0_address;
3530 avail_dsds = 5;
3531 cont = 1;
3532 entry_cnt++;
3533 }
3534
3535 sle_dma = sg_dma_address(sg);
3536 *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
3537 *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
3538 *cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
3539 avail_dsds--;
3540
3541 if (avail_dsds == 0 && cont == 1) {
3542 cont = 0;
3543 memcpy_toio((void __iomem *)cont_pkt,
3544 &lcont_pkt,
3545 REQUEST_ENTRY_SIZE);
3546 ql_dump_buffer(
3547 ql_dbg_user + ql_dbg_verbose,
3548 sp->fcport->vha, 0x3045,
3549 (uint8_t *)&lcont_pkt,
3550 REQUEST_ENTRY_SIZE);
3551 }
3552 }
3553 if (avail_dsds != 0 && cont == 1) {
3554 memcpy_toio((void __iomem *)cont_pkt,
3555 &lcont_pkt, REQUEST_ENTRY_SIZE);
3556 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
3557 sp->fcport->vha, 0x3046,
3558 (uint8_t *)&lcont_pkt, REQUEST_ENTRY_SIZE);
3559 }
3560 }
3561
3562 if (piocb_rqst->flags & SRB_FXDISC_REQ_DWRD_VALID)
3563 fx_iocb.dataword = piocb_rqst->dataword;
3564 fx_iocb.flags = piocb_rqst->flags;
3565 fx_iocb.entry_count = entry_cnt;
3566 }
3567
3568 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose,
3569 sp->fcport->vha, 0x3047,
3570 (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
3571
3572 memcpy((void *)pfxiocb, &fx_iocb,
3573 sizeof(struct fxdisc_entry_fx00));
3574 wmb();
3575 }