Merge branch 'for-2.6.26' of git://git.kernel.dk/linux-2.6-block
[deliverable/linux.git] / drivers / scsi / qla2xxx / qla_isr.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2008 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8
9 #include <linux/delay.h>
10 #include <scsi/scsi_tcq.h>
11
12 static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
13 static void qla2x00_process_completed_request(struct scsi_qla_host *, uint32_t);
14 static void qla2x00_status_entry(scsi_qla_host_t *, void *);
15 static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
16 static void qla2x00_error_entry(scsi_qla_host_t *, sts_entry_t *);
17
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number (unused; @dev_id identifies the adapter)
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock(&ha->hardware_lock);
	/* Bound the number of passes so a stuck interrupt cannot spin us forever. */
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammmer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);	/* read back to flush the posted write */

			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			/* No (more) RISC interrupt pending -- done. */
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			/* Semaphore set: mailbox registers hold valid data. */
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				/* 0x4000..0x7fff: mailbox command completion. */
				qla2x00_mbx_completion(ha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				/* 0x8000..0xbfff: asynchronous event. */
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(ha, mb);
			} else {
				/*EMPTY*/
				DEBUG2(printk("scsi(%ld): Unrecognized "
				    "interrupt type (%d).\n",
				    ha->host_no, mb[0]));
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			/* No mailbox data: response queue entries are pending. */
			qla2x00_process_response_queue(ha);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock(&ha->hardware_lock);

	/* Wake any thread blocked waiting for a mailbox command completion. */
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
108
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number (unused; @dev_id identifies the adapter)
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock(&ha->hardware_lock);
	/* Bound the number of passes so a stuck interrupt cannot spin us forever. */
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (pci_channel_offline(ha->pdev))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				qla_printk(KERN_INFO, ha, "Parity error -- "
				    "HCCR=%x, Dumping firmware!\n", hccr);
			else
				qla_printk(KERN_INFO, ha, "RISC paused -- "
				    "HCCR=%x, Dumping firmware!\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammmer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);	/* read back to flush the posted write */

			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		/* Low byte of host_status encodes the interrupt reason. */
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			/* Mailbox command completion; mailbox0 is in the MSW. */
			qla2x00_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			/* Asynchronous event; gather mailboxes 1-3. */
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			/* Response queue entries available. */
			qla2x00_process_response_queue(ha);
			break;
		case 0x15:
			/* Fast-post completion with a single 16-bit handle. */
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(ha, mb);
			break;
		case 0x16:
			/* SCSI completion posted through the mailbox registers. */
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(ha, mb);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock(&ha->hardware_lock);

	/* Wake any thread blocked waiting for a mailbox command completion. */
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
219
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 *
 * Copies the outgoing mailbox registers into ha->mailbox_out[] and sets
 * flags.mbox_int so the waiting mailbox-command path can consume them.
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
{
	uint16_t	cnt;
	uint16_t __iomem *wptr;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		/* ISP2200 mailboxes 8 and up live at a different offset,
		 * so re-derive the pointer instead of just incrementing. */
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		/* Mailboxes 4 and 5 are read through the debounce helper. */
		if (cnt == 4 || cnt == 5)
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
	}

	if (ha->mcp) {
		DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
		    __func__, ha->host_no, ha->mcp->mb[0]));
	} else {
		/* Completion arrived with no mailbox command outstanding. */
		DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
		    __func__, ha->host_no));
	}
}
256
257 /**
258 * qla2x00_async_event() - Process aynchronous events.
259 * @ha: SCSI driver HA context
260 * @mb: Mailbox registers (0 - 3)
261 */
262 void
263 qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
264 {
265 #define LS_UNKNOWN 2
266 static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
267 char *link_speed;
268 uint16_t handle_cnt;
269 uint16_t cnt;
270 uint32_t handles[5];
271 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
272 uint32_t rscn_entry, host_pid;
273 uint8_t rscn_queue_index;
274 unsigned long flags;
275 scsi_qla_host_t *vha;
276 int i;
277
278 /* Setup to process RIO completion. */
279 handle_cnt = 0;
280 switch (mb[0]) {
281 case MBA_SCSI_COMPLETION:
282 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
283 handle_cnt = 1;
284 break;
285 case MBA_CMPLT_1_16BIT:
286 handles[0] = mb[1];
287 handle_cnt = 1;
288 mb[0] = MBA_SCSI_COMPLETION;
289 break;
290 case MBA_CMPLT_2_16BIT:
291 handles[0] = mb[1];
292 handles[1] = mb[2];
293 handle_cnt = 2;
294 mb[0] = MBA_SCSI_COMPLETION;
295 break;
296 case MBA_CMPLT_3_16BIT:
297 handles[0] = mb[1];
298 handles[1] = mb[2];
299 handles[2] = mb[3];
300 handle_cnt = 3;
301 mb[0] = MBA_SCSI_COMPLETION;
302 break;
303 case MBA_CMPLT_4_16BIT:
304 handles[0] = mb[1];
305 handles[1] = mb[2];
306 handles[2] = mb[3];
307 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
308 handle_cnt = 4;
309 mb[0] = MBA_SCSI_COMPLETION;
310 break;
311 case MBA_CMPLT_5_16BIT:
312 handles[0] = mb[1];
313 handles[1] = mb[2];
314 handles[2] = mb[3];
315 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
316 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
317 handle_cnt = 5;
318 mb[0] = MBA_SCSI_COMPLETION;
319 break;
320 case MBA_CMPLT_2_32BIT:
321 handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
322 handles[1] = le32_to_cpu(
323 ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
324 RD_MAILBOX_REG(ha, reg, 6));
325 handle_cnt = 2;
326 mb[0] = MBA_SCSI_COMPLETION;
327 break;
328 default:
329 break;
330 }
331
332 switch (mb[0]) {
333 case MBA_SCSI_COMPLETION: /* Fast Post */
334 if (!ha->flags.online)
335 break;
336
337 for (cnt = 0; cnt < handle_cnt; cnt++)
338 qla2x00_process_completed_request(ha, handles[cnt]);
339 break;
340
341 case MBA_RESET: /* Reset */
342 DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));
343
344 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
345 break;
346
347 case MBA_SYSTEM_ERR: /* System Error */
348 qla_printk(KERN_INFO, ha,
349 "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
350 mb[1], mb[2], mb[3]);
351
352 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
353 ha->isp_ops->fw_dump(ha, 1);
354
355 if (IS_FWI2_CAPABLE(ha)) {
356 if (mb[1] == 0 && mb[2] == 0) {
357 qla_printk(KERN_ERR, ha,
358 "Unrecoverable Hardware Error: adapter "
359 "marked OFFLINE!\n");
360 ha->flags.online = 0;
361 } else
362 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
363 } else if (mb[1] == 0) {
364 qla_printk(KERN_INFO, ha,
365 "Unrecoverable Hardware Error: adapter marked "
366 "OFFLINE!\n");
367 ha->flags.online = 0;
368 } else
369 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
370 break;
371
372 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
373 DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
374 ha->host_no));
375 qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");
376
377 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
378 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
379 break;
380
381 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
382 DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
383 ha->host_no));
384 qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");
385
386 qla2x00_post_hwe_work(ha, mb[0], mb[1], mb[2], mb[3]);
387 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
388 break;
389
390 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
391 DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
392 ha->host_no));
393 break;
394
395 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
396 DEBUG2(printk("scsi(%ld): LIP occured (%x).\n", ha->host_no,
397 mb[1]));
398 qla_printk(KERN_INFO, ha, "LIP occured (%x).\n", mb[1]);
399
400 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
401 atomic_set(&ha->loop_state, LOOP_DOWN);
402 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
403 qla2x00_mark_all_devices_lost(ha, 1);
404 }
405
406 if (ha->parent) {
407 atomic_set(&ha->vp_state, VP_FAILED);
408 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
409 }
410
411 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
412
413 ha->flags.management_server_logged_in = 0;
414 qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
415 break;
416
417 case MBA_LOOP_UP: /* Loop Up Event */
418 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
419 link_speed = link_speeds[0];
420 ha->link_data_rate = PORT_SPEED_1GB;
421 } else {
422 link_speed = link_speeds[LS_UNKNOWN];
423 if (mb[1] < 5)
424 link_speed = link_speeds[mb[1]];
425 ha->link_data_rate = mb[1];
426 }
427
428 DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
429 ha->host_no, link_speed));
430 qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
431 link_speed);
432
433 ha->flags.management_server_logged_in = 0;
434 qla2x00_post_aen_work(ha, FCH_EVT_LINKUP, ha->link_data_rate);
435 break;
436
437 case MBA_LOOP_DOWN: /* Loop Down Event */
438 DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
439 "(%x %x %x).\n", ha->host_no, mb[1], mb[2], mb[3]));
440 qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
441 mb[1], mb[2], mb[3]);
442
443 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
444 atomic_set(&ha->loop_state, LOOP_DOWN);
445 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
446 ha->device_flags |= DFLG_NO_CABLE;
447 qla2x00_mark_all_devices_lost(ha, 1);
448 }
449
450 if (ha->parent) {
451 atomic_set(&ha->vp_state, VP_FAILED);
452 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
453 }
454
455 ha->flags.management_server_logged_in = 0;
456 ha->link_data_rate = PORT_SPEED_UNKNOWN;
457 if (ql2xfdmienable)
458 set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
459 qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
460 break;
461
462 case MBA_LIP_RESET: /* LIP reset occurred */
463 DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
464 ha->host_no, mb[1]));
465 qla_printk(KERN_INFO, ha,
466 "LIP reset occured (%x).\n", mb[1]);
467
468 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
469 atomic_set(&ha->loop_state, LOOP_DOWN);
470 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
471 qla2x00_mark_all_devices_lost(ha, 1);
472 }
473
474 if (ha->parent) {
475 atomic_set(&ha->vp_state, VP_FAILED);
476 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
477 }
478
479 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
480
481 ha->operating_mode = LOOP;
482 ha->flags.management_server_logged_in = 0;
483 qla2x00_post_aen_work(ha, FCH_EVT_LIPRESET, mb[1]);
484 break;
485
486 case MBA_POINT_TO_POINT: /* Point-to-Point */
487 if (IS_QLA2100(ha))
488 break;
489
490 DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
491 ha->host_no));
492
493 /*
494 * Until there's a transition from loop down to loop up, treat
495 * this as loop down only.
496 */
497 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
498 atomic_set(&ha->loop_state, LOOP_DOWN);
499 if (!atomic_read(&ha->loop_down_timer))
500 atomic_set(&ha->loop_down_timer,
501 LOOP_DOWN_TIME);
502 qla2x00_mark_all_devices_lost(ha, 1);
503 }
504
505 if (ha->parent) {
506 atomic_set(&ha->vp_state, VP_FAILED);
507 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
508 }
509
510 if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
511 set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
512 }
513 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
514
515 ha->flags.gpsc_supported = 1;
516 ha->flags.management_server_logged_in = 0;
517 break;
518
519 case MBA_CHG_IN_CONNECTION: /* Change in connection mode */
520 if (IS_QLA2100(ha))
521 break;
522
523 DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
524 "received.\n",
525 ha->host_no));
526 qla_printk(KERN_INFO, ha,
527 "Configuration change detected: value=%x.\n", mb[1]);
528
529 if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
530 atomic_set(&ha->loop_state, LOOP_DOWN);
531 if (!atomic_read(&ha->loop_down_timer))
532 atomic_set(&ha->loop_down_timer,
533 LOOP_DOWN_TIME);
534 qla2x00_mark_all_devices_lost(ha, 1);
535 }
536
537 if (ha->parent) {
538 atomic_set(&ha->vp_state, VP_FAILED);
539 fc_vport_set_state(ha->fc_vport, FC_VPORT_FAILED);
540 }
541
542 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
543 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
544 break;
545
546 case MBA_PORT_UPDATE: /* Port database update */
547 if ((ha->flags.npiv_supported) && (ha->num_vhosts)) {
548 for_each_mapped_vp_idx(ha, i) {
549 list_for_each_entry(vha, &ha->vp_list,
550 vp_list) {
551 if ((mb[3] & 0xff)
552 == vha->vp_idx) {
553 ha = vha;
554 break;
555 }
556 }
557 }
558 }
559 /*
560 * If PORT UPDATE is global (recieved LIP_OCCURED/LIP_RESET
561 * event etc. earlier indicating loop is down) then process
562 * it. Otherwise ignore it and Wait for RSCN to come in.
563 */
564 atomic_set(&ha->loop_down_timer, 0);
565 if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
566 atomic_read(&ha->loop_state) != LOOP_DEAD) {
567 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
568 "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
569 mb[2], mb[3]));
570 break;
571 }
572
573 DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
574 ha->host_no));
575 DEBUG(printk(KERN_INFO
576 "scsi(%ld): Port database changed %04x %04x %04x.\n",
577 ha->host_no, mb[1], mb[2], mb[3]));
578
579 /*
580 * Mark all devices as missing so we will login again.
581 */
582 atomic_set(&ha->loop_state, LOOP_UP);
583
584 qla2x00_mark_all_devices_lost(ha, 1);
585
586 ha->flags.rscn_queue_overflow = 1;
587
588 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
589 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
590 break;
591
592 case MBA_RSCN_UPDATE: /* State Change Registration */
593 if ((ha->flags.npiv_supported) && (ha->num_vhosts)) {
594 for_each_mapped_vp_idx(ha, i) {
595 list_for_each_entry(vha, &ha->vp_list,
596 vp_list) {
597 if ((mb[3] & 0xff)
598 == vha->vp_idx) {
599 ha = vha;
600 break;
601 }
602 }
603 }
604 }
605
606 DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
607 ha->host_no));
608 DEBUG(printk(KERN_INFO
609 "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
610 ha->host_no, mb[1], mb[2], mb[3]));
611
612 rscn_entry = (mb[1] << 16) | mb[2];
613 host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
614 ha->d_id.b.al_pa;
615 if (rscn_entry == host_pid) {
616 DEBUG(printk(KERN_INFO
617 "scsi(%ld): Ignoring RSCN update to local host "
618 "port ID (%06x)\n",
619 ha->host_no, host_pid));
620 break;
621 }
622
623 rscn_queue_index = ha->rscn_in_ptr + 1;
624 if (rscn_queue_index == MAX_RSCN_COUNT)
625 rscn_queue_index = 0;
626 if (rscn_queue_index != ha->rscn_out_ptr) {
627 ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
628 ha->rscn_in_ptr = rscn_queue_index;
629 } else {
630 ha->flags.rscn_queue_overflow = 1;
631 }
632
633 atomic_set(&ha->loop_state, LOOP_UPDATE);
634 atomic_set(&ha->loop_down_timer, 0);
635 ha->flags.management_server_logged_in = 0;
636
637 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
638 set_bit(RSCN_UPDATE, &ha->dpc_flags);
639 qla2x00_post_aen_work(ha, FCH_EVT_RSCN, rscn_entry);
640 break;
641
642 /* case MBA_RIO_RESPONSE: */
643 case MBA_ZIO_RESPONSE:
644 DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
645 ha->host_no));
646 DEBUG(printk(KERN_INFO
647 "scsi(%ld): [R|Z]IO update completion.\n",
648 ha->host_no));
649
650 if (IS_FWI2_CAPABLE(ha))
651 qla24xx_process_response_queue(ha);
652 else
653 qla2x00_process_response_queue(ha);
654 break;
655
656 case MBA_DISCARD_RND_FRAME:
657 DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
658 "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
659 break;
660
661 case MBA_TRACE_NOTIFICATION:
662 DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
663 ha->host_no, mb[1], mb[2]));
664 break;
665
666 case MBA_ISP84XX_ALERT:
667 DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
668 "%04x %04x %04x\n", ha->host_no, mb[1], mb[2], mb[3]));
669
670 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
671 switch (mb[1]) {
672 case A84_PANIC_RECOVERY:
673 qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
674 "%04x %04x\n", mb[2], mb[3]);
675 break;
676 case A84_OP_LOGIN_COMPLETE:
677 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
678 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
679 "firmware version %x\n", ha->cs84xx->op_fw_version));
680 break;
681 case A84_DIAG_LOGIN_COMPLETE:
682 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
683 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
684 "diagnostic firmware version %x\n",
685 ha->cs84xx->diag_fw_version));
686 break;
687 case A84_GOLD_LOGIN_COMPLETE:
688 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
689 ha->cs84xx->fw_update = 1;
690 DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
691 "firmware version %x\n",
692 ha->cs84xx->gold_fw_version));
693 break;
694 default:
695 qla_printk(KERN_ERR, ha,
696 "Alert 84xx: Invalid Alert %04x %04x %04x\n",
697 mb[1], mb[2], mb[3]);
698 }
699 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
700 break;
701 }
702
703 if (!ha->parent && ha->num_vhosts)
704 qla2x00_alert_all_vps(ha, mb);
705 }
706
707 static void
708 qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
709 {
710 fc_port_t *fcport = data;
711
712 if (fcport->ha->max_q_depth <= sdev->queue_depth)
713 return;
714
715 if (sdev->ordered_tags)
716 scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
717 sdev->queue_depth + 1);
718 else
719 scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
720 sdev->queue_depth + 1);
721
722 fcport->last_ramp_up = jiffies;
723
724 DEBUG2(qla_printk(KERN_INFO, fcport->ha,
725 "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
726 fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
727 sdev->queue_depth));
728 }
729
730 static void
731 qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
732 {
733 fc_port_t *fcport = data;
734
735 if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
736 return;
737
738 DEBUG2(qla_printk(KERN_INFO, fcport->ha,
739 "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
740 fcport->ha->host_no, sdev->channel, sdev->id, sdev->lun,
741 sdev->queue_depth));
742 }
743
744 static inline void
745 qla2x00_ramp_up_queue_depth(scsi_qla_host_t *ha, srb_t *sp)
746 {
747 fc_port_t *fcport;
748 struct scsi_device *sdev;
749
750 sdev = sp->cmd->device;
751 if (sdev->queue_depth >= ha->max_q_depth)
752 return;
753
754 fcport = sp->fcport;
755 if (time_before(jiffies,
756 fcport->last_ramp_up + ql2xqfullrampup * HZ))
757 return;
758 if (time_before(jiffies,
759 fcport->last_queue_full + ql2xqfullrampup * HZ))
760 return;
761
762 starget_for_each_device(sdev->sdev_target, fcport,
763 qla2x00_adjust_sdev_qdepth_up);
764 }
765
766 /**
767 * qla2x00_process_completed_request() - Process a Fast Post response.
768 * @ha: SCSI driver HA context
769 * @index: SRB index
770 */
771 static void
772 qla2x00_process_completed_request(struct scsi_qla_host *ha, uint32_t index)
773 {
774 srb_t *sp;
775
776 /* Validate handle. */
777 if (index >= MAX_OUTSTANDING_COMMANDS) {
778 DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
779 ha->host_no, index));
780 qla_printk(KERN_WARNING, ha,
781 "Invalid SCSI completion handle %d.\n", index);
782
783 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
784 return;
785 }
786
787 sp = ha->outstanding_cmds[index];
788 if (sp) {
789 /* Free outstanding command slot. */
790 ha->outstanding_cmds[index] = NULL;
791
792 CMD_COMPL_STATUS(sp->cmd) = 0L;
793 CMD_SCSI_STATUS(sp->cmd) = 0L;
794
795 /* Save ISP completion status */
796 sp->cmd->result = DID_OK << 16;
797
798 qla2x00_ramp_up_queue_depth(ha, sp);
799 qla2x00_sp_compl(ha, sp);
800 } else {
801 DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
802 ha->host_no));
803 qla_printk(KERN_WARNING, ha,
804 "Invalid ISP SCSI completion handle\n");
805
806 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
807 }
808 }
809
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 *
 * Consumes every unprocessed entry on the response ring, dispatching each
 * IOCB by entry type, then writes the ring out-pointer register to tell
 * the ISP how far we have read.  Called with ha->hardware_lock held (see
 * the interrupt handlers above).
 */
void
qla2x00_process_response_queue(struct scsi_qla_host *ha)
{
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;

	if (!ha->flags.online)
		return;

	while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)ha->response_ring_ptr;

		/* Advance the ring index, wrapping at the end of the ring. */
		ha->rsp_ring_index++;
		if (ha->rsp_ring_index == ha->response_q_length) {
			ha->rsp_ring_index = 0;
			ha->response_ring_ptr = ha->response_ring;
		} else {
			ha->response_ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			DEBUG3(printk(KERN_INFO
			    "scsi(%ld): Process error entry.\n", ha->host_no));

			qla2x00_error_entry(ha, pkt);
			/* Mark the slot consumed before moving to the next one. */
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(ha, pkt);
			break;
		case STATUS_TYPE_21:
			/* Batched fast-post completions (type 21 layout). */
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			/* Batched fast-post completions (type 22 layout). */
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(ha,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			/* Continuation of sense data from a prior status entry. */
			qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
			break;
		default:
			/* Type Not Supported. */
			DEBUG4(printk(KERN_WARNING
			    "scsi(%ld): Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    ha->host_no, pkt->entry_type, pkt->entry_status));
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), ha->rsp_ring_index);
}
882
/*
 * Copy FCP sense data for sp into the midlayer's sense buffer.
 *
 * At most 32 bytes are copied here; when more remains, sp is parked in
 * ha->status_srb so the rest can be filled in from subsequent
 * status-continuation entries (see qla2x00_status_cont_entry).
 */
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
{
	struct scsi_cmnd *cp = sp->cmd;

	/* Never copy more than the midlayer sense buffer can hold. */
	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	CMD_ACTUAL_SNSLEN(cp) = sense_len;
	sp->request_sense_length = sense_len;
	sp->request_sense_ptr = cp->sense_buffer;
	/* Cap this pass at 32 bytes; the remainder is deferred below. */
	if (sp->request_sense_length > 32)
		sense_len = 32;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	/* Track progress for the continuation-entry path. */
	sp->request_sense_ptr += sense_len;
	sp->request_sense_length -= sense_len;
	if (sp->request_sense_length != 0)
		sp->ha->status_srb = sp;

	DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
	    "cmd=%p pid=%ld\n", __func__, sp->ha->host_no, cp->device->channel,
	    cp->device->id, cp->device->lun, cp, cp->serial_number));
	if (sense_len)
		DEBUG5(qla2x00_dump_buffer(cp->sense_buffer,
		    CMD_ACTUAL_SNSLEN(cp)));
}
911
912 /**
913 * qla2x00_status_entry() - Process a Status IOCB entry.
914 * @ha: SCSI driver HA context
915 * @pkt: Entry pointer
916 */
917 static void
918 qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt)
919 {
920 srb_t *sp;
921 fc_port_t *fcport;
922 struct scsi_cmnd *cp;
923 sts_entry_t *sts;
924 struct sts_entry_24xx *sts24;
925 uint16_t comp_status;
926 uint16_t scsi_status;
927 uint8_t lscsi_status;
928 int32_t resid;
929 uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
930 uint8_t *rsp_info, *sense_data;
931
932 sts = (sts_entry_t *) pkt;
933 sts24 = (struct sts_entry_24xx *) pkt;
934 if (IS_FWI2_CAPABLE(ha)) {
935 comp_status = le16_to_cpu(sts24->comp_status);
936 scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
937 } else {
938 comp_status = le16_to_cpu(sts->comp_status);
939 scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
940 }
941
942 /* Fast path completion. */
943 if (comp_status == CS_COMPLETE && scsi_status == 0) {
944 qla2x00_process_completed_request(ha, sts->handle);
945
946 return;
947 }
948
949 /* Validate handle. */
950 if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
951 sp = ha->outstanding_cmds[sts->handle];
952 ha->outstanding_cmds[sts->handle] = NULL;
953 } else
954 sp = NULL;
955
956 if (sp == NULL) {
957 DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
958 ha->host_no));
959 qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");
960
961 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
962 qla2xxx_wake_dpc(ha);
963 return;
964 }
965 cp = sp->cmd;
966 if (cp == NULL) {
967 DEBUG2(printk("scsi(%ld): Command already returned back to OS "
968 "pkt->handle=%d sp=%p.\n", ha->host_no, sts->handle, sp));
969 qla_printk(KERN_WARNING, ha,
970 "Command is NULL: already returned to OS (sp=%p)\n", sp);
971
972 return;
973 }
974
975 lscsi_status = scsi_status & STATUS_MASK;
976 CMD_ENTRY_STATUS(cp) = sts->entry_status;
977 CMD_COMPL_STATUS(cp) = comp_status;
978 CMD_SCSI_STATUS(cp) = scsi_status;
979
980 fcport = sp->fcport;
981
982 sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
983 if (IS_FWI2_CAPABLE(ha)) {
984 sense_len = le32_to_cpu(sts24->sense_len);
985 rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
986 resid_len = le32_to_cpu(sts24->rsp_residual_count);
987 fw_resid_len = le32_to_cpu(sts24->residual_len);
988 rsp_info = sts24->data;
989 sense_data = sts24->data;
990 host_to_fcp_swap(sts24->data, sizeof(sts24->data));
991 } else {
992 sense_len = le16_to_cpu(sts->req_sense_length);
993 rsp_info_len = le16_to_cpu(sts->rsp_info_len);
994 resid_len = le32_to_cpu(sts->residual_length);
995 rsp_info = sts->rsp_info;
996 sense_data = sts->req_sense_data;
997 }
998
999 /* Check for any FCP transport errors. */
1000 if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
1001 /* Sense data lies beyond any FCP RESPONSE data. */
1002 if (IS_FWI2_CAPABLE(ha))
1003 sense_data += rsp_info_len;
1004 if (rsp_info_len > 3 && rsp_info[3]) {
1005 DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
1006 "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
1007 "retrying command\n", ha->host_no,
1008 cp->device->channel, cp->device->id,
1009 cp->device->lun, rsp_info_len, rsp_info[0],
1010 rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
1011 rsp_info[5], rsp_info[6], rsp_info[7]));
1012
1013 cp->result = DID_BUS_BUSY << 16;
1014 qla2x00_sp_compl(ha, sp);
1015 return;
1016 }
1017 }
1018
1019 /* Check for overrun. */
1020 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
1021 scsi_status & SS_RESIDUAL_OVER)
1022 comp_status = CS_DATA_OVERRUN;
1023
1024 /*
1025 * Based on Host and scsi status generate status code for Linux
1026 */
1027 switch (comp_status) {
1028 case CS_COMPLETE:
1029 case CS_QUEUE_FULL:
1030 if (scsi_status == 0) {
1031 cp->result = DID_OK << 16;
1032 break;
1033 }
1034 if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
1035 resid = resid_len;
1036 scsi_set_resid(cp, resid);
1037 CMD_RESID_LEN(cp) = resid;
1038
1039 if (!lscsi_status &&
1040 ((unsigned)(scsi_bufflen(cp) - resid) <
1041 cp->underflow)) {
1042 qla_printk(KERN_INFO, ha,
1043 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1044 "detected (%x of %x bytes)...returning "
1045 "error status.\n", ha->host_no,
1046 cp->device->channel, cp->device->id,
1047 cp->device->lun, resid,
1048 scsi_bufflen(cp));
1049
1050 cp->result = DID_ERROR << 16;
1051 break;
1052 }
1053 }
1054 cp->result = DID_OK << 16 | lscsi_status;
1055
1056 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1057 DEBUG2(printk(KERN_INFO
1058 "scsi(%ld): QUEUE FULL status detected "
1059 "0x%x-0x%x.\n", ha->host_no, comp_status,
1060 scsi_status));
1061
1062 /* Adjust queue depth for all luns on the port. */
1063 fcport->last_queue_full = jiffies;
1064 starget_for_each_device(cp->device->sdev_target,
1065 fcport, qla2x00_adjust_sdev_qdepth_down);
1066 break;
1067 }
1068 if (lscsi_status != SS_CHECK_CONDITION)
1069 break;
1070
1071 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1072 if (!(scsi_status & SS_SENSE_LEN_VALID))
1073 break;
1074
1075 qla2x00_handle_sense(sp, sense_data, sense_len);
1076 break;
1077
1078 case CS_DATA_UNDERRUN:
1079 resid = resid_len;
1080 /* Use F/W calculated residual length. */
1081 if (IS_FWI2_CAPABLE(ha)) {
1082 if (scsi_status & SS_RESIDUAL_UNDER &&
1083 resid != fw_resid_len) {
1084 scsi_status &= ~SS_RESIDUAL_UNDER;
1085 lscsi_status = 0;
1086 }
1087 resid = fw_resid_len;
1088 }
1089
1090 if (scsi_status & SS_RESIDUAL_UNDER) {
1091 scsi_set_resid(cp, resid);
1092 CMD_RESID_LEN(cp) = resid;
1093 } else {
1094 DEBUG2(printk(KERN_INFO
1095 "scsi(%ld:%d:%d) UNDERRUN status detected "
1096 "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
1097 "os_underflow=0x%x\n", ha->host_no,
1098 cp->device->id, cp->device->lun, comp_status,
1099 scsi_status, resid_len, resid, cp->cmnd[0],
1100 cp->underflow));
1101
1102 }
1103
1104 /*
1105 * Check to see if SCSI Status is non zero. If so report SCSI
1106 * Status.
1107 */
1108 if (lscsi_status != 0) {
1109 cp->result = DID_OK << 16 | lscsi_status;
1110
1111 if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
1112 DEBUG2(printk(KERN_INFO
1113 "scsi(%ld): QUEUE FULL status detected "
1114 "0x%x-0x%x.\n", ha->host_no, comp_status,
1115 scsi_status));
1116
1117 /*
1118 * Adjust queue depth for all luns on the
1119 * port.
1120 */
1121 fcport->last_queue_full = jiffies;
1122 starget_for_each_device(
1123 cp->device->sdev_target, fcport,
1124 qla2x00_adjust_sdev_qdepth_down);
1125 break;
1126 }
1127 if (lscsi_status != SS_CHECK_CONDITION)
1128 break;
1129
1130 memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
1131 if (!(scsi_status & SS_SENSE_LEN_VALID))
1132 break;
1133
1134 qla2x00_handle_sense(sp, sense_data, sense_len);
1135
1136 /*
1137 * In case of a Underrun condition, set both the lscsi
1138 * status and the completion status to appropriate
1139 * values.
1140 */
1141 if (resid &&
1142 ((unsigned)(scsi_bufflen(cp) - resid) <
1143 cp->underflow)) {
1144 DEBUG2(qla_printk(KERN_INFO, ha,
1145 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1146 "detected (%x of %x bytes)...returning "
1147 "error status.\n", ha->host_no,
1148 cp->device->channel, cp->device->id,
1149 cp->device->lun, resid,
1150 scsi_bufflen(cp)));
1151
1152 cp->result = DID_ERROR << 16 | lscsi_status;
1153 }
1154 } else {
1155 /*
1156 * If RISC reports underrun and target does not report
1157 * it then we must have a lost frame, so tell upper
1158 * layer to retry it by reporting a bus busy.
1159 */
1160 if (!(scsi_status & SS_RESIDUAL_UNDER)) {
1161 DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
1162 "frame(s) detected (%x of %x bytes)..."
1163 "retrying command.\n", ha->host_no,
1164 cp->device->channel, cp->device->id,
1165 cp->device->lun, resid,
1166 scsi_bufflen(cp)));
1167
1168 cp->result = DID_BUS_BUSY << 16;
1169 break;
1170 }
1171
1172 /* Handle mid-layer underflow */
1173 if ((unsigned)(scsi_bufflen(cp) - resid) <
1174 cp->underflow) {
1175 qla_printk(KERN_INFO, ha,
1176 "scsi(%ld:%d:%d:%d): Mid-layer underflow "
1177 "detected (%x of %x bytes)...returning "
1178 "error status.\n", ha->host_no,
1179 cp->device->channel, cp->device->id,
1180 cp->device->lun, resid,
1181 scsi_bufflen(cp));
1182
1183 cp->result = DID_ERROR << 16;
1184 break;
1185 }
1186
1187 /* Everybody online, looking good... */
1188 cp->result = DID_OK << 16;
1189 }
1190 break;
1191
1192 case CS_DATA_OVERRUN:
1193 DEBUG2(printk(KERN_INFO
1194 "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
1195 ha->host_no, cp->device->id, cp->device->lun, comp_status,
1196 scsi_status));
1197 DEBUG2(printk(KERN_INFO
1198 "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
1199 cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
1200 cp->cmnd[4], cp->cmnd[5]));
1201 DEBUG2(printk(KERN_INFO
1202 "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
1203 "status!\n",
1204 cp->serial_number, scsi_bufflen(cp), resid_len));
1205
1206 cp->result = DID_ERROR << 16;
1207 break;
1208
1209 case CS_PORT_LOGGED_OUT:
1210 case CS_PORT_CONFIG_CHG:
1211 case CS_PORT_BUSY:
1212 case CS_INCOMPLETE:
1213 case CS_PORT_UNAVAILABLE:
1214 /*
1215 * If the port is in Target Down state, return all IOs for this
1216 * Target with DID_NO_CONNECT ELSE Queue the IOs in the
1217 * retry_queue.
1218 */
1219 DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
1220 "pid=%ld, compl status=0x%x, port state=0x%x\n",
1221 ha->host_no, cp->device->id, cp->device->lun,
1222 cp->serial_number, comp_status,
1223 atomic_read(&fcport->state)));
1224
1225 cp->result = DID_BUS_BUSY << 16;
1226 if (atomic_read(&fcport->state) == FCS_ONLINE) {
1227 qla2x00_mark_device_lost(ha, fcport, 1, 1);
1228 }
1229 break;
1230
1231 case CS_RESET:
1232 DEBUG2(printk(KERN_INFO
1233 "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
1234 ha->host_no, comp_status, scsi_status));
1235
1236 cp->result = DID_RESET << 16;
1237 break;
1238
1239 case CS_ABORTED:
1240 /*
1241 * hv2.19.12 - DID_ABORT does not retry the request if we
1242 * aborted this request then abort otherwise it must be a
1243 * reset.
1244 */
1245 DEBUG2(printk(KERN_INFO
1246 "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
1247 ha->host_no, comp_status, scsi_status));
1248
1249 cp->result = DID_RESET << 16;
1250 break;
1251
1252 case CS_TIMEOUT:
1253 cp->result = DID_BUS_BUSY << 16;
1254
1255 if (IS_FWI2_CAPABLE(ha)) {
1256 DEBUG2(printk(KERN_INFO
1257 "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
1258 "0x%x-0x%x\n", ha->host_no, cp->device->channel,
1259 cp->device->id, cp->device->lun, comp_status,
1260 scsi_status));
1261 break;
1262 }
1263 DEBUG2(printk(KERN_INFO
1264 "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
1265 "sflags=%x.\n", ha->host_no, cp->device->channel,
1266 cp->device->id, cp->device->lun, comp_status, scsi_status,
1267 le16_to_cpu(sts->status_flags)));
1268
1269 /* Check to see if logout occurred. */
1270 if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
1271 qla2x00_mark_device_lost(ha, fcport, 1, 1);
1272 break;
1273
1274 default:
1275 DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
1276 "0x%x-0x%x.\n", ha->host_no, comp_status, scsi_status));
1277 qla_printk(KERN_INFO, ha,
1278 "Unknown status detected 0x%x-0x%x.\n",
1279 comp_status, scsi_status);
1280
1281 cp->result = DID_ERROR << 16;
1282 break;
1283 }
1284
1285 /* Place command on done queue. */
1286 if (ha->status_srb == NULL)
1287 qla2x00_sp_compl(ha, sp);
1288 }
1289
1290 /**
1291 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
1292 * @ha: SCSI driver HA context
1293 * @pkt: Entry pointer
1294 *
1295 * Extended sense data.
1296 */
1297 static void
1298 qla2x00_status_cont_entry(scsi_qla_host_t *ha, sts_cont_entry_t *pkt)
1299 {
1300 uint8_t sense_sz = 0;
1301 srb_t *sp = ha->status_srb;
1302 struct scsi_cmnd *cp;
1303
1304 if (sp != NULL && sp->request_sense_length != 0) {
1305 cp = sp->cmd;
1306 if (cp == NULL) {
1307 DEBUG2(printk("%s(): Cmd already returned back to OS "
1308 "sp=%p.\n", __func__, sp));
1309 qla_printk(KERN_INFO, ha,
1310 "cmd is NULL: already returned to OS (sp=%p)\n",
1311 sp);
1312
1313 ha->status_srb = NULL;
1314 return;
1315 }
1316
1317 if (sp->request_sense_length > sizeof(pkt->data)) {
1318 sense_sz = sizeof(pkt->data);
1319 } else {
1320 sense_sz = sp->request_sense_length;
1321 }
1322
1323 /* Move sense data. */
1324 if (IS_FWI2_CAPABLE(ha))
1325 host_to_fcp_swap(pkt->data, sizeof(pkt->data));
1326 memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
1327 DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));
1328
1329 sp->request_sense_ptr += sense_sz;
1330 sp->request_sense_length -= sense_sz;
1331
1332 /* Place command on done queue. */
1333 if (sp->request_sense_length == 0) {
1334 ha->status_srb = NULL;
1335 qla2x00_sp_compl(ha, sp);
1336 }
1337 }
1338 }
1339
1340 /**
1341 * qla2x00_error_entry() - Process an error entry.
1342 * @ha: SCSI driver HA context
1343 * @pkt: Entry pointer
1344 */
1345 static void
1346 qla2x00_error_entry(scsi_qla_host_t *ha, sts_entry_t *pkt)
1347 {
1348 srb_t *sp;
1349
1350 #if defined(QL_DEBUG_LEVEL_2)
1351 if (pkt->entry_status & RF_INV_E_ORDER)
1352 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
1353 else if (pkt->entry_status & RF_INV_E_COUNT)
1354 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
1355 else if (pkt->entry_status & RF_INV_E_PARAM)
1356 qla_printk(KERN_ERR, ha,
1357 "%s: Invalid Entry Parameter\n", __func__);
1358 else if (pkt->entry_status & RF_INV_E_TYPE)
1359 qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
1360 else if (pkt->entry_status & RF_BUSY)
1361 qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
1362 else
1363 qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
1364 #endif
1365
1366 /* Validate handle. */
1367 if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
1368 sp = ha->outstanding_cmds[pkt->handle];
1369 else
1370 sp = NULL;
1371
1372 if (sp) {
1373 /* Free outstanding command slot. */
1374 ha->outstanding_cmds[pkt->handle] = NULL;
1375
1376 /* Bad payload or header */
1377 if (pkt->entry_status &
1378 (RF_INV_E_ORDER | RF_INV_E_COUNT |
1379 RF_INV_E_PARAM | RF_INV_E_TYPE)) {
1380 sp->cmd->result = DID_ERROR << 16;
1381 } else if (pkt->entry_status & RF_BUSY) {
1382 sp->cmd->result = DID_BUS_BUSY << 16;
1383 } else {
1384 sp->cmd->result = DID_ERROR << 16;
1385 }
1386 qla2x00_sp_compl(ha, sp);
1387
1388 } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
1389 COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
1390 DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
1391 ha->host_no));
1392 qla_printk(KERN_WARNING, ha,
1393 "Error entry - invalid handle\n");
1394
1395 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1396 qla2xxx_wake_dpc(ha);
1397 }
1398 }
1399
1400 /**
1401 * qla24xx_mbx_completion() - Process mailbox command completions.
1402 * @ha: SCSI driver HA context
1403 * @mb0: Mailbox0 register
1404 */
1405 static void
1406 qla24xx_mbx_completion(scsi_qla_host_t *ha, uint16_t mb0)
1407 {
1408 uint16_t cnt;
1409 uint16_t __iomem *wptr;
1410 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1411
1412 /* Load return mailbox registers. */
1413 ha->flags.mbox_int = 1;
1414 ha->mailbox_out[0] = mb0;
1415 wptr = (uint16_t __iomem *)&reg->mailbox1;
1416
1417 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
1418 ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
1419 wptr++;
1420 }
1421
1422 if (ha->mcp) {
1423 DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
1424 __func__, ha->host_no, ha->mcp->mb[0]));
1425 } else {
1426 DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
1427 __func__, ha->host_no));
1428 }
1429 }
1430
1431 /**
1432 * qla24xx_process_response_queue() - Process response queue entries.
1433 * @ha: SCSI driver HA context
1434 */
1435 void
1436 qla24xx_process_response_queue(struct scsi_qla_host *ha)
1437 {
1438 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1439 struct sts_entry_24xx *pkt;
1440
1441 if (!ha->flags.online)
1442 return;
1443
1444 while (ha->response_ring_ptr->signature != RESPONSE_PROCESSED) {
1445 pkt = (struct sts_entry_24xx *)ha->response_ring_ptr;
1446
1447 ha->rsp_ring_index++;
1448 if (ha->rsp_ring_index == ha->response_q_length) {
1449 ha->rsp_ring_index = 0;
1450 ha->response_ring_ptr = ha->response_ring;
1451 } else {
1452 ha->response_ring_ptr++;
1453 }
1454
1455 if (pkt->entry_status != 0) {
1456 DEBUG3(printk(KERN_INFO
1457 "scsi(%ld): Process error entry.\n", ha->host_no));
1458
1459 qla2x00_error_entry(ha, (sts_entry_t *) pkt);
1460 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1461 wmb();
1462 continue;
1463 }
1464
1465 switch (pkt->entry_type) {
1466 case STATUS_TYPE:
1467 qla2x00_status_entry(ha, pkt);
1468 break;
1469 case STATUS_CONT_TYPE:
1470 qla2x00_status_cont_entry(ha, (sts_cont_entry_t *)pkt);
1471 break;
1472 case VP_RPT_ID_IOCB_TYPE:
1473 qla24xx_report_id_acquisition(ha,
1474 (struct vp_rpt_id_entry_24xx *)pkt);
1475 break;
1476 default:
1477 /* Type Not Supported. */
1478 DEBUG4(printk(KERN_WARNING
1479 "scsi(%ld): Received unknown response pkt type %x "
1480 "entry status=%x.\n",
1481 ha->host_no, pkt->entry_type, pkt->entry_status));
1482 break;
1483 }
1484 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1485 wmb();
1486 }
1487
1488 /* Adjust ring index */
1489 WRT_REG_DWORD(&reg->rsp_q_out, ha->rsp_ring_index);
1490 }
1491
/*
 * qla2xxx_check_risc_status() - Read additional RISC diagnostic state
 * after a pause.  ISP25xx only; a no-op on other chips.
 *
 * Selects register window 0x7C00 via iobase_addr, polls iobase_window
 * BIT_0 (re-arming with 0x0001 and, on timeout, retrying with 0x0003),
 * then checks iobase_c8 BIT_3.  The window is restored to 0x0000 on all
 * exit paths.
 */
static void
qla2xxx_check_risc_status(scsi_qla_host_t *ha)
{
	int rval;
	uint32_t cnt;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	/* Read back -- presumably flushes the posted window select. */
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	/* Poll up to 10000 times (10us apart) for BIT_0 to assert. */
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	/* First window select timed out -- retry with value 0x0003. */
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");

done:
	/* Deselect the window and flush the write. */
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}
1537
1538 /**
1539 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
1540 * @irq:
1541 * @dev_id: SCSI driver HA context
1542 *
1543 * Called by system whenever the host adapter generates an interrupt.
1544 *
1545 * Returns handled flag.
1546 */
1547 irqreturn_t
1548 qla24xx_intr_handler(int irq, void *dev_id)
1549 {
1550 scsi_qla_host_t *ha;
1551 struct device_reg_24xx __iomem *reg;
1552 int status;
1553 unsigned long iter;
1554 uint32_t stat;
1555 uint32_t hccr;
1556 uint16_t mb[4];
1557
1558 ha = (scsi_qla_host_t *) dev_id;
1559 if (!ha) {
1560 printk(KERN_INFO
1561 "%s(): NULL host pointer\n", __func__);
1562 return IRQ_NONE;
1563 }
1564
1565 reg = &ha->iobase->isp24;
1566 status = 0;
1567
1568 spin_lock(&ha->hardware_lock);
1569 for (iter = 50; iter--; ) {
1570 stat = RD_REG_DWORD(&reg->host_status);
1571 if (stat & HSRX_RISC_PAUSED) {
1572 if (pci_channel_offline(ha->pdev))
1573 break;
1574
1575 if (ha->hw_event_pause_errors == 0)
1576 qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
1577 0, MSW(stat), LSW(stat));
1578 else if (ha->hw_event_pause_errors < 0xffffffff)
1579 ha->hw_event_pause_errors++;
1580
1581 hccr = RD_REG_DWORD(&reg->hccr);
1582
1583 qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
1584 "Dumping firmware!\n", hccr);
1585
1586 qla2xxx_check_risc_status(ha);
1587
1588 ha->isp_ops->fw_dump(ha, 1);
1589 set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
1590 break;
1591 } else if ((stat & HSRX_RISC_INT) == 0)
1592 break;
1593
1594 switch (stat & 0xff) {
1595 case 0x1:
1596 case 0x2:
1597 case 0x10:
1598 case 0x11:
1599 qla24xx_mbx_completion(ha, MSW(stat));
1600 status |= MBX_INTERRUPT;
1601
1602 break;
1603 case 0x12:
1604 mb[0] = MSW(stat);
1605 mb[1] = RD_REG_WORD(&reg->mailbox1);
1606 mb[2] = RD_REG_WORD(&reg->mailbox2);
1607 mb[3] = RD_REG_WORD(&reg->mailbox3);
1608 qla2x00_async_event(ha, mb);
1609 break;
1610 case 0x13:
1611 qla24xx_process_response_queue(ha);
1612 break;
1613 default:
1614 DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
1615 "(%d).\n",
1616 ha->host_no, stat & 0xff));
1617 break;
1618 }
1619 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1620 RD_REG_DWORD_RELAXED(&reg->hccr);
1621 }
1622 spin_unlock(&ha->hardware_lock);
1623
1624 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
1625 (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
1626 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1627 complete(&ha->mbx_intr_comp);
1628 }
1629
1630 return IRQ_HANDLED;
1631 }
1632
1633 static irqreturn_t
1634 qla24xx_msix_rsp_q(int irq, void *dev_id)
1635 {
1636 scsi_qla_host_t *ha;
1637 struct device_reg_24xx __iomem *reg;
1638
1639 ha = dev_id;
1640 reg = &ha->iobase->isp24;
1641
1642 spin_lock(&ha->hardware_lock);
1643
1644 qla24xx_process_response_queue(ha);
1645 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
1646
1647 spin_unlock(&ha->hardware_lock);
1648
1649 return IRQ_HANDLED;
1650 }
1651
/*
 * qla24xx_msix_default() - MSI-X handler for the default vector (mailbox
 * completions, async events, response queue).
 *
 * Same service logic as the qla24xx_intr_handler() loop body, but
 * executed at most once per invocation (do { ... } while (0)).
 */
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[4];

	ha = dev_id;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock(&ha->hardware_lock);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (pci_channel_offline(ha->pdev))
				break;

			/* Post a hw event for the first pause error only;
			 * later ones are merely counted (saturating). */
			if (ha->hw_event_pause_errors == 0)
				qla2x00_post_hwe_work(ha, HW_EVENT_PARITY_ERR,
				    0, MSW(stat), LSW(stat));
			else if (ha->hw_event_pause_errors < 0xffffffff)
				ha->hw_event_pause_errors++;

			hccr = RD_REG_DWORD(&reg->hccr);

			qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
			    "Dumping firmware!\n", hccr);

			qla2xxx_check_risc_status(ha);

			/* Capture firmware state and schedule an ISP abort. */
			ha->isp_ops->fw_dump(ha, 1);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		/* Low byte of host_status identifies the interrupt source. */
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			/* Mailbox completion; mailbox 0 rides in the MSW. */
			qla24xx_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			/* Asynchronous event. */
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla24xx_process_response_queue(ha);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		/* Acknowledge the RISC interrupt. */
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	spin_unlock(&ha->hardware_lock);

	/* Wake any thread waiting on a mailbox command completion. */
	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return IRQ_HANDLED;
}
1729
/* Interrupt handling helpers. */

/*
 * Static description of one MSI-X vector: the hardware entry number, the
 * index into ha->msix_entries[], the name passed to request_irq() and the
 * handler to attach.
 */
struct qla_init_msix_entry {
	uint16_t entry;
	uint16_t index;
	const char *name;
	irq_handler_t handler;
};

/* Vectors requested by qla24xx_enable_msix(), in allocation order. */
static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
	{ QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
		"qla2xxx (default)", qla24xx_msix_default },

	{ QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
		"qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
};
1746
1747 static void
1748 qla24xx_disable_msix(scsi_qla_host_t *ha)
1749 {
1750 int i;
1751 struct qla_msix_entry *qentry;
1752
1753 for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
1754 qentry = &ha->msix_entries[imsix_entries[i].index];
1755 if (qentry->have_irq)
1756 free_irq(qentry->msix_vector, ha);
1757 }
1758 pci_disable_msix(ha->pdev);
1759 }
1760
/*
 * qla24xx_enable_msix() - Enable MSI-X and request one IRQ per entry in
 * imsix_entries[].
 *
 * Returns 0 on success.  On failure returns the pci_enable_msix() or
 * request_irq() error code; any vectors already acquired are released
 * via qla24xx_disable_msix() before returning.
 */
static int
qla24xx_enable_msix(scsi_qla_host_t *ha)
{
	int i, ret;
	struct msix_entry entries[QLA_MSIX_ENTRIES];
	struct qla_msix_entry *qentry;

	/* Ask the PCI core for the hardware entry numbers we need. */
	for (i = 0; i < QLA_MSIX_ENTRIES; i++)
		entries[i].entry = imsix_entries[i].entry;

	ret = pci_enable_msix(ha->pdev, entries, ARRAY_SIZE(entries));
	if (ret) {
		qla_printk(KERN_WARNING, ha,
		    "MSI-X: Failed to enable support -- %d/%d\n",
		    QLA_MSIX_ENTRIES, ret);
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	/* Attach a handler to each allocated vector. */
	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		qentry = &ha->msix_entries[imsix_entries[i].index];
		qentry->msix_vector = entries[i].vector;
		qentry->msix_entry = entries[i].entry;
		qentry->have_irq = 0;
		ret = request_irq(qentry->msix_vector,
		    imsix_entries[i].handler, 0, imsix_entries[i].name, ha);
		if (ret) {
			qla_printk(KERN_WARNING, ha,
			    "MSI-X: Unable to register handler -- %x/%d.\n",
			    imsix_entries[i].index, ret);
			/* Unwind: frees vectors requested so far. */
			qla24xx_disable_msix(ha);
			goto msix_out;
		}
		qentry->have_irq = 1;
	}

msix_out:
	return ret;
}
1800
/*
 * qla2x00_request_irqs() - Attach interrupt handling, preferring MSI-X,
 * then MSI, then a shared INTa line.
 *
 * On success, stale RISC interrupt state is cleared and interrupts are
 * enabled.  Returns 0 on success or the last failing call's error code.
 */
int
qla2x00_request_irqs(scsi_qla_host_t *ha)
{
	int ret;
	device_reg_t __iomem *reg = ha->iobase;

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
		goto skip_msix;

	/* ISP2432 needs a minimum chip revision and FW mode 1 for MSI-X. */
	if (IS_QLA2432(ha) && (ha->chip_revision < QLA_MSIX_CHIP_REV_24XX ||
	    !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes));

		goto skip_msix;
	}

	/* Blacklisted HP subsystems skip MSI too and go straight to INTa. */
	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
		ha->pdev->subsystem_device == 0x7041 ||
		ha->pdev->subsystem_device == 0x1705)) {
		DEBUG2(qla_printk(KERN_WARNING, ha,
		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device));

		goto skip_msi;
	}

	ret = qla24xx_enable_msix(ha);
	if (!ret) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
		    ha->fw_attributes));
		goto clear_risc_ints;
	}
	qla_printk(KERN_WARNING, ha,
	    "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
skip_msix:

	/* Try plain MSI where supported; failure silently keeps INTa. */
	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
		ha->flags.msi_enabled = 1;
	}
skip_msi:

	/* Request the (shared) interrupt line. */
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    IRQF_DISABLED|IRQF_SHARED, QLA2XXX_DRIVER_NAME, ha);
	if (ret) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to reserve interrupt %d already in use.\n",
		    ha->pdev->irq);
		/*
		 * NOTE(review): if pci_enable_msi() succeeded above, MSI is
		 * not disabled on this failure path -- verify callers clean
		 * up (e.g. via qla2x00_free_irqs()).
		 */
		goto fail;
	}
	ha->flags.inta_enabled = 1;
	ha->host->irq = ha->pdev->irq;
clear_risc_ints:

	/* Clear stale RISC interrupt state before enabling interrupts. */
	ha->isp_ops->disable_intrs(ha);
	spin_lock_irq(&ha->hardware_lock);
	if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
	} else {
		WRT_REG_WORD(&reg->isp.semaphore, 0);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
	}
	spin_unlock_irq(&ha->hardware_lock);
	ha->isp_ops->enable_intrs(ha);

fail:
	return ret;
}
1881
1882 void
1883 qla2x00_free_irqs(scsi_qla_host_t *ha)
1884 {
1885
1886 if (ha->flags.msix_enabled)
1887 qla24xx_disable_msix(ha);
1888 else if (ha->flags.inta_enabled) {
1889 free_irq(ha->host->irq, ha);
1890 pci_disable_msi(ha->pdev);
1891 }
1892 }
This page took 0.081426 seconds and 5 git commands to generate.