[SCSI] lpfc 8.2.8 : Miscellaneous Bug Fixes
drivers/scsi/lpfc/lpfc_scsi.c
1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2004-2008 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
6 * www.emulex.com *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig *
8 * *
9 * This program is free software; you can redistribute it and/or *
10 * modify it under the terms of version 2 of the GNU General *
11 * Public License as published by the Free Software Foundation. *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID. See the GNU General Public License for *
18 * more details, a copy of which can be found in the file COPYING *
19 * included with this package. *
20 *******************************************************************/
21
22 #include <linux/pci.h>
23 #include <linux/interrupt.h>
24 #include <linux/delay.h>
25
26 #include <scsi/scsi.h>
27 #include <scsi/scsi_device.h>
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_tcq.h>
30 #include <scsi/scsi_transport_fc.h>
31
32 #include "lpfc_version.h"
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
37 #include "lpfc.h"
38 #include "lpfc_logmsg.h"
39 #include "lpfc_crtn.h"
40 #include "lpfc_vport.h"
41
42 #define LPFC_RESET_WAIT 2
43 #define LPFC_ABORT_WAIT 2
44
45 /*
46 * This function is called with no lock held when there is a resource
47  * error in the driver or in the firmware.
48 */
49 void
50 lpfc_adjust_queue_depth(struct lpfc_hba *phba)
51 {
52 unsigned long flags;
53 uint32_t evt_posted;
54
55 spin_lock_irqsave(&phba->hbalock, flags);
56 atomic_inc(&phba->num_rsrc_err);
57 phba->last_rsrc_error_time = jiffies;
58
59 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
60 spin_unlock_irqrestore(&phba->hbalock, flags);
61 return;
62 }
63
64 phba->last_ramp_down_time = jiffies;
65
66 spin_unlock_irqrestore(&phba->hbalock, flags);
67
68 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
69 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
70 if (!evt_posted)
71 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
72 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
73
74 if (!evt_posted)
75 lpfc_worker_wake_up(phba);
76 return;
77 }
78
79 /*
80 * This function is called with no lock held when there is a successful
81 * SCSI command completion.
82 */
83 static inline void
84 lpfc_rampup_queue_depth(struct lpfc_vport *vport,
85 struct scsi_device *sdev)
86 {
87 unsigned long flags;
88 struct lpfc_hba *phba = vport->phba;
89 uint32_t evt_posted;
90 atomic_inc(&phba->num_cmd_success);
91
92 if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
93 return;
94 spin_lock_irqsave(&phba->hbalock, flags);
95 if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
96 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
97 spin_unlock_irqrestore(&phba->hbalock, flags);
98 return;
99 }
100 phba->last_ramp_up_time = jiffies;
101 spin_unlock_irqrestore(&phba->hbalock, flags);
102
103 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
104 evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
105 if (!evt_posted)
106 phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
107 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
108
109 if (!evt_posted)
110 lpfc_worker_wake_up(phba);
111 return;
112 }
113
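/*
 * Worker-thread handler for the WORKER_RAMP_DOWN_QUEUE event. Lowers
 * the queue depth of every scsi device on every vport in proportion to
 * the resource errors recorded since the last adjustment.
 */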
114 void
115 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
116 {
117 struct lpfc_vport **vports;
118 struct Scsi_Host *shost;
119 struct scsi_device *sdev;
120 unsigned long new_queue_depth;
121 unsigned long num_rsrc_err, num_cmd_success;
122 int i;
123
124 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
125 num_cmd_success = atomic_read(&phba->num_cmd_success);
126
127 vports = lpfc_create_vport_work_array(phba);
128 if (vports != NULL)
129 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
130 shost = lpfc_shost_from_vport(vports[i]);
131 shost_for_each_device(sdev, shost) {
132 new_queue_depth =
133 sdev->queue_depth * num_rsrc_err /
134 (num_rsrc_err + num_cmd_success);
135 if (!new_queue_depth)
136 new_queue_depth = sdev->queue_depth - 1;
137 else
138 new_queue_depth = sdev->queue_depth -
139 new_queue_depth;
140 if (sdev->ordered_tags)
141 scsi_adjust_queue_depth(sdev,
142 MSG_ORDERED_TAG,
143 new_queue_depth);
144 else
145 scsi_adjust_queue_depth(sdev,
146 MSG_SIMPLE_TAG,
147 new_queue_depth);
148 }
149 }
150 lpfc_destroy_vport_work_array(phba, vports);
151 atomic_set(&phba->num_rsrc_err, 0);
152 atomic_set(&phba->num_cmd_success, 0);
153 }
154
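/*
 * Worker-thread handler for the WORKER_RAMP_UP_QUEUE event. Increases
 * the queue depth of each scsi device by one, up to the configured
 * LUN queue depth.
 */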
155 void
156 lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
157 {
158 struct lpfc_vport **vports;
159 struct Scsi_Host *shost;
160 struct scsi_device *sdev;
161 int i;
162
163 vports = lpfc_create_vport_work_array(phba);
164 if (vports != NULL)
165 for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
166 shost = lpfc_shost_from_vport(vports[i]);
167 shost_for_each_device(sdev, shost) {
168 if (vports[i]->cfg_lun_queue_depth <=
169 sdev->queue_depth)
170 continue;
171 if (sdev->ordered_tags)
172 scsi_adjust_queue_depth(sdev,
173 MSG_ORDERED_TAG,
174 sdev->queue_depth+1);
175 else
176 scsi_adjust_queue_depth(sdev,
177 MSG_SIMPLE_TAG,
178 sdev->queue_depth+1);
179 }
180 }
181 lpfc_destroy_vport_work_array(phba, vports);
182 atomic_set(&phba->num_rsrc_err, 0);
183 atomic_set(&phba->num_cmd_success, 0);
184 }
185
186 /*
187  * This routine allocates a scsi buffer, which contains all the
188  * information needed to initiate a SCSI I/O. The non-DMAable buffer region
189  * contains information to build the IOCB. The DMAable region contains
190  * memory for the FCP CMND, FCP RSP, and the initial BPL. In addition to
191  * allocating memory, the FCP CMND and FCP RSP BDEs are set up in the BPL
192  * and the BPL BDE is set up in the IOCB.
193 */
194 static struct lpfc_scsi_buf *
195 lpfc_new_scsi_buf(struct lpfc_vport *vport)
196 {
197 struct lpfc_hba *phba = vport->phba;
198 struct lpfc_scsi_buf *psb;
199 struct ulp_bde64 *bpl;
200 IOCB_t *iocb;
201 dma_addr_t pdma_phys_fcp_cmd;
202 dma_addr_t pdma_phys_fcp_rsp;
203 dma_addr_t pdma_phys_bpl;
204 uint16_t iotag;
205
206 psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
207 if (!psb)
208 return NULL;
209
210 /*
211 * Get memory from the pci pool to map the virt space to pci bus space
212 * for an I/O. The DMA buffer includes space for the struct fcp_cmnd,
213 * struct fcp_rsp and the number of bde's necessary to support the
214 * sg_tablesize.
215 */
216 psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
217 &psb->dma_handle);
218 if (!psb->data) {
219 kfree(psb);
220 return NULL;
221 }
222
223 /* Initialize virtual ptrs to dma_buf region. */
224 memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
225
226 /* Allocate iotag for psb->cur_iocbq. */
227 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
228 if (iotag == 0) {
229 pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
230 psb->data, psb->dma_handle);
231 kfree (psb);
232 return NULL;
233 }
234 psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
235
236 psb->fcp_cmnd = psb->data;
237 psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
238 psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
239 sizeof(struct fcp_rsp);
240
241 /* Initialize local short-hand pointers. */
242 bpl = psb->fcp_bpl;
243 pdma_phys_fcp_cmd = psb->dma_handle;
244 pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
245 pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
246 sizeof(struct fcp_rsp);
247
248 /*
249 * The first two bdes are the FCP_CMD and FCP_RSP. The balance are sg
250 * list bdes. Initialize the first two and leave the rest for
251 * queuecommand.
252 */
253 bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
254 bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
255 bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
256 bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
257 bpl[0].tus.w = le32_to_cpu(bpl->tus.w);
258
259 /* Setup the physical region for the FCP RSP */
260 bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
261 bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
262 bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
263 bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
264 bpl[1].tus.w = le32_to_cpu(bpl->tus.w);
265
266 /*
267 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
268 * initialize it with all known data now.
269 */
270 iocb = &psb->cur_iocbq.iocb;
271 iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
272 if (phba->sli_rev == 3) {
273 /* fill in immediate fcp command BDE */
274 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
275 iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
276 iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
277 unsli3.fcp_ext.icd);
278 iocb->un.fcpi64.bdl.addrHigh = 0;
279 iocb->ulpBdeCount = 0;
280 iocb->ulpLe = 0;
281                 /* fill in response BDE */
282 iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
283 iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
284 sizeof(struct fcp_rsp);
285 iocb->unsli3.fcp_ext.rbde.addrLow =
286 putPaddrLow(pdma_phys_fcp_rsp);
287 iocb->unsli3.fcp_ext.rbde.addrHigh =
288 putPaddrHigh(pdma_phys_fcp_rsp);
289 } else {
290 iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
291 iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
292 iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
293 iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
294 iocb->ulpBdeCount = 1;
295 iocb->ulpLe = 1;
296 }
297 iocb->ulpClass = CLASS3;
298
299 return psb;
300 }
301
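/*
 * Take a pre-allocated scsi buffer from the head of the HBA's
 * lpfc_scsi_buf_list under the scsi_buf_list_lock. Returns NULL if
 * the list is empty.
 */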
302 static struct lpfc_scsi_buf*
303 lpfc_get_scsi_buf(struct lpfc_hba * phba)
304 {
305 struct lpfc_scsi_buf * lpfc_cmd = NULL;
306 struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
307 unsigned long iflag = 0;
308
309 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
310 list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
311 if (lpfc_cmd) {
312 lpfc_cmd->seg_cnt = 0;
313 lpfc_cmd->nonsg_phys = 0;
314 }
315 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
316 return lpfc_cmd;
317 }
318
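/*
 * Return a scsi buffer to the tail of the HBA's lpfc_scsi_buf_list,
 * clearing its pCmd pointer first.
 */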
319 static void
320 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
321 {
322 unsigned long iflag = 0;
323
324 spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
325 psb->pCmd = NULL;
326 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
327 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
328 }
329
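/*
 * DMA-map the scatter-gather list of the SCSI command and build a BDE
 * for each mapped segment. On SLI-3 the BDEs are placed directly in
 * the extended IOCB when they fit; otherwise (and on SLI-2) they are
 * added to the BPL. Returns non-zero if the mapping fails or exceeds
 * the configured segment count.
 */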
330 static int
331 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
332 {
333 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
334 struct scatterlist *sgel = NULL;
335 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
336 struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
337 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
338 struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
339 dma_addr_t physaddr;
340 uint32_t num_bde = 0;
341 int nseg, datadir = scsi_cmnd->sc_data_direction;
342
343 /*
344 * There are three possibilities here - use scatter-gather segment, use
345 * the single mapping, or neither. Start the lpfc command prep by
346 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
347 * data bde entry.
348 */
349 bpl += 2;
350 if (scsi_sg_count(scsi_cmnd)) {
351 /*
352  * The driver stores the segment count returned from dma_map_sg
353  * because this is a count of the dma-mappings used to map the use_sg
354  * pages. The two counts are not guaranteed to be the same for
355  * architectures that implement an IOMMU.
356 */
357
358 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
359 scsi_sg_count(scsi_cmnd), datadir);
360 if (unlikely(!nseg))
361 return 1;
362
363 lpfc_cmd->seg_cnt = nseg;
364 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
365 printk(KERN_ERR "%s: Too many sg segments from "
366                                 "dma_map_sg. Config %d, seg_cnt %d\n",
367 __func__, phba->cfg_sg_seg_cnt,
368 lpfc_cmd->seg_cnt);
369 scsi_dma_unmap(scsi_cmnd);
370 return 1;
371 }
372
373 /*
374 * The driver established a maximum scatter-gather segment count
375 * during probe that limits the number of sg elements in any
376 * single scsi command. Just run through the seg_cnt and format
377 * the bde's.
378 * When using SLI-3 the driver will try to fit all the BDEs into
379 * the IOCB. If it can't then the BDEs get added to a BPL as it
380 * does for SLI-2 mode.
381 */
382 scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
383 physaddr = sg_dma_address(sgel);
384 if (phba->sli_rev == 3 &&
385 nseg <= LPFC_EXT_DATA_BDE_COUNT) {
386 data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
387 data_bde->tus.f.bdeSize = sg_dma_len(sgel);
388 data_bde->addrLow = putPaddrLow(physaddr);
389 data_bde->addrHigh = putPaddrHigh(physaddr);
390 data_bde++;
391 } else {
392 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
393 bpl->tus.f.bdeSize = sg_dma_len(sgel);
394 bpl->tus.w = le32_to_cpu(bpl->tus.w);
395 bpl->addrLow =
396 le32_to_cpu(putPaddrLow(physaddr));
397 bpl->addrHigh =
398 le32_to_cpu(putPaddrHigh(physaddr));
399 bpl++;
400 }
401 }
402 }
403
404 /*
405 * Finish initializing those IOCB fields that are dependent on the
406 * scsi_cmnd request_buffer. Note that for SLI-2 the bdeSize is
407 * explicitly reinitialized and for SLI-3 the extended bde count is
408 * explicitly reinitialized since all iocb memory resources are reused.
409 */
410 if (phba->sli_rev == 3) {
411 if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
412 /*
413 * The extended IOCB format can only fit 3 BDE or a BPL.
414 * This I/O has more than 3 BDE so the 1st data bde will
415 * be a BPL that is filled in here.
416 */
417 physaddr = lpfc_cmd->dma_handle;
418 data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
419 data_bde->tus.f.bdeSize = (num_bde *
420 sizeof(struct ulp_bde64));
421 physaddr += (sizeof(struct fcp_cmnd) +
422 sizeof(struct fcp_rsp) +
423 (2 * sizeof(struct ulp_bde64)));
424 data_bde->addrHigh = putPaddrHigh(physaddr);
425 data_bde->addrLow = putPaddrLow(physaddr);
426                         /* ebde count includes the response bde and data bpl */
427 iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
428 } else {
429                         /* ebde count includes the response bde and data bdes */
430 iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
431 }
432 } else {
433 iocb_cmd->un.fcpi64.bdl.bdeSize =
434 ((num_bde + 2) * sizeof(struct ulp_bde64));
435 }
436 fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
437 return 0;
438 }
439
440 static void
441 lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
442 {
443 /*
444 * There are only two special cases to consider. (1) the scsi command
445 * requested scatter-gather usage or (2) the scsi command allocated
446 * a request buffer, but did not request use_sg. There is a third
447 * case, but it does not require resource deallocation.
448 */
449 if (psb->seg_cnt > 0)
450 scsi_dma_unmap(psb->pCmd);
451 }
452
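/*
 * Translate the FCP response for a failed command (sense data,
 * response info, residual counts) into a SCSI midlayer result and copy
 * any sense data into the scsi_cmnd.
 */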
453 static void
454 lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
455 struct lpfc_iocbq *rsp_iocb)
456 {
457 struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
458 struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
459 struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
460 uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
461 uint32_t resp_info = fcprsp->rspStatus2;
462 uint32_t scsi_status = fcprsp->rspStatus3;
463 uint32_t *lp;
464 uint32_t host_status = DID_OK;
465 uint32_t rsplen = 0;
466 uint32_t logit = LOG_FCP | LOG_FCP_ERROR;
467
468 /*
469 * If this is a task management command, there is no
470 * scsi packet associated with this lpfc_cmd. The driver
471 * consumes it.
472 */
473 if (fcpcmd->fcpCntl2) {
474 scsi_status = 0;
475 goto out;
476 }
477
478 if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
479 uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
480 if (snslen > SCSI_SENSE_BUFFERSIZE)
481 snslen = SCSI_SENSE_BUFFERSIZE;
482
483 if (resp_info & RSP_LEN_VALID)
484 rsplen = be32_to_cpu(fcprsp->rspRspLen);
485 memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
486 }
487 lp = (uint32_t *)cmnd->sense_buffer;
488
489 if (!scsi_status && (resp_info & RESID_UNDER))
490 logit = LOG_FCP;
491
492 lpfc_printf_vlog(vport, KERN_WARNING, logit,
493 "0730 FCP command x%x failed: x%x SNS x%x x%x "
494 "Data: x%x x%x x%x x%x x%x\n",
495 cmnd->cmnd[0], scsi_status,
496 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
497 be32_to_cpu(fcprsp->rspResId),
498 be32_to_cpu(fcprsp->rspSnsLen),
499 be32_to_cpu(fcprsp->rspRspLen),
500 fcprsp->rspInfo3);
501
502 if (resp_info & RSP_LEN_VALID) {
503 rsplen = be32_to_cpu(fcprsp->rspRspLen);
504 if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
505 (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
506 host_status = DID_ERROR;
507 goto out;
508 }
509 }
510
511 scsi_set_resid(cmnd, 0);
512 if (resp_info & RESID_UNDER) {
513 scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));
514
515 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
516 "0716 FCP Read Underrun, expected %d, "
517 "residual %d Data: x%x x%x x%x\n",
518 be32_to_cpu(fcpcmd->fcpDl),
519 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
520 cmnd->underflow);
521
522 /*
523                  * If there is an underrun, check whether the underrun reported
524                  * by the storage array matches the underrun reported by the HBA.
525                  * If they do not match, a frame was dropped.
526 */
527 if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
528 fcpi_parm &&
529 (scsi_get_resid(cmnd) != fcpi_parm)) {
530 lpfc_printf_vlog(vport, KERN_WARNING,
531 LOG_FCP | LOG_FCP_ERROR,
532 "0735 FCP Read Check Error "
533 "and Underrun Data: x%x x%x x%x x%x\n",
534 be32_to_cpu(fcpcmd->fcpDl),
535 scsi_get_resid(cmnd), fcpi_parm,
536 cmnd->cmnd[0]);
537 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
538 host_status = DID_ERROR;
539 }
540 /*
541 * The cmnd->underflow is the minimum number of bytes that must
542                  * be transferred for this command. Provided a sense condition
543 * is not present, make sure the actual amount transferred is at
544 * least the underflow value or fail.
545 */
546 if (!(resp_info & SNS_LEN_VALID) &&
547 (scsi_status == SAM_STAT_GOOD) &&
548 (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
549 < cmnd->underflow)) {
550 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
551 "0717 FCP command x%x residual "
552 "underrun converted to error "
553 "Data: x%x x%x x%x\n",
554 cmnd->cmnd[0], scsi_bufflen(cmnd),
555 scsi_get_resid(cmnd), cmnd->underflow);
556 host_status = DID_ERROR;
557 }
558 } else if (resp_info & RESID_OVER) {
559 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
560 "0720 FCP command x%x residual overrun error. "
561 "Data: x%x x%x \n", cmnd->cmnd[0],
562 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
563 host_status = DID_ERROR;
564
565 /*
566 * Check SLI validation that all the transfer was actually done
567 * (fcpi_parm should be zero). Apply check only to reads.
568 */
569 } else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
570 (cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
571 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
572 "0734 FCP Read Check Error Data: "
573 "x%x x%x x%x x%x\n",
574 be32_to_cpu(fcpcmd->fcpDl),
575 be32_to_cpu(fcprsp->rspResId),
576 fcpi_parm, cmnd->cmnd[0]);
577 host_status = DID_ERROR;
578 scsi_set_resid(cmnd, scsi_bufflen(cmnd));
579 }
580
581 out:
582 cmnd->result = ScsiResult(host_status, scsi_status);
583 }
584
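/*
 * IOCB completion handler for FCP commands. Maps the IOCB status to a
 * SCSI midlayer result, completes the command, adjusts queue depths on
 * success or queue-full conditions, and releases the scsi buffer.
 */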
585 static void
586 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
587 struct lpfc_iocbq *pIocbOut)
588 {
589 struct lpfc_scsi_buf *lpfc_cmd =
590 (struct lpfc_scsi_buf *) pIocbIn->context1;
591 struct lpfc_vport *vport = pIocbIn->vport;
592 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
593 struct lpfc_nodelist *pnode = rdata->pnode;
594 struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
595 int result;
596 struct scsi_device *sdev, *tmp_sdev;
597 int depth = 0;
598 unsigned long flags;
599
600 lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
601 lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
602
603 if (lpfc_cmd->status) {
604 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
605 (lpfc_cmd->result & IOERR_DRVR_MASK))
606 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
607 else if (lpfc_cmd->status >= IOSTAT_CNT)
608 lpfc_cmd->status = IOSTAT_DEFAULT;
609
610 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
611 "0729 FCP cmd x%x failed <%d/%d> "
612 "status: x%x result: x%x Data: x%x x%x\n",
613 cmd->cmnd[0],
614 cmd->device ? cmd->device->id : 0xffff,
615 cmd->device ? cmd->device->lun : 0xffff,
616 lpfc_cmd->status, lpfc_cmd->result,
617 pIocbOut->iocb.ulpContext,
618 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);
619
620 switch (lpfc_cmd->status) {
621 case IOSTAT_FCP_RSP_ERROR:
622 /* Call FCP RSP handler to determine result */
623 lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
624 break;
625 case IOSTAT_NPORT_BSY:
626 case IOSTAT_FABRIC_BSY:
627 cmd->result = ScsiResult(DID_BUS_BUSY, 0);
628 break;
629 case IOSTAT_LOCAL_REJECT:
630 if (lpfc_cmd->result == IOERR_INVALID_RPI ||
631 lpfc_cmd->result == IOERR_NO_RESOURCES ||
632 lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
633 cmd->result = ScsiResult(DID_REQUEUE, 0);
634 break;
635 } /* else: fall through */
636 default:
637 cmd->result = ScsiResult(DID_ERROR, 0);
638 break;
639 }
640
641 if (!pnode || !NLP_CHK_NODE_ACT(pnode)
642 || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
643 cmd->result = ScsiResult(DID_BUS_BUSY, SAM_STAT_BUSY);
644 } else {
645 cmd->result = ScsiResult(DID_OK, 0);
646 }
647
648 if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
649 uint32_t *lp = (uint32_t *)cmd->sense_buffer;
650
651 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
652 "0710 Iodone <%d/%d> cmd %p, error "
653 "x%x SNS x%x x%x Data: x%x x%x\n",
654 cmd->device->id, cmd->device->lun, cmd,
655 cmd->result, *lp, *(lp + 3), cmd->retries,
656 scsi_get_resid(cmd));
657 }
658
659 result = cmd->result;
660 sdev = cmd->device;
661 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
662 cmd->scsi_done(cmd);
663
664 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
665 /*
666 * If there is a thread waiting for command completion
667 * wake up the thread.
668 */
669 spin_lock_irqsave(sdev->host->host_lock, flags);
670 lpfc_cmd->pCmd = NULL;
671 if (lpfc_cmd->waitq)
672 wake_up(lpfc_cmd->waitq);
673 spin_unlock_irqrestore(sdev->host->host_lock, flags);
674 lpfc_release_scsi_buf(phba, lpfc_cmd);
675 return;
676 }
677
678
679 if (!result)
680 lpfc_rampup_queue_depth(vport, sdev);
681
682 if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
683 ((jiffies - pnode->last_ramp_up_time) >
684 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
685 ((jiffies - pnode->last_q_full_time) >
686 LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
687 (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
688 shost_for_each_device(tmp_sdev, sdev->host) {
689 if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth){
690 if (tmp_sdev->id != sdev->id)
691 continue;
692 if (tmp_sdev->ordered_tags)
693 scsi_adjust_queue_depth(tmp_sdev,
694 MSG_ORDERED_TAG,
695 tmp_sdev->queue_depth+1);
696 else
697 scsi_adjust_queue_depth(tmp_sdev,
698 MSG_SIMPLE_TAG,
699 tmp_sdev->queue_depth+1);
700
701 pnode->last_ramp_up_time = jiffies;
702 }
703 }
704 }
705
706 /*
707 * Check for queue full. If the lun is reporting queue full, then
708 * back off the lun queue depth to prevent target overloads.
709 */
710 if (result == SAM_STAT_TASK_SET_FULL && pnode &&
711 NLP_CHK_NODE_ACT(pnode)) {
712 pnode->last_q_full_time = jiffies;
713
714 shost_for_each_device(tmp_sdev, sdev->host) {
715 if (tmp_sdev->id != sdev->id)
716 continue;
717 depth = scsi_track_queue_full(tmp_sdev,
718 tmp_sdev->queue_depth - 1);
719 }
720 /*
721 * The queue depth cannot be lowered any more.
722 * Modify the returned error code to store
723 * the final depth value set by
724 * scsi_track_queue_full.
725 */
726 if (depth == -1)
727 depth = sdev->host->cmd_per_lun;
728
729 if (depth) {
730 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
731 "0711 detected queue full - lun queue "
732 "depth adjusted to %d.\n", depth);
733 }
734 }
735
736 /*
737 * If there is a thread waiting for command completion
738 * wake up the thread.
739 */
740 spin_lock_irqsave(sdev->host->host_lock, flags);
741 lpfc_cmd->pCmd = NULL;
742 if (lpfc_cmd->waitq)
743 wake_up(lpfc_cmd->waitq);
744 spin_unlock_irqrestore(sdev->host->host_lock, flags);
745
746 lpfc_release_scsi_buf(phba, lpfc_cmd);
747 }
748
749 /**
750 * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB.
751 * @data: A pointer to the immediate command data portion of the IOCB.
752 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
753 *
754 * The routine copies the entire FCP command from @fcp_cmnd to @data while
755 * byte swapping the data to big endian format for transmission on the wire.
756 **/
757 static void
758 lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
759 {
760 int i, j;
761 for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
762 i += sizeof(uint32_t), j++) {
763 ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
764 }
765 }
766
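/*
 * Build the FCP CMND and the remaining IOCB fields (command type, data
 * direction, tag attribute, RPI, class) from the scsi_cmnd and the
 * destination node.
 */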
767 static void
768 lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
769 struct lpfc_nodelist *pnode)
770 {
771 struct lpfc_hba *phba = vport->phba;
772 struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
773 struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
774 IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
775 struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
776 int datadir = scsi_cmnd->sc_data_direction;
777 char tag[2];
778
779 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
780 return;
781
782 lpfc_cmd->fcp_rsp->rspSnsLen = 0;
783 /* clear task management bits */
784 lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;
785
786 int_to_scsilun(lpfc_cmd->pCmd->device->lun,
787 &lpfc_cmd->fcp_cmnd->fcp_lun);
788
789 memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
790
791 if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
792 switch (tag[0]) {
793 case HEAD_OF_QUEUE_TAG:
794 fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
795 break;
796 case ORDERED_QUEUE_TAG:
797 fcp_cmnd->fcpCntl1 = ORDERED_Q;
798 break;
799 default:
800 fcp_cmnd->fcpCntl1 = SIMPLE_Q;
801 break;
802 }
803 } else
804 fcp_cmnd->fcpCntl1 = 0;
805
806 /*
807 * There are three possibilities here - use scatter-gather segment, use
808 * the single mapping, or neither. Start the lpfc command prep by
809 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
810 * data bde entry.
811 */
812 if (scsi_sg_count(scsi_cmnd)) {
813 if (datadir == DMA_TO_DEVICE) {
814 iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
815 iocb_cmd->un.fcpi.fcpi_parm = 0;
816 iocb_cmd->ulpPU = 0;
817 fcp_cmnd->fcpCntl3 = WRITE_DATA;
818 phba->fc4OutputRequests++;
819 } else {
820 iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
821 iocb_cmd->ulpPU = PARM_READ_CHECK;
822 iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
823 fcp_cmnd->fcpCntl3 = READ_DATA;
824 phba->fc4InputRequests++;
825 }
826 } else {
827 iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
828 iocb_cmd->un.fcpi.fcpi_parm = 0;
829 iocb_cmd->ulpPU = 0;
830 fcp_cmnd->fcpCntl3 = 0;
831 phba->fc4ControlRequests++;
832 }
833 if (phba->sli_rev == 3)
834 lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
835 /*
836 * Finish initializing those IOCB fields that are independent
837 * of the scsi_cmnd request_buffer
838 */
839 piocbq->iocb.ulpContext = pnode->nlp_rpi;
840 if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
841 piocbq->iocb.ulpFCP2Rcvy = 1;
842 else
843 piocbq->iocb.ulpFCP2Rcvy = 0;
844
845 piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
846 piocbq->context1 = lpfc_cmd;
847 piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
848 piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
849 piocbq->vport = vport;
850 }
851
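/*
 * Prepare a task management FCP command (for example FCP_TARGET_RESET)
 * in the lpfc_cmd's IOCB for the given LUN. Returns 0 if the node is
 * not in a usable mapped state, 1 otherwise.
 */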
852 static int
853 lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
854 struct lpfc_scsi_buf *lpfc_cmd,
855 unsigned int lun,
856 uint8_t task_mgmt_cmd)
857 {
858 struct lpfc_iocbq *piocbq;
859 IOCB_t *piocb;
860 struct fcp_cmnd *fcp_cmnd;
861 struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
862 struct lpfc_nodelist *ndlp = rdata->pnode;
863
864 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
865 ndlp->nlp_state != NLP_STE_MAPPED_NODE)
866 return 0;
867
868 piocbq = &(lpfc_cmd->cur_iocbq);
869 piocbq->vport = vport;
870
871 piocb = &piocbq->iocb;
872
873 fcp_cmnd = lpfc_cmd->fcp_cmnd;
874 /* Clear out any old data in the FCP command area */
875 memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
876 int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
877 fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
878 if (vport->phba->sli_rev == 3)
879 lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
880 piocb->ulpCommand = CMD_FCP_ICMND64_CR;
881 piocb->ulpContext = ndlp->nlp_rpi;
882 if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
883 piocb->ulpFCP2Rcvy = 1;
884 }
885 piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);
886
887 /* ulpTimeout is only one byte */
888 if (lpfc_cmd->timeout > 0xff) {
889 /*
890 * Do not timeout the command at the firmware level.
891 * The driver will provide the timeout mechanism.
892 */
893 piocb->ulpTimeout = 0;
894 } else {
895 piocb->ulpTimeout = lpfc_cmd->timeout;
896 }
897
898 return 1;
899 }
900
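/*
 * Default completion handler assigned to timed-out task management
 * IOCBs; it simply releases the associated scsi buffer.
 */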
901 static void
902 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
903 struct lpfc_iocbq *cmdiocbq,
904 struct lpfc_iocbq *rspiocbq)
905 {
906 struct lpfc_scsi_buf *lpfc_cmd =
907 (struct lpfc_scsi_buf *) cmdiocbq->context1;
908 if (lpfc_cmd)
909 lpfc_release_scsi_buf(phba, lpfc_cmd);
910 return;
911 }
912
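/*
 * Issue an FCP target reset to the given target and wait for it to
 * complete. Returns SUCCESS, FAILED, or TIMEOUT_ERROR.
 */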
913 static int
914 lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
915 unsigned tgt_id, unsigned int lun,
916 struct lpfc_rport_data *rdata)
917 {
918 struct lpfc_hba *phba = vport->phba;
919 struct lpfc_iocbq *iocbq;
920 struct lpfc_iocbq *iocbqrsp;
921 int ret;
922 int status;
923
924 if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
925 return FAILED;
926
927 lpfc_cmd->rdata = rdata;
928 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
929 FCP_TARGET_RESET);
930 if (!status)
931 return FAILED;
932
933 iocbq = &lpfc_cmd->cur_iocbq;
934 iocbqrsp = lpfc_sli_get_iocbq(phba);
935
936 if (!iocbqrsp)
937 return FAILED;
938
939 /* Issue Target Reset to TGT <num> */
940 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
941 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
942 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
943 status = lpfc_sli_issue_iocb_wait(phba,
944 &phba->sli.ring[phba->sli.fcp_ring],
945 iocbq, iocbqrsp, lpfc_cmd->timeout);
946 if (status != IOCB_SUCCESS) {
947 if (status == IOCB_TIMEDOUT) {
948 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
949 ret = TIMEOUT_ERROR;
950 } else
951 ret = FAILED;
952 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
953 } else {
954 ret = SUCCESS;
955 lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
956 lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
957 if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
958 (lpfc_cmd->result & IOERR_DRVR_MASK))
959 lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
960 }
961
962 lpfc_sli_release_iocbq(phba, iocbqrsp);
963 return ret;
964 }
965
966 const char *
967 lpfc_info(struct Scsi_Host *host)
968 {
969 struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
970 struct lpfc_hba *phba = vport->phba;
971 int len;
972 static char lpfcinfobuf[384];
973
974 memset(lpfcinfobuf,0,384);
975 if (phba && phba->pcidev){
976 strncpy(lpfcinfobuf, phba->ModelDesc, 256);
977 len = strlen(lpfcinfobuf);
978 snprintf(lpfcinfobuf + len,
979 384-len,
980 " on PCI bus %02x device %02x irq %d",
981 phba->pcidev->bus->number,
982 phba->pcidev->devfn,
983 phba->pcidev->irq);
984 len = strlen(lpfcinfobuf);
985 if (phba->Port[0]) {
986 snprintf(lpfcinfobuf + len,
987 384-len,
988 " port %s",
989 phba->Port);
990 }
991 }
992 return lpfcinfobuf;
993 }
994
995 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
996 {
997 unsigned long poll_tmo_expires =
998 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
999
1000 if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
1001 mod_timer(&phba->fcp_poll_timer,
1002 poll_tmo_expires);
1003 }
1004
1005 void lpfc_poll_start_timer(struct lpfc_hba * phba)
1006 {
1007 lpfc_poll_rearm_timer(phba);
1008 }
1009
1010 void lpfc_poll_timeout(unsigned long ptr)
1011 {
1012 struct lpfc_hba *phba = (struct lpfc_hba *) ptr;
1013
1014 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1015 lpfc_sli_poll_fcp_ring (phba);
1016 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1017 lpfc_poll_rearm_timer(phba);
1018 }
1019 }
1020
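/*
 * SCSI midlayer queuecommand entry point. Validates the remote port
 * and node state, takes a scsi buffer, prepares the DMA mappings and
 * the IOCB, and issues the command to the FCP ring.
 */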
1021 static int
1022 lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
1023 {
1024 struct Scsi_Host *shost = cmnd->device->host;
1025 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1026 struct lpfc_hba *phba = vport->phba;
1027 struct lpfc_sli *psli = &phba->sli;
1028 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1029 struct lpfc_nodelist *ndlp = rdata->pnode;
1030 struct lpfc_scsi_buf *lpfc_cmd;
1031 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1032 int err;
1033
1034 err = fc_remote_port_chkready(rport);
1035 if (err) {
1036 cmnd->result = err;
1037 goto out_fail_command;
1038 }
1039
1040 /*
1041 * Catch race where our node has transitioned, but the
1042 * transport is still transitioning.
1043 */
1044 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
1045 goto out_target_busy;
1046
1047 lpfc_cmd = lpfc_get_scsi_buf(phba);
1048 if (lpfc_cmd == NULL) {
1049 lpfc_adjust_queue_depth(phba);
1050
1051 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1052 "0707 driver's buffer pool is empty, "
1053 "IO busied\n");
1054 goto out_host_busy;
1055 }
1056
1057 /*
1058 * Store the midlayer's command structure for the completion phase
1059 * and complete the command initialization.
1060 */
1061 lpfc_cmd->pCmd = cmnd;
1062 lpfc_cmd->rdata = rdata;
1063 lpfc_cmd->timeout = 0;
1064 cmnd->host_scribble = (unsigned char *)lpfc_cmd;
1065 cmnd->scsi_done = done;
1066
1067 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
1068 if (err)
1069 goto out_host_busy_free_buf;
1070
1071 lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
1072
1073 err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
1074 &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
1075 if (err)
1076 goto out_host_busy_free_buf;
1077
1078 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1079 lpfc_sli_poll_fcp_ring(phba);
1080 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1081 lpfc_poll_rearm_timer(phba);
1082 }
1083
1084 return 0;
1085
1086 out_host_busy_free_buf:
1087 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
1088 lpfc_release_scsi_buf(phba, lpfc_cmd);
1089 out_host_busy:
1090 return SCSI_MLQUEUE_HOST_BUSY;
1091 out_target_busy:
1092 return SCSI_MLQUEUE_TARGET_BUSY;
1093
1094 out_fail_command:
1095 done(cmnd);
1096 return 0;
1097 }
1098
1099 static void
1100 lpfc_block_error_handler(struct scsi_cmnd *cmnd)
1101 {
1102 struct Scsi_Host *shost = cmnd->device->host;
1103 struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
1104
1105 spin_lock_irq(shost->host_lock);
1106 while (rport->port_state == FC_PORTSTATE_BLOCKED) {
1107 spin_unlock_irq(shost->host_lock);
1108 msleep(1000);
1109 spin_lock_irq(shost->host_lock);
1110 }
1111 spin_unlock_irq(shost->host_lock);
1112 return;
1113 }
1114
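/*
 * SCSI eh_abort_handler entry point. Sends an abort (ABORT_XRI when
 * the link is up, CLOSE_XRI otherwise) for the outstanding IOCB and
 * waits for the aborted command to complete.
 */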
1115 static int
1116 lpfc_abort_handler(struct scsi_cmnd *cmnd)
1117 {
1118 struct Scsi_Host *shost = cmnd->device->host;
1119 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1120 struct lpfc_hba *phba = vport->phba;
1121 struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
1122 struct lpfc_iocbq *iocb;
1123 struct lpfc_iocbq *abtsiocb;
1124 struct lpfc_scsi_buf *lpfc_cmd;
1125 IOCB_t *cmd, *icmd;
1126 int ret = SUCCESS;
1127 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
1128
1129 lpfc_block_error_handler(cmnd);
1130 lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
1131 BUG_ON(!lpfc_cmd);
1132
1133 /*
1134 * If pCmd field of the corresponding lpfc_scsi_buf structure
1135 * points to a different SCSI command, then the driver has
1136 * already completed this command, but the midlayer did not
1137 * see the completion before the eh fired. Just return
1138 * SUCCESS.
1139 */
1140 iocb = &lpfc_cmd->cur_iocbq;
1141 if (lpfc_cmd->pCmd != cmnd)
1142 goto out;
1143
1144 BUG_ON(iocb->context1 != lpfc_cmd);
1145
1146 abtsiocb = lpfc_sli_get_iocbq(phba);
1147 if (abtsiocb == NULL) {
1148 ret = FAILED;
1149 goto out;
1150 }
1151
1152 /*
1153          * The scsi command cannot be in the txq; it is in flight because
1154          * pCmd is still pointing at the SCSI command we have to abort. There
1155          * is no need to search the txcmplq. Just send an abort to the FW.
1156 */
1157
1158 cmd = &iocb->iocb;
1159 icmd = &abtsiocb->iocb;
1160 icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
1161 icmd->un.acxri.abortContextTag = cmd->ulpContext;
1162 icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
1163
1164 icmd->ulpLe = 1;
1165 icmd->ulpClass = cmd->ulpClass;
1166 if (lpfc_is_link_up(phba))
1167 icmd->ulpCommand = CMD_ABORT_XRI_CN;
1168 else
1169 icmd->ulpCommand = CMD_CLOSE_XRI_CN;
1170
1171 abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
1172 abtsiocb->vport = vport;
1173 if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
1174 lpfc_sli_release_iocbq(phba, abtsiocb);
1175 ret = FAILED;
1176 goto out;
1177 }
1178
1179 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1180 lpfc_sli_poll_fcp_ring (phba);
1181
1182 lpfc_cmd->waitq = &waitq;
1183 /* Wait for abort to complete */
1184 wait_event_timeout(waitq,
1185 (lpfc_cmd->pCmd != cmnd),
1186 (2*vport->cfg_devloss_tmo*HZ));
1187
1188 spin_lock_irq(shost->host_lock);
1189 lpfc_cmd->waitq = NULL;
1190 spin_unlock_irq(shost->host_lock);
1191
1192 if (lpfc_cmd->pCmd == cmnd) {
1193 ret = FAILED;
1194 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1195 "0748 abort handler timed out waiting "
1196 "for abort to complete: ret %#x, ID %d, "
1197 "LUN %d, snum %#lx\n",
1198 ret, cmnd->device->id, cmnd->device->lun,
1199 cmnd->serial_number);
1200 }
1201
1202 out:
1203 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1204 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
1205 "LUN %d snum %#lx\n", ret, cmnd->device->id,
1206 cmnd->device->lun, cmnd->serial_number);
1207 return ret;
1208 }
1209
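/*
 * SCSI eh_device_reset_handler entry point. Waits for the target node
 * to reach the MAPPED state, issues an FCP target reset, and then
 * flushes any I/O still outstanding to the target.
 */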
1210 static int
1211 lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
1212 {
1213 struct Scsi_Host *shost = cmnd->device->host;
1214 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1215 struct lpfc_hba *phba = vport->phba;
1216 struct lpfc_scsi_buf *lpfc_cmd;
1217 struct lpfc_iocbq *iocbq, *iocbqrsp;
1218 struct lpfc_rport_data *rdata = cmnd->device->hostdata;
1219 struct lpfc_nodelist *pnode = rdata->pnode;
1220 unsigned long later;
1221 int ret = SUCCESS;
1222 int status;
1223 int cnt;
1224
1225 lpfc_block_error_handler(cmnd);
1226 /*
1227 * If target is not in a MAPPED state, delay the reset until
1228 * target is rediscovered or devloss timeout expires.
1229 */
1230 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1231 while (time_after(later, jiffies)) {
1232 if (!pnode || !NLP_CHK_NODE_ACT(pnode))
1233 return FAILED;
1234 if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
1235 break;
1236 schedule_timeout_uninterruptible(msecs_to_jiffies(500));
1237 rdata = cmnd->device->hostdata;
1238 if (!rdata)
1239 break;
1240 pnode = rdata->pnode;
1241 }
1242 if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
1243 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1244 "0721 LUN Reset rport "
1245 "failure: msec x%x rdata x%p\n",
1246 jiffies_to_msecs(jiffies - later), rdata);
1247 return FAILED;
1248 }
1249 lpfc_cmd = lpfc_get_scsi_buf(phba);
1250 if (lpfc_cmd == NULL)
1251 return FAILED;
1252 lpfc_cmd->timeout = 60;
1253 lpfc_cmd->rdata = rdata;
1254
1255 status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
1256 cmnd->device->lun,
1257 FCP_TARGET_RESET);
1258 if (!status) {
1259 lpfc_release_scsi_buf(phba, lpfc_cmd);
1260 return FAILED;
1261 }
1262 iocbq = &lpfc_cmd->cur_iocbq;
1263
1264 /* get a buffer for this IOCB command response */
1265 iocbqrsp = lpfc_sli_get_iocbq(phba);
1266 if (iocbqrsp == NULL) {
1267 lpfc_release_scsi_buf(phba, lpfc_cmd);
1268 return FAILED;
1269 }
1270 lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
1271 "0703 Issue target reset to TGT %d LUN %d "
1272 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
1273 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
1274 status = lpfc_sli_issue_iocb_wait(phba,
1275 &phba->sli.ring[phba->sli.fcp_ring],
1276 iocbq, iocbqrsp, lpfc_cmd->timeout);
1277 if (status == IOCB_TIMEDOUT) {
1278 iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
1279 ret = TIMEOUT_ERROR;
1280 } else {
1281 if (status != IOCB_SUCCESS)
1282 ret = FAILED;
1283 lpfc_release_scsi_buf(phba, lpfc_cmd);
1284 }
1285 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1286 "0713 SCSI layer issued device reset (%d, %d) "
1287 "return x%x status x%x result x%x\n",
1288 cmnd->device->id, cmnd->device->lun, ret,
1289 iocbqrsp->iocb.ulpStatus,
1290 iocbqrsp->iocb.un.ulpWord[4]);
1291 lpfc_sli_release_iocbq(phba, iocbqrsp);
1292 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
1293 LPFC_CTX_TGT);
1294 if (cnt)
1295 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1296 cmnd->device->id, cmnd->device->lun,
1297 LPFC_CTX_TGT);
1298 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1299 while (time_after(later, jiffies) && cnt) {
1300 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
1301 cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
1302 cmnd->device->lun, LPFC_CTX_TGT);
1303 }
1304 if (cnt) {
1305 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1306 "0719 device reset I/O flush failure: "
1307 "cnt x%x\n", cnt);
1308 ret = FAILED;
1309 }
1310 return ret;
1311 }
1312
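/*
 * SCSI eh_bus_reset_handler entry point. Issues a target reset to
 * every mapped target known to the driver and then flushes any
 * outstanding I/O on the host.
 */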
1313 static int
1314 lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
1315 {
1316 struct Scsi_Host *shost = cmnd->device->host;
1317 struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
1318 struct lpfc_hba *phba = vport->phba;
1319 struct lpfc_nodelist *ndlp = NULL;
1320 int match;
1321 int ret = SUCCESS, status = SUCCESS, i;
1322 int cnt;
1323 struct lpfc_scsi_buf * lpfc_cmd;
1324 unsigned long later;
1325
1326 lpfc_block_error_handler(cmnd);
1327 /*
1328 * Since the driver manages a single bus device, reset all
1329 * targets known to the driver. Should any target reset
1330 * fail, this routine returns failure to the midlayer.
1331 */
1332 for (i = 0; i < LPFC_MAX_TARGET; i++) {
1333 /* Search for mapped node by target ID */
1334 match = 0;
1335 spin_lock_irq(shost->host_lock);
1336 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
1337 if (!NLP_CHK_NODE_ACT(ndlp))
1338 continue;
1339 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
1340 ndlp->nlp_sid == i &&
1341 ndlp->rport) {
1342 match = 1;
1343 break;
1344 }
1345 }
1346 spin_unlock_irq(shost->host_lock);
1347 if (!match)
1348 continue;
1349 lpfc_cmd = lpfc_get_scsi_buf(phba);
1350 if (lpfc_cmd) {
1351 lpfc_cmd->timeout = 60;
1352 status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
1353 cmnd->device->lun,
1354 ndlp->rport->dd_data);
1355 if (status != TIMEOUT_ERROR)
1356 lpfc_release_scsi_buf(phba, lpfc_cmd);
1357 }
1358 if (!lpfc_cmd || status != SUCCESS) {
1359 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1360 "0700 Bus Reset on target %d failed\n",
1361 i);
1362 ret = FAILED;
1363 }
1364 }
1365 /*
1366 * All outstanding txcmplq I/Os should have been aborted by
1367 * the targets. Unfortunately, some targets do not abide by
1368          * this, forcing the driver to double check.
1369 */
1370 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
1371 if (cnt)
1372 lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
1373 0, 0, LPFC_CTX_HOST);
1374 later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
1375 while (time_after(later, jiffies) && cnt) {
1376 schedule_timeout_uninterruptible(msecs_to_jiffies(20));
1377 cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
1378 }
1379 if (cnt) {
1380 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1381 "0715 Bus Reset I/O flush failure: "
1382 "cnt x%x left x%x\n", cnt, i);
1383 ret = FAILED;
1384 }
1385 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1386 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
1387 return ret;
1388 }
1389
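/*
 * SCSI slave_alloc entry point. Verifies the remote port and
 * pre-allocates scsi buffers for the new device, bounded by the HBA
 * queue depth.
 */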
1390 static int
1391 lpfc_slave_alloc(struct scsi_device *sdev)
1392 {
1393 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
1394 struct lpfc_hba *phba = vport->phba;
1395 struct lpfc_scsi_buf *scsi_buf = NULL;
1396 struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
1397 uint32_t total = 0, i;
1398 uint32_t num_to_alloc = 0;
1399 unsigned long flags;
1400
1401 if (!rport || fc_remote_port_chkready(rport))
1402 return -ENXIO;
1403
1404 sdev->hostdata = rport->dd_data;
1405
1406 /*
1407          * Populate the cmds_per_lun count of scsi_bufs into this host's globally
1408 * available list of scsi buffers. Don't allocate more than the
1409 * HBA limit conveyed to the midlayer via the host structure. The
1410 * formula accounts for the lun_queue_depth + error handlers + 1
1411 * extra. This list of scsi bufs exists for the lifetime of the driver.
1412 */
1413 total = phba->total_scsi_bufs;
1414 num_to_alloc = vport->cfg_lun_queue_depth + 2;
1415
1416 /* Allow some exchanges to be available always to complete discovery */
1417 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
1418 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1419 "0704 At limitation of %d preallocated "
1420 "command buffers\n", total);
1421 return 0;
1422 /* Allow some exchanges to be available always to complete discovery */
1423 } else if (total + num_to_alloc >
1424 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT ) {
1425 lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
1426 "0705 Allocation request of %d "
1427 "command buffers will exceed max of %d. "
1428 "Reducing allocation request to %d.\n",
1429 num_to_alloc, phba->cfg_hba_queue_depth,
1430 (phba->cfg_hba_queue_depth - total));
1431 num_to_alloc = phba->cfg_hba_queue_depth - total;
1432 }
1433
1434 for (i = 0; i < num_to_alloc; i++) {
1435 scsi_buf = lpfc_new_scsi_buf(vport);
1436 if (!scsi_buf) {
1437 lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
1438 "0706 Failed to allocate "
1439 "command buffer\n");
1440 break;
1441 }
1442
1443 spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
1444 phba->total_scsi_bufs++;
1445 list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
1446 spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
1447 }
1448 return 0;
1449 }
1450
1451 static int
1452 lpfc_slave_configure(struct scsi_device *sdev)
1453 {
1454 struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
1455 struct lpfc_hba *phba = vport->phba;
1456 struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
1457
1458 if (sdev->tagged_supported)
1459 scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
1460 else
1461 scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);
1462
1463 /*
1464 * Initialize the fc transport attributes for the target
1465 * containing this scsi device. Also note that the driver's
1466 * target pointer is stored in the starget_data for the
1467 * driver's sysfs entry point functions.
1468 */
1469 rport->dev_loss_tmo = vport->cfg_devloss_tmo;
1470
1471 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1472 lpfc_sli_poll_fcp_ring(phba);
1473 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
1474 lpfc_poll_rearm_timer(phba);
1475 }
1476
1477 return 0;
1478 }
1479
1480 static void
1481 lpfc_slave_destroy(struct scsi_device *sdev)
1482 {
1483 sdev->hostdata = NULL;
1484 return;
1485 }
1486
1487
1488 struct scsi_host_template lpfc_template = {
1489 .module = THIS_MODULE,
1490 .name = LPFC_DRIVER_NAME,
1491 .info = lpfc_info,
1492 .queuecommand = lpfc_queuecommand,
1493 .eh_abort_handler = lpfc_abort_handler,
1494 .eh_device_reset_handler= lpfc_device_reset_handler,
1495 .eh_bus_reset_handler = lpfc_bus_reset_handler,
1496 .slave_alloc = lpfc_slave_alloc,
1497 .slave_configure = lpfc_slave_configure,
1498 .slave_destroy = lpfc_slave_destroy,
1499 .scan_finished = lpfc_scan_finished,
1500 .this_id = -1,
1501 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
1502 .cmd_per_lun = LPFC_CMD_PER_LUN,
1503 .use_clustering = ENABLE_CLUSTERING,
1504 .shost_attrs = lpfc_hba_attrs,
1505 .max_sectors = 0xFFFF,
1506 };
1507
1508 struct scsi_host_template lpfc_vport_template = {
1509 .module = THIS_MODULE,
1510 .name = LPFC_DRIVER_NAME,
1511 .info = lpfc_info,
1512 .queuecommand = lpfc_queuecommand,
1513 .eh_abort_handler = lpfc_abort_handler,
1514 .eh_device_reset_handler= lpfc_device_reset_handler,
1515 .eh_bus_reset_handler = lpfc_bus_reset_handler,
1516 .slave_alloc = lpfc_slave_alloc,
1517 .slave_configure = lpfc_slave_configure,
1518 .slave_destroy = lpfc_slave_destroy,
1519 .scan_finished = lpfc_scan_finished,
1520 .this_id = -1,
1521 .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
1522 .cmd_per_lun = LPFC_CMD_PER_LUN,
1523 .use_clustering = ENABLE_CLUSTERING,
1524 .shost_attrs = lpfc_vport_attrs,
1525 .max_sectors = 0xFFFF,
1526 };