drivers/scsi/qla2xxx/qla_bsg.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12
13 /* BSG support for ELS/CT pass through */
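/*
 * qla2x00_bsg_job_done() - SRB completion callback for BSG requests.
 * Stores the result in the bsg reply, completes the bsg job and
 * releases the SRB.
 */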
14 void
15 qla2x00_bsg_job_done(void *data, void *ptr, int res)
16 {
17 srb_t *sp = (srb_t *)ptr;
18 struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
19 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
20
21 bsg_job->reply->result = res;
22 bsg_job->job_done(bsg_job);
23 sp->free(vha, sp);
24 }
25
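/*
 * qla2x00_bsg_sp_free() - Release resources held by a BSG SRB.
 * Unmaps the request/reply scatter-gather lists (conditionally for
 * FX00 IOCBs, based on the DMA-valid flags in the vendor request),
 * frees the dummy fcport allocated for CT/host-ELS/FX commands and
 * returns the SRB to its pool.
 */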
26 void
27 qla2x00_bsg_sp_free(void *data, void *ptr)
28 {
29 srb_t *sp = (srb_t *)ptr;
30 struct scsi_qla_host *vha = sp->fcport->vha;
31 struct fc_bsg_job *bsg_job = sp->u.bsg_job;
32 struct qla_hw_data *ha = vha->hw;
33 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
34
35 if (sp->type == SRB_FXIOCB_BCMD) {
36 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
37 &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
38
39 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
40 dma_unmap_sg(&ha->pdev->dev,
41 bsg_job->request_payload.sg_list,
42 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
43
44 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
45 dma_unmap_sg(&ha->pdev->dev,
46 bsg_job->reply_payload.sg_list,
47 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
48 } else {
49 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
50 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
51
52 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
53 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
54 }
55
56 if (sp->type == SRB_CT_CMD ||
57 sp->type == SRB_FXIOCB_BCMD ||
58 sp->type == SRB_ELS_CMD_HST)
59 kfree(sp->fcport);
60 qla2x00_rel_sp(vha, sp);
61 }
62
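/*
 * qla24xx_fcp_prio_cfg_valid() - Validate FCP priority configuration data.
 * Checks for the "HQOS" signature and, when @flag is set, that at least
 * one entry has its tag-valid bit set.  Returns 1 if the data is usable,
 * 0 otherwise (including the all-0xFF "no data in flash" case).
 */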
63 int
64 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
65 struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
66 {
67 int i, ret, num_valid;
68 uint8_t *bcode;
69 struct qla_fcp_prio_entry *pri_entry;
70 uint32_t *bcode_val_ptr, bcode_val;
71
72 ret = 1;
73 num_valid = 0;
74 bcode = (uint8_t *)pri_cfg;
75 bcode_val_ptr = (uint32_t *)pri_cfg;
76 bcode_val = (uint32_t)(*bcode_val_ptr);
77
78 if (bcode_val == 0xFFFFFFFF) {
79 /* No FCP Priority config data in flash */
80 ql_dbg(ql_dbg_user, vha, 0x7051,
81 "No FCP Priority config data.\n");
82 return 0;
83 }
84
85 if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
86 bcode[3] != 'S') {
87 /* Invalid FCP priority data header*/
88 ql_dbg(ql_dbg_user, vha, 0x7052,
89 "Invalid FCP Priority data header. bcode=0x%x.\n",
90 bcode_val);
91 return 0;
92 }
93 if (flag != 1)
94 return ret;
95
96 pri_entry = &pri_cfg->entry[0];
97 for (i = 0; i < pri_cfg->num_entries; i++) {
98 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
99 num_valid++;
100 pri_entry++;
101 }
102
103 if (num_valid == 0) {
104 /* No valid FCP priority data entries */
105 ql_dbg(ql_dbg_user, vha, 0x7053,
106 "No valid FCP Priority data entries.\n");
107 ret = 0;
108 } else {
109 /* FCP priority data is valid */
110 ql_dbg(ql_dbg_user, vha, 0x7054,
111 "Valid FCP priority data. num entries = %d.\n",
112 num_valid);
113 }
114
115 return ret;
116 }
117
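/*
 * qla24xx_proc_fcp_prio_cfg_cmd() - Handle the FCP priority vendor command.
 * Sub-commands (vendor_cmd[1]) allow disabling/enabling FCP priority,
 * reading the cached configuration back to user space, or writing and
 * validating a new configuration (allocating ha->fcp_prio_cfg on demand).
 * Only ISP24xx/25xx/P3P adapters are supported.
 */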
118 static int
119 qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
120 {
121 struct Scsi_Host *host = bsg_job->shost;
122 scsi_qla_host_t *vha = shost_priv(host);
123 struct qla_hw_data *ha = vha->hw;
124 int ret = 0;
125 uint32_t len;
126 uint32_t oper;
127
128 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
129 ret = -EINVAL;
130 goto exit_fcp_prio_cfg;
131 }
132
133 /* Get the sub command */
134 oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
135
136 /* Only set config is allowed if config memory is not allocated */
137 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
138 ret = -EINVAL;
139 goto exit_fcp_prio_cfg;
140 }
141 switch (oper) {
142 case QLFC_FCP_PRIO_DISABLE:
143 if (ha->flags.fcp_prio_enabled) {
144 ha->flags.fcp_prio_enabled = 0;
145 ha->fcp_prio_cfg->attributes &=
146 ~FCP_PRIO_ATTR_ENABLE;
147 qla24xx_update_all_fcp_prio(vha);
148 bsg_job->reply->result = DID_OK;
149 } else {
150 ret = -EINVAL;
151 bsg_job->reply->result = (DID_ERROR << 16);
152 goto exit_fcp_prio_cfg;
153 }
154 break;
155
156 case QLFC_FCP_PRIO_ENABLE:
157 if (!ha->flags.fcp_prio_enabled) {
158 if (ha->fcp_prio_cfg) {
159 ha->flags.fcp_prio_enabled = 1;
160 ha->fcp_prio_cfg->attributes |=
161 FCP_PRIO_ATTR_ENABLE;
162 qla24xx_update_all_fcp_prio(vha);
163 bsg_job->reply->result = DID_OK;
164 } else {
165 ret = -EINVAL;
166 bsg_job->reply->result = (DID_ERROR << 16);
167 goto exit_fcp_prio_cfg;
168 }
169 }
170 break;
171
172 case QLFC_FCP_PRIO_GET_CONFIG:
173 len = bsg_job->reply_payload.payload_len;
174 if (!len || len > FCP_PRIO_CFG_SIZE) {
175 ret = -EINVAL;
176 bsg_job->reply->result = (DID_ERROR << 16);
177 goto exit_fcp_prio_cfg;
178 }
179
180 bsg_job->reply->result = DID_OK;
181 bsg_job->reply->reply_payload_rcv_len =
182 sg_copy_from_buffer(
183 bsg_job->reply_payload.sg_list,
184 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
185 len);
186
187 break;
188
189 case QLFC_FCP_PRIO_SET_CONFIG:
190 len = bsg_job->request_payload.payload_len;
191 if (!len || len > FCP_PRIO_CFG_SIZE) {
192 bsg_job->reply->result = (DID_ERROR << 16);
193 ret = -EINVAL;
194 goto exit_fcp_prio_cfg;
195 }
196
197 if (!ha->fcp_prio_cfg) {
198 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
199 if (!ha->fcp_prio_cfg) {
200 ql_log(ql_log_warn, vha, 0x7050,
201 "Unable to allocate memory for fcp prio "
202 "config data (%x).\n", FCP_PRIO_CFG_SIZE);
203 bsg_job->reply->result = (DID_ERROR << 16);
204 ret = -ENOMEM;
205 goto exit_fcp_prio_cfg;
206 }
207 }
208
209 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
210 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
211 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
212 FCP_PRIO_CFG_SIZE);
213
214 /* validate fcp priority data */
215
216 if (!qla24xx_fcp_prio_cfg_valid(vha,
217 (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
218 bsg_job->reply->result = (DID_ERROR << 16);
219 ret = -EINVAL;
220 /* If the buffer was invalid then
221 * fcp_prio_cfg is of no use
222 */
223 vfree(ha->fcp_prio_cfg);
224 ha->fcp_prio_cfg = NULL;
225 goto exit_fcp_prio_cfg;
226 }
227
228 ha->flags.fcp_prio_enabled = 0;
229 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
230 ha->flags.fcp_prio_enabled = 1;
231 qla24xx_update_all_fcp_prio(vha);
232 bsg_job->reply->result = DID_OK;
233 break;
234 default:
235 ret = -EINVAL;
236 break;
237 }
238 exit_fcp_prio_cfg:
239 if (!ret)
240 bsg_job->job_done(bsg_job);
241 return ret;
242 }
243
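/*
 * qla2x00_process_els() - Issue an ELS pass-through request.
 * For rport-directed ELS (FC_BSG_RPT_ELS) the existing fcport is used,
 * logging in to the fabric first if needed; for host-based ELS a dummy
 * fcport is allocated and its D_ID taken from the request.  The payload
 * scatter-gather lists are DMA-mapped, an SRB is built and the command
 * is started with qla2x00_start_sp(); completion runs via
 * qla2x00_bsg_job_done().
 */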
244 static int
245 qla2x00_process_els(struct fc_bsg_job *bsg_job)
246 {
247 struct fc_rport *rport;
248 fc_port_t *fcport = NULL;
249 struct Scsi_Host *host;
250 scsi_qla_host_t *vha;
251 struct qla_hw_data *ha;
252 srb_t *sp;
253 const char *type;
254 int req_sg_cnt, rsp_sg_cnt;
255 int rval = (DRIVER_ERROR << 16);
256 uint16_t nextlid = 0;
257
258 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
259 rport = bsg_job->rport;
260 fcport = *(fc_port_t **) rport->dd_data;
261 host = rport_to_shost(rport);
262 vha = shost_priv(host);
263 ha = vha->hw;
264 type = "FC_BSG_RPT_ELS";
265 } else {
266 host = bsg_job->shost;
267 vha = shost_priv(host);
268 ha = vha->hw;
269 type = "FC_BSG_HST_ELS_NOLOGIN";
270 }
271
272 if (!vha->flags.online) {
273 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
274 rval = -EIO;
275 goto done;
276 }
277
278 /* pass through is supported only for ISP 4Gb or higher */
279 if (!IS_FWI2_CAPABLE(ha)) {
280 ql_dbg(ql_dbg_user, vha, 0x7001,
281 "ELS passthru not supported for ISP23xx based adapters.\n");
282 rval = -EPERM;
283 goto done;
284 }
285
286 /* Multiple SG's are not supported for ELS requests */
287 if (bsg_job->request_payload.sg_cnt > 1 ||
288 bsg_job->reply_payload.sg_cnt > 1) {
289 ql_dbg(ql_dbg_user, vha, 0x7002,
290 "Multiple SG's are not suppored for ELS requests, "
291 "request_sg_cnt=%x reply_sg_cnt=%x.\n",
292 bsg_job->request_payload.sg_cnt,
293 bsg_job->reply_payload.sg_cnt);
294 rval = -EPERM;
295 goto done;
296 }
297
298 /* ELS request for rport */
299 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
300 /* make sure the rport is logged in,
301 * if not perform fabric login
302 */
303 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
304 ql_dbg(ql_dbg_user, vha, 0x7003,
305 "Failed to login port %06X for ELS passthru.\n",
306 fcport->d_id.b24);
307 rval = -EIO;
308 goto done;
309 }
310 } else {
311 /* Allocate a dummy fcport structure, since functions
312 * preparing the IOCB and mailbox command retrieves port
313 * specific information from fcport structure. For Host based
314 * ELS commands there will be no fcport structure allocated
315 */
316 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
317 if (!fcport) {
318 rval = -ENOMEM;
319 goto done;
320 }
321
322 /* Initialize all required fields of fcport */
323 fcport->vha = vha;
324 fcport->d_id.b.al_pa =
325 bsg_job->request->rqst_data.h_els.port_id[0];
326 fcport->d_id.b.area =
327 bsg_job->request->rqst_data.h_els.port_id[1];
328 fcport->d_id.b.domain =
329 bsg_job->request->rqst_data.h_els.port_id[2];
330 fcport->loop_id =
331 (fcport->d_id.b.al_pa == 0xFD) ?
332 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
333 }
334
335 req_sg_cnt =
336 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
337 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
338 if (!req_sg_cnt) {
339 rval = -ENOMEM;
340 goto done_free_fcport;
341 }
342
343 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
344 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
345 if (!rsp_sg_cnt) {
346 rval = -ENOMEM;
347 goto done_free_fcport;
348 }
349
350 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
351 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
352 ql_log(ql_log_warn, vha, 0x7008,
353 "dma mapping resulted in different sg counts, "
354 "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
355 "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
356 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
357 rval = -EAGAIN;
358 goto done_unmap_sg;
359 }
360
361 /* Alloc SRB structure */
362 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
363 if (!sp) {
364 rval = -ENOMEM;
365 goto done_unmap_sg;
366 }
367
368 sp->type =
369 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
370 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
371 sp->name =
372 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
373 "bsg_els_rpt" : "bsg_els_hst");
374 sp->u.bsg_job = bsg_job;
375 sp->free = qla2x00_bsg_sp_free;
376 sp->done = qla2x00_bsg_job_done;
377
378 ql_dbg(ql_dbg_user, vha, 0x700a,
379 "bsg rqst type: %s els type: %x - loop-id=%x "
380 "portid=%-2x%02x%02x.\n", type,
381 bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
382 fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
383
384 rval = qla2x00_start_sp(sp);
385 if (rval != QLA_SUCCESS) {
386 ql_log(ql_log_warn, vha, 0x700e,
387 "qla2x00_start_sp failed = %d\n", rval);
388 qla2x00_rel_sp(vha, sp);
389 rval = -EIO;
390 goto done_unmap_sg;
391 }
392 return rval;
393
394 done_unmap_sg:
395 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
396 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
397 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
398 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
399 goto done_free_fcport;
400
401 done_free_fcport:
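/* Free the dummy fcport only for host-based ELS; the rport's fcport
 * is owned by the FC transport and must not be freed here.
 */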
402 if (bsg_job->request->msgcode != FC_BSG_RPT_ELS)
403 kfree(fcport);
404 done:
405 return rval;
406 }
407
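/*
 * qla24xx_calc_ct_iocbs() - Number of IOCBs needed for @dsds data segments.
 * The command IOCB carries the first two data segment descriptors; each
 * continuation IOCB carries up to five more, so for example 12 segments
 * need 1 + ceil(10 / 5) = 3 IOCBs.
 */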
408 inline uint16_t
409 qla24xx_calc_ct_iocbs(uint16_t dsds)
410 {
411 uint16_t iocbs;
412
413 iocbs = 1;
414 if (dsds > 2) {
415 iocbs += (dsds - 2) / 5;
416 if ((dsds - 2) % 5)
417 iocbs++;
418 }
419 return iocbs;
420 }
421
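/*
 * qla2x00_process_ct() - Issue a CT pass-through request.
 * Maps the payload scatter-gather lists, derives the destination loop id
 * from the CT preamble (0xFC -> SNS, 0xFA -> management server), builds
 * a dummy fcport plus an SRB sized by qla24xx_calc_ct_iocbs() and starts
 * the command; completion is reported through qla2x00_bsg_job_done().
 */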
422 static int
423 qla2x00_process_ct(struct fc_bsg_job *bsg_job)
424 {
425 srb_t *sp;
426 struct Scsi_Host *host = bsg_job->shost;
427 scsi_qla_host_t *vha = shost_priv(host);
428 struct qla_hw_data *ha = vha->hw;
429 int rval = (DRIVER_ERROR << 16);
430 int req_sg_cnt, rsp_sg_cnt;
431 uint16_t loop_id;
432 struct fc_port *fcport;
433 char *type = "FC_BSG_HST_CT";
434
435 req_sg_cnt =
436 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
437 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
438 if (!req_sg_cnt) {
439 ql_log(ql_log_warn, vha, 0x700f,
440 "dma_map_sg return %d for request\n", req_sg_cnt);
441 rval = -ENOMEM;
442 goto done;
443 }
444
445 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
446 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
447 if (!rsp_sg_cnt) {
448 ql_log(ql_log_warn, vha, 0x7010,
449 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
450 rval = -ENOMEM;
451 goto done;
452 }
453
454 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
455 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
456 ql_log(ql_log_warn, vha, 0x7011,
457 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
458 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
459 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
460 rval = -EAGAIN;
461 goto done_unmap_sg;
462 }
463
464 if (!vha->flags.online) {
465 ql_log(ql_log_warn, vha, 0x7012,
466 "Host is not online.\n");
467 rval = -EIO;
468 goto done_unmap_sg;
469 }
470
471 loop_id =
472 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
473 >> 24;
474 switch (loop_id) {
475 case 0xFC:
476 loop_id = cpu_to_le16(NPH_SNS);
477 break;
478 case 0xFA:
479 loop_id = vha->mgmt_svr_loop_id;
480 break;
481 default:
482 ql_dbg(ql_dbg_user, vha, 0x7013,
483 "Unknown loop id: %x.\n", loop_id);
484 rval = -EINVAL;
485 goto done_unmap_sg;
486 }
487
488 /* Allocate a dummy fcport structure, since functions preparing the
489 * IOCB and mailbox command retrieves port specific information
490 * from fcport structure. For Host based ELS commands there will be
491 * no fcport structure allocated
492 */
493 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
494 if (!fcport) {
495 ql_log(ql_log_warn, vha, 0x7014,
496 "Failed to allocate fcport.\n");
497 rval = -ENOMEM;
498 goto done_unmap_sg;
499 }
500
501 /* Initialize all required fields of fcport */
502 fcport->vha = vha;
503 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
504 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
505 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
506 fcport->loop_id = loop_id;
507
508 /* Alloc SRB structure */
509 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
510 if (!sp) {
511 ql_log(ql_log_warn, vha, 0x7015,
512 "qla2x00_get_sp failed.\n");
513 rval = -ENOMEM;
514 goto done_free_fcport;
515 }
516
517 sp->type = SRB_CT_CMD;
518 sp->name = "bsg_ct";
519 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
520 sp->u.bsg_job = bsg_job;
521 sp->free = qla2x00_bsg_sp_free;
522 sp->done = qla2x00_bsg_job_done;
523
524 ql_dbg(ql_dbg_user, vha, 0x7016,
525 "bsg rqst type: %s else type: %x - "
526 "loop-id=%x portid=%02x%02x%02x.\n", type,
527 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
528 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
529 fcport->d_id.b.al_pa);
530
531 rval = qla2x00_start_sp(sp);
532 if (rval != QLA_SUCCESS) {
533 ql_log(ql_log_warn, vha, 0x7017,
534 "qla2x00_start_sp failed=%d.\n", rval);
535 qla2x00_rel_sp(vha, sp);
536 rval = -EIO;
537 goto done_free_fcport;
538 }
539 return rval;
540
541 done_free_fcport:
542 kfree(fcport);
543 done_unmap_sg:
544 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
545 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
546 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
547 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
548 done:
549 return rval;
550 }
551
552 /* Disable loopback mode */
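/*
 * qla81xx_reset_loopback_mode() - Clear the internal/external loopback
 * bits in the port configuration.  @wait selects waiting for the
 * DCBX-complete event and @wait2 for the loopback port-up event after
 * the new configuration has been written.
 */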
553 static inline int
554 qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
555 int wait, int wait2)
556 {
557 int ret = 0;
558 int rval = 0;
559 uint16_t new_config[4];
560 struct qla_hw_data *ha = vha->hw;
561
562 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
563 goto done_reset_internal;
564
565 memset(new_config, 0, sizeof(new_config));
566 if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
567 ENABLE_INTERNAL_LOOPBACK ||
568 (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
569 ENABLE_EXTERNAL_LOOPBACK) {
570 new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
571 ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
572 (new_config[0] & INTERNAL_LOOPBACK_MASK));
573 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
574
575 ha->notify_dcbx_comp = wait;
576 ha->notify_lb_portup_comp = wait2;
577
578 ret = qla81xx_set_port_config(vha, new_config);
579 if (ret != QLA_SUCCESS) {
580 ql_log(ql_log_warn, vha, 0x7025,
581 "Set port config failed.\n");
582 ha->notify_dcbx_comp = 0;
583 ha->notify_lb_portup_comp = 0;
584 rval = -EINVAL;
585 goto done_reset_internal;
586 }
587
588 /* Wait for DCBX complete event */
589 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
590 (DCBX_COMP_TIMEOUT * HZ))) {
591 ql_dbg(ql_dbg_user, vha, 0x7026,
592 "DCBX completion not received.\n");
593 ha->notify_dcbx_comp = 0;
594 ha->notify_lb_portup_comp = 0;
595 rval = -EINVAL;
596 goto done_reset_internal;
597 } else
598 ql_dbg(ql_dbg_user, vha, 0x7027,
599 "DCBX completion received.\n");
600
601 if (wait2 &&
602 !wait_for_completion_timeout(&ha->lb_portup_comp,
603 (LB_PORTUP_COMP_TIMEOUT * HZ))) {
604 ql_dbg(ql_dbg_user, vha, 0x70c5,
605 "Port up completion not received.\n");
606 ha->notify_lb_portup_comp = 0;
607 rval = -EINVAL;
608 goto done_reset_internal;
609 } else
610 ql_dbg(ql_dbg_user, vha, 0x70c6,
611 "Port up completion received.\n");
612
613 ha->notify_dcbx_comp = 0;
614 ha->notify_lb_portup_comp = 0;
615 }
616 done_reset_internal:
617 return rval;
618 }
619
620 /*
621 * Set the port configuration to enable the internal or external loopback
622 * depending on the loopback mode.
623 */
624 static inline int
625 qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
626 uint16_t *new_config, uint16_t mode)
627 {
628 int ret = 0;
629 int rval = 0;
630 unsigned long rem_tmo = 0, current_tmo = 0;
631 struct qla_hw_data *ha = vha->hw;
632
633 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
634 goto done_set_internal;
635
636 if (mode == INTERNAL_LOOPBACK)
637 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
638 else if (mode == EXTERNAL_LOOPBACK)
639 new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
640 ql_dbg(ql_dbg_user, vha, 0x70be,
641 "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));
642
643 memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
644
645 ha->notify_dcbx_comp = 1;
646 ret = qla81xx_set_port_config(vha, new_config);
647 if (ret != QLA_SUCCESS) {
648 ql_log(ql_log_warn, vha, 0x7021,
649 "set port config failed.\n");
650 ha->notify_dcbx_comp = 0;
651 rval = -EINVAL;
652 goto done_set_internal;
653 }
654
655 /* Wait for DCBX complete event */
656 current_tmo = DCBX_COMP_TIMEOUT * HZ;
657 while (1) {
658 rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
659 current_tmo);
660 if (!ha->idc_extend_tmo || rem_tmo) {
661 ha->idc_extend_tmo = 0;
662 break;
663 }
664 current_tmo = ha->idc_extend_tmo * HZ;
665 ha->idc_extend_tmo = 0;
666 }
667
668 if (!rem_tmo) {
669 ql_dbg(ql_dbg_user, vha, 0x7022,
670 "DCBX completion not received.\n");
671 ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
672 /*
673 * If the reset of the loopback mode doesn't work take a FCoE
674 * dump and reset the chip.
675 */
676 if (ret) {
677 ha->isp_ops->fw_dump(vha, 0);
678 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
679 }
680 rval = -EINVAL;
681 } else {
682 if (ha->flags.idc_compl_status) {
683 ql_dbg(ql_dbg_user, vha, 0x70c3,
684 "Bad status in IDC Completion AEN\n");
685 rval = -EINVAL;
686 ha->flags.idc_compl_status = 0;
687 } else
688 ql_dbg(ql_dbg_user, vha, 0x7023,
689 "DCBX completion received.\n");
690 }
691
692 ha->notify_dcbx_comp = 0;
693 ha->idc_extend_tmo = 0;
694
695 done_set_internal:
696 return rval;
697 }
698
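/*
 * qla2x00_process_loopback() - Vendor loopback/echo diagnostic.
 * Copies the request payload into a DMA buffer and either runs an ECHO
 * test (fabric topology, or an ELS echo frame on 81xx/83xx/8044) or a
 * loopback test; on 81xx/83xx/8044 the port is first switched into
 * internal/external loopback and the original port configuration is
 * restored afterwards.  The mailbox status words and the command code
 * are appended after the fc_bsg_reply in the reply buffer.
 */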
699 static int
700 qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
701 {
702 struct Scsi_Host *host = bsg_job->shost;
703 scsi_qla_host_t *vha = shost_priv(host);
704 struct qla_hw_data *ha = vha->hw;
705 int rval;
706 uint8_t command_sent;
707 char *type;
708 struct msg_echo_lb elreq;
709 uint16_t response[MAILBOX_REGISTER_COUNT];
710 uint16_t config[4], new_config[4];
711 uint8_t *fw_sts_ptr;
712 uint8_t *req_data = NULL;
713 dma_addr_t req_data_dma;
714 uint32_t req_data_len;
715 uint8_t *rsp_data = NULL;
716 dma_addr_t rsp_data_dma;
717 uint32_t rsp_data_len;
718
719 if (!vha->flags.online) {
720 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
721 return -EIO;
722 }
723
724 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
725 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
726 DMA_TO_DEVICE);
727
728 if (!elreq.req_sg_cnt) {
729 ql_log(ql_log_warn, vha, 0x701a,
730 "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
731 return -ENOMEM;
732 }
733
734 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
735 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
736 DMA_FROM_DEVICE);
737
738 if (!elreq.rsp_sg_cnt) {
739 ql_log(ql_log_warn, vha, 0x701b,
740 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
741 rval = -ENOMEM;
742 goto done_unmap_req_sg;
743 }
744
745 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
746 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
747 ql_log(ql_log_warn, vha, 0x701c,
748 "dma mapping resulted in different sg counts, "
749 "request_sg_cnt: %x dma_request_sg_cnt: %x "
750 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
751 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
752 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
753 rval = -EAGAIN;
754 goto done_unmap_sg;
755 }
756 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
757 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
758 &req_data_dma, GFP_KERNEL);
759 if (!req_data) {
760 ql_log(ql_log_warn, vha, 0x701d,
761 "dma alloc failed for req_data.\n");
762 rval = -ENOMEM;
763 goto done_unmap_sg;
764 }
765
766 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
767 &rsp_data_dma, GFP_KERNEL);
768 if (!rsp_data) {
769 ql_log(ql_log_warn, vha, 0x7004,
770 "dma alloc failed for rsp_data.\n");
771 rval = -ENOMEM;
772 goto done_free_dma_req;
773 }
774
775 /* Copy the request buffer in req_data now */
776 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
777 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
778
779 elreq.send_dma = req_data_dma;
780 elreq.rcv_dma = rsp_data_dma;
781 elreq.transfer_size = req_data_len;
782
783 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
784 elreq.iteration_count =
785 bsg_job->request->rqst_data.h_vendor.vendor_cmd[2];
786
787 if (atomic_read(&vha->loop_state) == LOOP_READY &&
788 (ha->current_topology == ISP_CFG_F ||
789 ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
790 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
791 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
792 elreq.options == EXTERNAL_LOOPBACK) {
793 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
794 ql_dbg(ql_dbg_user, vha, 0x701e,
795 "BSG request type: %s.\n", type);
796 command_sent = INT_DEF_LB_ECHO_CMD;
797 rval = qla2x00_echo_test(vha, &elreq, response);
798 } else {
799 if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
800 memset(config, 0, sizeof(config));
801 memset(new_config, 0, sizeof(new_config));
802
803 if (qla81xx_get_port_config(vha, config)) {
804 ql_log(ql_log_warn, vha, 0x701f,
805 "Get port config failed.\n");
806 rval = -EPERM;
807 goto done_free_dma_rsp;
808 }
809
810 if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
811 ql_dbg(ql_dbg_user, vha, 0x70c4,
812 "Loopback operation already in "
813 "progress.\n");
814 rval = -EAGAIN;
815 goto done_free_dma_rsp;
816 }
817
818 ql_dbg(ql_dbg_user, vha, 0x70c0,
819 "elreq.options=%04x\n", elreq.options);
820
821 if (elreq.options == EXTERNAL_LOOPBACK)
822 if (IS_QLA8031(ha) || IS_QLA8044(ha))
823 rval = qla81xx_set_loopback_mode(vha,
824 config, new_config, elreq.options);
825 else
826 rval = qla81xx_reset_loopback_mode(vha,
827 config, 1, 0);
828 else
829 rval = qla81xx_set_loopback_mode(vha, config,
830 new_config, elreq.options);
831
832 if (rval) {
833 rval = -EPERM;
834 goto done_free_dma_rsp;
835 }
836
837 type = "FC_BSG_HST_VENDOR_LOOPBACK";
838 ql_dbg(ql_dbg_user, vha, 0x7028,
839 "BSG request type: %s.\n", type);
840
841 command_sent = INT_DEF_LB_LOOPBACK_CMD;
842 rval = qla2x00_loopback_test(vha, &elreq, response);
843
844 if (response[0] == MBS_COMMAND_ERROR &&
845 response[1] == MBS_LB_RESET) {
846 ql_log(ql_log_warn, vha, 0x7029,
847 "MBX command error, Aborting ISP.\n");
848 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
849 qla2xxx_wake_dpc(vha);
850 qla2x00_wait_for_chip_reset(vha);
851 /* Also reset the MPI */
852 if (IS_QLA81XX(ha)) {
853 if (qla81xx_restart_mpi_firmware(vha) !=
854 QLA_SUCCESS) {
855 ql_log(ql_log_warn, vha, 0x702a,
856 "MPI reset failed.\n");
857 }
858 }
859
860 rval = -EIO;
861 goto done_free_dma_rsp;
862 }
863
864 if (new_config[0]) {
865 int ret;
866
867 /* Revert back to original port config
868 * Also clear internal loopback
869 */
870 ret = qla81xx_reset_loopback_mode(vha,
871 new_config, 0, 1);
872 if (ret) {
873 /*
874 * If the reset of the loopback mode
875 * doesn't work take FCoE dump and then
876 * reset the chip.
877 */
878 ha->isp_ops->fw_dump(vha, 0);
879 set_bit(ISP_ABORT_NEEDED,
880 &vha->dpc_flags);
881 }
882
883 }
884
885 } else {
886 type = "FC_BSG_HST_VENDOR_LOOPBACK";
887 ql_dbg(ql_dbg_user, vha, 0x702b,
888 "BSG request type: %s.\n", type);
889 command_sent = INT_DEF_LB_LOOPBACK_CMD;
890 rval = qla2x00_loopback_test(vha, &elreq, response);
891 }
892 }
893
894 if (rval) {
895 ql_log(ql_log_warn, vha, 0x702c,
896 "Vendor request %s failed.\n", type);
897
898 rval = 0;
899 bsg_job->reply->result = (DID_ERROR << 16);
900 bsg_job->reply->reply_payload_rcv_len = 0;
901 } else {
902 ql_dbg(ql_dbg_user, vha, 0x702d,
903 "Vendor request %s completed.\n", type);
904 bsg_job->reply->result = (DID_OK << 16);
905 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
906 bsg_job->reply_payload.sg_cnt, rsp_data,
907 rsp_data_len);
908 }
909
910 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
911 sizeof(response) + sizeof(uint8_t);
912 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
913 sizeof(struct fc_bsg_reply);
914 memcpy(fw_sts_ptr, response, sizeof(response));
915 fw_sts_ptr += sizeof(response);
916 *fw_sts_ptr = command_sent;
917
918 done_free_dma_rsp:
919 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
920 rsp_data, rsp_data_dma);
921 done_free_dma_req:
922 dma_free_coherent(&ha->pdev->dev, req_data_len,
923 req_data, req_data_dma);
924 done_unmap_sg:
925 dma_unmap_sg(&ha->pdev->dev,
926 bsg_job->reply_payload.sg_list,
927 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
928 done_unmap_req_sg:
929 dma_unmap_sg(&ha->pdev->dev,
930 bsg_job->request_payload.sg_list,
931 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
932 if (!rval)
933 bsg_job->job_done(bsg_job);
934 return rval;
935 }
936
937 static int
938 qla84xx_reset(struct fc_bsg_job *bsg_job)
939 {
940 struct Scsi_Host *host = bsg_job->shost;
941 scsi_qla_host_t *vha = shost_priv(host);
942 struct qla_hw_data *ha = vha->hw;
943 int rval = 0;
944 uint32_t flag;
945
946 if (!IS_QLA84XX(ha)) {
947 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
948 return -EINVAL;
949 }
950
951 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
952
953 rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
954
955 if (rval) {
956 ql_log(ql_log_warn, vha, 0x7030,
957 "Vendor request 84xx reset failed.\n");
958 rval = (DID_ERROR << 16);
959
960 } else {
961 ql_dbg(ql_dbg_user, vha, 0x7031,
962 "Vendor request 84xx reset completed.\n");
963 bsg_job->reply->result = DID_OK;
964 bsg_job->job_done(bsg_job);
965 }
966
967 return rval;
968 }
969
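/*
 * qla84xx_updatefw() - Update the ISP84xx firmware image.
 * Copies the firmware from the request payload into a coherent DMA
 * buffer and issues a VERIFY_CHIP IOCB (optionally flagged as
 * diagnostic firmware) with an extended 120 second timeout.
 */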
970 static int
971 qla84xx_updatefw(struct fc_bsg_job *bsg_job)
972 {
973 struct Scsi_Host *host = bsg_job->shost;
974 scsi_qla_host_t *vha = shost_priv(host);
975 struct qla_hw_data *ha = vha->hw;
976 struct verify_chip_entry_84xx *mn = NULL;
977 dma_addr_t mn_dma, fw_dma;
978 void *fw_buf = NULL;
979 int rval = 0;
980 uint32_t sg_cnt;
981 uint32_t data_len;
982 uint16_t options;
983 uint32_t flag;
984 uint32_t fw_ver;
985
986 if (!IS_QLA84XX(ha)) {
987 ql_dbg(ql_dbg_user, vha, 0x7032,
988 "Not 84xx, exiting.\n");
989 return -EINVAL;
990 }
991
992 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
993 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
994 if (!sg_cnt) {
995 ql_log(ql_log_warn, vha, 0x7033,
996 "dma_map_sg returned %d for request.\n", sg_cnt);
997 return -ENOMEM;
998 }
999
1000 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1001 ql_log(ql_log_warn, vha, 0x7034,
1002 "DMA mapping resulted in different sg counts, "
1003 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1004 bsg_job->request_payload.sg_cnt, sg_cnt);
1005 rval = -EAGAIN;
1006 goto done_unmap_sg;
1007 }
1008
1009 data_len = bsg_job->request_payload.payload_len;
1010 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
1011 &fw_dma, GFP_KERNEL);
1012 if (!fw_buf) {
1013 ql_log(ql_log_warn, vha, 0x7035,
1014 "DMA alloc failed for fw_buf.\n");
1015 rval = -ENOMEM;
1016 goto done_unmap_sg;
1017 }
1018
1019 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1020 bsg_job->request_payload.sg_cnt, fw_buf, data_len);
1021
1022 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1023 if (!mn) {
1024 ql_log(ql_log_warn, vha, 0x7036,
1025 "DMA alloc failed for fw buffer.\n");
1026 rval = -ENOMEM;
1027 goto done_free_fw_buf;
1028 }
1029
1030 flag = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1031 fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));
1032
1033 memset(mn, 0, sizeof(struct access_chip_84xx));
1034 mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
1035 mn->entry_count = 1;
1036
1037 options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
1038 if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
1039 options |= VCO_DIAG_FW;
1040
1041 mn->options = cpu_to_le16(options);
1042 mn->fw_ver = cpu_to_le32(fw_ver);
1043 mn->fw_size = cpu_to_le32(data_len);
1044 mn->fw_seq_size = cpu_to_le32(data_len);
1045 mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
1046 mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
1047 mn->dseg_length = cpu_to_le32(data_len);
1048 mn->data_seg_cnt = cpu_to_le16(1);
1049
1050 rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
1051
1052 if (rval) {
1053 ql_log(ql_log_warn, vha, 0x7037,
1054 "Vendor request 84xx updatefw failed.\n");
1055
1056 rval = (DID_ERROR << 16);
1057 } else {
1058 ql_dbg(ql_dbg_user, vha, 0x7038,
1059 "Vendor request 84xx updatefw completed.\n");
1060
1061 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1062 bsg_job->reply->result = DID_OK;
1063 }
1064
1065 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1066
1067 done_free_fw_buf:
1068 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1069
1070 done_unmap_sg:
1071 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1072 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1073
1074 if (!rval)
1075 bsg_job->job_done(bsg_job);
1076 return rval;
1077 }
1078
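/*
 * qla84xx_mgmt_cmd() - ISP84xx management pass-through.
 * Builds an ACCESS_CHIP IOCB for memory read/write, firmware info
 * retrieval or configuration changes, mapping the request or reply
 * payload for DMA as required by the sub-command.
 */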
1079 static int
1080 qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
1081 {
1082 struct Scsi_Host *host = bsg_job->shost;
1083 scsi_qla_host_t *vha = shost_priv(host);
1084 struct qla_hw_data *ha = vha->hw;
1085 struct access_chip_84xx *mn = NULL;
1086 dma_addr_t mn_dma, mgmt_dma;
1087 void *mgmt_b = NULL;
1088 int rval = 0;
1089 struct qla_bsg_a84_mgmt *ql84_mgmt;
1090 uint32_t sg_cnt;
1091 uint32_t data_len = 0;
1092 uint32_t dma_direction = DMA_NONE;
1093
1094 if (!IS_QLA84XX(ha)) {
1095 ql_log(ql_log_warn, vha, 0x703a,
1096 "Not 84xx, exiting.\n");
1097 return -EINVAL;
1098 }
1099
1100 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1101 if (!mn) {
1102 ql_log(ql_log_warn, vha, 0x703c,
1103 "DMA alloc failed for fw buffer.\n");
1104 return -ENOMEM;
1105 }
1106
1107 memset(mn, 0, sizeof(struct access_chip_84xx));
1108 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
1109 mn->entry_count = 1;
1110 ql84_mgmt = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
1111 switch (ql84_mgmt->mgmt.cmd) {
1112 case QLA84_MGMT_READ_MEM:
1113 case QLA84_MGMT_GET_INFO:
1114 sg_cnt = dma_map_sg(&ha->pdev->dev,
1115 bsg_job->reply_payload.sg_list,
1116 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1117 if (!sg_cnt) {
1118 ql_log(ql_log_warn, vha, 0x703d,
1119 "dma_map_sg returned %d for reply.\n", sg_cnt);
1120 rval = -ENOMEM;
1121 goto exit_mgmt;
1122 }
1123
1124 dma_direction = DMA_FROM_DEVICE;
1125
1126 if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
1127 ql_log(ql_log_warn, vha, 0x703e,
1128 "DMA mapping resulted in different sg counts, "
1129 "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
1130 bsg_job->reply_payload.sg_cnt, sg_cnt);
1131 rval = -EAGAIN;
1132 goto done_unmap_sg;
1133 }
1134
1135 data_len = bsg_job->reply_payload.payload_len;
1136
1137 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1138 &mgmt_dma, GFP_KERNEL);
1139 if (!mgmt_b) {
1140 ql_log(ql_log_warn, vha, 0x703f,
1141 "DMA alloc failed for mgmt_b.\n");
1142 rval = -ENOMEM;
1143 goto done_unmap_sg;
1144 }
1145
1146 if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
1147 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
1148 mn->parameter1 =
1149 cpu_to_le32(
1150 ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1151
1152 } else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
1153 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
1154 mn->parameter1 =
1155 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);
1156
1157 mn->parameter2 =
1158 cpu_to_le32(
1159 ql84_mgmt->mgmt.mgmtp.u.info.context);
1160 }
1161 break;
1162
1163 case QLA84_MGMT_WRITE_MEM:
1164 sg_cnt = dma_map_sg(&ha->pdev->dev,
1165 bsg_job->request_payload.sg_list,
1166 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1167
1168 if (!sg_cnt) {
1169 ql_log(ql_log_warn, vha, 0x7040,
1170 "dma_map_sg returned %d.\n", sg_cnt);
1171 rval = -ENOMEM;
1172 goto exit_mgmt;
1173 }
1174
1175 dma_direction = DMA_TO_DEVICE;
1176
1177 if (sg_cnt != bsg_job->request_payload.sg_cnt) {
1178 ql_log(ql_log_warn, vha, 0x7041,
1179 "DMA mapping resulted in different sg counts, "
1180 "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
1181 bsg_job->request_payload.sg_cnt, sg_cnt);
1182 rval = -EAGAIN;
1183 goto done_unmap_sg;
1184 }
1185
1186 data_len = bsg_job->request_payload.payload_len;
1187 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1188 &mgmt_dma, GFP_KERNEL);
1189 if (!mgmt_b) {
1190 ql_log(ql_log_warn, vha, 0x7042,
1191 "DMA alloc failed for mgmt_b.\n");
1192 rval = -ENOMEM;
1193 goto done_unmap_sg;
1194 }
1195
1196 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1197 bsg_job->request_payload.sg_cnt, mgmt_b, data_len);
1198
1199 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
1200 mn->parameter1 =
1201 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
1202 break;
1203
1204 case QLA84_MGMT_CHNG_CONFIG:
1205 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
1206 mn->parameter1 =
1207 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);
1208
1209 mn->parameter2 =
1210 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);
1211
1212 mn->parameter3 =
1213 cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
1214 break;
1215
1216 default:
1217 rval = -EIO;
1218 goto exit_mgmt;
1219 }
1220
1221 if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
1222 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
1223 mn->dseg_count = cpu_to_le16(1);
1224 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
1225 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
1226 mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
1227 }
1228
1229 rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
1230
1231 if (rval) {
1232 ql_log(ql_log_warn, vha, 0x7043,
1233 "Vendor request 84xx mgmt failed.\n");
1234
1235 rval = (DID_ERROR << 16);
1236
1237 } else {
1238 ql_dbg(ql_dbg_user, vha, 0x7044,
1239 "Vendor request 84xx mgmt completed.\n");
1240
1241 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1242 bsg_job->reply->result = DID_OK;
1243
1244 if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
1245 (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
1246 bsg_job->reply->reply_payload_rcv_len =
1247 bsg_job->reply_payload.payload_len;
1248
1249 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1250 bsg_job->reply_payload.sg_cnt, mgmt_b,
1251 data_len);
1252 }
1253 }
1254
1255 done_unmap_sg:
1256 if (mgmt_b)
1257 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1258
1259 if (dma_direction == DMA_TO_DEVICE)
1260 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1261 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1262 else if (dma_direction == DMA_FROM_DEVICE)
1263 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1264 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1265
1266 exit_mgmt:
1267 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1268
1269 if (!rval)
1270 bsg_job->job_done(bsg_job);
1271 return rval;
1272 }
1273
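/*
 * qla24xx_iidma() - Get or set the iiDMA (port speed) setting for a
 * target port identified by WWPN.  port_param->mode selects set
 * (non-zero) or get; on a get the updated qla_port_param is copied
 * back after the fc_bsg_reply.
 */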
1274 static int
1275 qla24xx_iidma(struct fc_bsg_job *bsg_job)
1276 {
1277 struct Scsi_Host *host = bsg_job->shost;
1278 scsi_qla_host_t *vha = shost_priv(host);
1279 int rval = 0;
1280 struct qla_port_param *port_param = NULL;
1281 fc_port_t *fcport = NULL;
1282 int found = 0;
1283 uint16_t mb[MAILBOX_REGISTER_COUNT];
1284 uint8_t *rsp_ptr = NULL;
1285
1286 if (!IS_IIDMA_CAPABLE(vha->hw)) {
1287 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1288 return -EINVAL;
1289 }
1290
1291 port_param = (void *)bsg_job->request + sizeof(struct fc_bsg_request);
1292 if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1293 ql_log(ql_log_warn, vha, 0x7048,
1294 "Invalid destination type.\n");
1295 return -EINVAL;
1296 }
1297
1298 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1299 if (fcport->port_type != FCT_TARGET)
1300 continue;
1301
1302 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1303 fcport->port_name, sizeof(fcport->port_name)))
1304 continue;
1305
1306 found = 1;
1307 break;
1308 }
1309
1310 if (!found) {
1311 ql_log(ql_log_warn, vha, 0x7049,
1312 "Failed to find port.\n");
1313 return -EINVAL;
1314 }
1315
1316 if (atomic_read(&fcport->state) != FCS_ONLINE) {
1317 ql_log(ql_log_warn, vha, 0x704a,
1318 "Port is not online.\n");
1319 return -EINVAL;
1320 }
1321
1322 if (fcport->flags & FCF_LOGIN_NEEDED) {
1323 ql_log(ql_log_warn, vha, 0x704b,
1324 "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1325 return -EINVAL;
1326 }
1327
1328 if (port_param->mode)
1329 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1330 port_param->speed, mb);
1331 else
1332 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1333 &port_param->speed, mb);
1334
1335 if (rval) {
1336 ql_log(ql_log_warn, vha, 0x704c,
1337 "iIDMA cmd failed for %8phN -- "
1338 "%04x %x %04x %04x.\n", fcport->port_name,
1339 rval, fcport->fp_speed, mb[0], mb[1]);
1340 rval = (DID_ERROR << 16);
1341 } else {
1342 if (!port_param->mode) {
1343 bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1344 sizeof(struct qla_port_param);
1345
1346 rsp_ptr = ((uint8_t *)bsg_job->reply) +
1347 sizeof(struct fc_bsg_reply);
1348
1349 memcpy(rsp_ptr, port_param,
1350 sizeof(struct qla_port_param));
1351 }
1352
1353 bsg_job->reply->result = DID_OK;
1354 bsg_job->job_done(bsg_job);
1355 }
1356
1357 return rval;
1358 }
1359
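/*
 * qla2x00_optrom_setup() - Prepare an option-ROM read or update.
 * Validates the requested start offset (and, for updates, that it maps
 * to a writable flash region), clamps the transfer size to the option
 * ROM size, allocates ha->optrom_buffer and moves optrom_state to
 * QLA_SREADING or QLA_SWRITING.
 */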
1360 static int
1361 qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
1362 uint8_t is_update)
1363 {
1364 uint32_t start = 0;
1365 int valid = 0;
1366 struct qla_hw_data *ha = vha->hw;
1367
1368 if (unlikely(pci_channel_offline(ha->pdev)))
1369 return -EINVAL;
1370
1371 start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1372 if (start > ha->optrom_size) {
1373 ql_log(ql_log_warn, vha, 0x7055,
1374 "start %d > optrom_size %d.\n", start, ha->optrom_size);
1375 return -EINVAL;
1376 }
1377
1378 if (ha->optrom_state != QLA_SWAITING) {
1379 ql_log(ql_log_info, vha, 0x7056,
1380 "optrom_state %d.\n", ha->optrom_state);
1381 return -EBUSY;
1382 }
1383
1384 ha->optrom_region_start = start;
1385 ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
1386 if (is_update) {
1387 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1388 valid = 1;
1389 else if (start == (ha->flt_region_boot * 4) ||
1390 start == (ha->flt_region_fw * 4))
1391 valid = 1;
1392 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1393 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
1394 valid = 1;
1395 if (!valid) {
1396 ql_log(ql_log_warn, vha, 0x7058,
1397 "Invalid start region 0x%x/0x%x.\n", start,
1398 bsg_job->request_payload.payload_len);
1399 return -EINVAL;
1400 }
1401
1402 ha->optrom_region_size = start +
1403 bsg_job->request_payload.payload_len > ha->optrom_size ?
1404 ha->optrom_size - start :
1405 bsg_job->request_payload.payload_len;
1406 ha->optrom_state = QLA_SWRITING;
1407 } else {
1408 ha->optrom_region_size = start +
1409 bsg_job->reply_payload.payload_len > ha->optrom_size ?
1410 ha->optrom_size - start :
1411 bsg_job->reply_payload.payload_len;
1412 ha->optrom_state = QLA_SREADING;
1413 }
1414
1415 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
1416 if (!ha->optrom_buffer) {
1417 ql_log(ql_log_warn, vha, 0x7059,
1418 "Read: Unable to allocate memory for optrom retrieval "
1419 "(%x)\n", ha->optrom_region_size);
1420
1421 ha->optrom_state = QLA_SWAITING;
1422 return -ENOMEM;
1423 }
1424
1425 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
1426 return 0;
1427 }
1428
1429 static int
1430 qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
1431 {
1432 struct Scsi_Host *host = bsg_job->shost;
1433 scsi_qla_host_t *vha = shost_priv(host);
1434 struct qla_hw_data *ha = vha->hw;
1435 int rval = 0;
1436
1437 if (ha->flags.nic_core_reset_hdlr_active)
1438 return -EBUSY;
1439
1440 mutex_lock(&ha->optrom_mutex);
1441 rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1442 if (rval) {
1443 mutex_unlock(&ha->optrom_mutex);
1444 return rval;
1445 }
1446
1447 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1448 ha->optrom_region_start, ha->optrom_region_size);
1449
1450 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1451 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1452 ha->optrom_region_size);
1453
1454 bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
1455 bsg_job->reply->result = DID_OK;
1456 vfree(ha->optrom_buffer);
1457 ha->optrom_buffer = NULL;
1458 ha->optrom_state = QLA_SWAITING;
1459 mutex_unlock(&ha->optrom_mutex);
1460 bsg_job->job_done(bsg_job);
1461 return rval;
1462 }
1463
1464 static int
1465 qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1466 {
1467 struct Scsi_Host *host = bsg_job->shost;
1468 scsi_qla_host_t *vha = shost_priv(host);
1469 struct qla_hw_data *ha = vha->hw;
1470 int rval = 0;
1471
1472 mutex_lock(&ha->optrom_mutex);
1473 rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1474 if (rval) {
1475 mutex_unlock(&ha->optrom_mutex);
1476 return rval;
1477 }
1478
1479 /* Set the isp82xx_no_md_cap not to capture minidump */
1480 ha->flags.isp82xx_no_md_cap = 1;
1481
1482 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1483 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1484 ha->optrom_region_size);
1485
1486 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1487 ha->optrom_region_start, ha->optrom_region_size);
1488
1489 bsg_job->reply->result = DID_OK;
1490 vfree(ha->optrom_buffer);
1491 ha->optrom_buffer = NULL;
1492 ha->optrom_state = QLA_SWAITING;
1493 mutex_unlock(&ha->optrom_mutex);
1494 bsg_job->job_done(bsg_job);
1495 return rval;
1496 }
1497
1498 static int
1499 qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job)
1500 {
1501 struct Scsi_Host *host = bsg_job->shost;
1502 scsi_qla_host_t *vha = shost_priv(host);
1503 struct qla_hw_data *ha = vha->hw;
1504 int rval = 0;
1505 uint8_t bsg[DMA_POOL_SIZE];
1506 struct qla_image_version_list *list = (void *)bsg;
1507 struct qla_image_version *image;
1508 uint32_t count;
1509 dma_addr_t sfp_dma;
1510 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1511 if (!sfp) {
1512 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1513 EXT_STATUS_NO_MEMORY;
1514 goto done;
1515 }
1516
1517 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1518 bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1519
1520 image = list->version;
1521 count = list->count;
1522 while (count--) {
1523 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1524 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1525 image->field_address.device, image->field_address.offset,
1526 sizeof(image->field_info), image->field_address.option);
1527 if (rval) {
1528 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1529 EXT_STATUS_MAILBOX;
1530 goto dealloc;
1531 }
1532 image++;
1533 }
1534
1535 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1536
1537 dealloc:
1538 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1539
1540 done:
1541 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1542 bsg_job->reply->result = DID_OK << 16;
1543 bsg_job->job_done(bsg_job);
1544
1545 return 0;
1546 }
1547
1548 static int
1549 qla2x00_read_fru_status(struct fc_bsg_job *bsg_job)
1550 {
1551 struct Scsi_Host *host = bsg_job->shost;
1552 scsi_qla_host_t *vha = shost_priv(host);
1553 struct qla_hw_data *ha = vha->hw;
1554 int rval = 0;
1555 uint8_t bsg[DMA_POOL_SIZE];
1556 struct qla_status_reg *sr = (void *)bsg;
1557 dma_addr_t sfp_dma;
1558 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1559 if (!sfp) {
1560 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1561 EXT_STATUS_NO_MEMORY;
1562 goto done;
1563 }
1564
1565 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1566 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1567
1568 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1569 sr->field_address.device, sr->field_address.offset,
1570 sizeof(sr->status_reg), sr->field_address.option);
1571 sr->status_reg = *sfp;
1572
1573 if (rval) {
1574 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1575 EXT_STATUS_MAILBOX;
1576 goto dealloc;
1577 }
1578
1579 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1580 bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1581
1582 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1583
1584 dealloc:
1585 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1586
1587 done:
1588 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1589 bsg_job->reply->reply_payload_rcv_len = sizeof(*sr);
1590 bsg_job->reply->result = DID_OK << 16;
1591 bsg_job->job_done(bsg_job);
1592
1593 return 0;
1594 }
1595
1596 static int
1597 qla2x00_write_fru_status(struct fc_bsg_job *bsg_job)
1598 {
1599 struct Scsi_Host *host = bsg_job->shost;
1600 scsi_qla_host_t *vha = shost_priv(host);
1601 struct qla_hw_data *ha = vha->hw;
1602 int rval = 0;
1603 uint8_t bsg[DMA_POOL_SIZE];
1604 struct qla_status_reg *sr = (void *)bsg;
1605 dma_addr_t sfp_dma;
1606 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1607 if (!sfp) {
1608 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1609 EXT_STATUS_NO_MEMORY;
1610 goto done;
1611 }
1612
1613 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1614 bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1615
1616 *sfp = sr->status_reg;
1617 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1618 sr->field_address.device, sr->field_address.offset,
1619 sizeof(sr->status_reg), sr->field_address.option);
1620
1621 if (rval) {
1622 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1623 EXT_STATUS_MAILBOX;
1624 goto dealloc;
1625 }
1626
1627 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1628
1629 dealloc:
1630 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1631
1632 done:
1633 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1634 bsg_job->reply->result = DID_OK << 16;
1635 bsg_job->job_done(bsg_job);
1636
1637 return 0;
1638 }
1639
1640 static int
1641 qla2x00_write_i2c(struct fc_bsg_job *bsg_job)
1642 {
1643 struct Scsi_Host *host = bsg_job->shost;
1644 scsi_qla_host_t *vha = shost_priv(host);
1645 struct qla_hw_data *ha = vha->hw;
1646 int rval = 0;
1647 uint8_t bsg[DMA_POOL_SIZE];
1648 struct qla_i2c_access *i2c = (void *)bsg;
1649 dma_addr_t sfp_dma;
1650 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1651 if (!sfp) {
1652 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1653 EXT_STATUS_NO_MEMORY;
1654 goto done;
1655 }
1656
1657 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1658 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1659
1660 memcpy(sfp, i2c->buffer, i2c->length);
1661 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1662 i2c->device, i2c->offset, i2c->length, i2c->option);
1663
1664 if (rval) {
1665 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1666 EXT_STATUS_MAILBOX;
1667 goto dealloc;
1668 }
1669
1670 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1671
1672 dealloc:
1673 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1674
1675 done:
1676 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1677 bsg_job->reply->result = DID_OK << 16;
1678 bsg_job->job_done(bsg_job);
1679
1680 return 0;
1681 }
1682
1683 static int
1684 qla2x00_read_i2c(struct fc_bsg_job *bsg_job)
1685 {
1686 struct Scsi_Host *host = bsg_job->shost;
1687 scsi_qla_host_t *vha = shost_priv(host);
1688 struct qla_hw_data *ha = vha->hw;
1689 int rval = 0;
1690 uint8_t bsg[DMA_POOL_SIZE];
1691 struct qla_i2c_access *i2c = (void *)bsg;
1692 dma_addr_t sfp_dma;
1693 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1694 if (!sfp) {
1695 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1696 EXT_STATUS_NO_MEMORY;
1697 goto done;
1698 }
1699
1700 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1701 bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1702
1703 rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1704 i2c->device, i2c->offset, i2c->length, i2c->option);
1705
1706 if (rval) {
1707 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
1708 EXT_STATUS_MAILBOX;
1709 goto dealloc;
1710 }
1711
1712 memcpy(i2c->buffer, sfp, i2c->length);
1713 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1714 bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1715
1716 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1717
1718 dealloc:
1719 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1720
1721 done:
1722 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1723 bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c);
1724 bsg_job->reply->result = DID_OK << 16;
1725 bsg_job->job_done(bsg_job);
1726
1727 return 0;
1728 }
1729
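/*
 * qla24xx_process_bidir_cmd() - Bidirectional pass-through diagnostic.
 * Requires a BIDI-capable adapter in point-to-point fabric mode;
 * performs a self login (once) to obtain a loop id, DMA-maps both
 * payloads and issues the command with qla2x00_start_bidir().  Errors
 * are returned as vendor-specific status in the bsg reply rather than
 * as a job failure.
 */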
1730 static int
1731 qla24xx_process_bidir_cmd(struct fc_bsg_job *bsg_job)
1732 {
1733 struct Scsi_Host *host = bsg_job->shost;
1734 scsi_qla_host_t *vha = shost_priv(host);
1735 struct qla_hw_data *ha = vha->hw;
1736 uint16_t thread_id;
1737 uint32_t rval = EXT_STATUS_OK;
1738 uint16_t req_sg_cnt = 0;
1739 uint16_t rsp_sg_cnt = 0;
1740 uint16_t nextlid = 0;
1741 uint32_t tot_dsds;
1742 srb_t *sp = NULL;
1743 uint32_t req_data_len = 0;
1744 uint32_t rsp_data_len = 0;
1745
1746 /* Check the type of the adapter */
1747 if (!IS_BIDI_CAPABLE(ha)) {
1748 ql_log(ql_log_warn, vha, 0x70a0,
1749 "This adapter is not supported\n");
1750 rval = EXT_STATUS_NOT_SUPPORTED;
1751 goto done;
1752 }
1753
1754 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
1755 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1756 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
1757 rval = EXT_STATUS_BUSY;
1758 goto done;
1759 }
1760
1761 /* Check if host is online */
1762 if (!vha->flags.online) {
1763 ql_log(ql_log_warn, vha, 0x70a1,
1764 "Host is not online\n");
1765 rval = EXT_STATUS_DEVICE_OFFLINE;
1766 goto done;
1767 }
1768
1769 /* Check if cable is plugged in or not */
1770 if (vha->device_flags & DFLG_NO_CABLE) {
1771 ql_log(ql_log_warn, vha, 0x70a2,
1772 "Cable is unplugged...\n");
1773 rval = EXT_STATUS_INVALID_CFG;
1774 goto done;
1775 }
1776
1777 /* Check if the switch is connected or not */
1778 if (ha->current_topology != ISP_CFG_F) {
1779 ql_log(ql_log_warn, vha, 0x70a3,
1780 "Host is not connected to the switch\n");
1781 rval = EXT_STATUS_INVALID_CFG;
1782 goto done;
1783 }
1784
1785 /* Check if operating mode is P2P */
1786 if (ha->operating_mode != P2P) {
1787 ql_log(ql_log_warn, vha, 0x70a4,
1788 "Host is operating mode is not P2p\n");
1789 rval = EXT_STATUS_INVALID_CFG;
1790 goto done;
1791 }
1792
1793 thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1794
1795 mutex_lock(&ha->selflogin_lock);
1796 if (vha->self_login_loop_id == 0) {
1797 /* Initialize all required fields of fcport */
1798 vha->bidir_fcport.vha = vha;
1799 vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
1800 vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
1801 vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
1802 vha->bidir_fcport.loop_id = vha->loop_id;
1803
1804 if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
1805 ql_log(ql_log_warn, vha, 0x70a7,
1806 "Failed to login port %06X for bidirectional IOCB\n",
1807 vha->bidir_fcport.d_id.b24);
1808 mutex_unlock(&ha->selflogin_lock);
1809 rval = EXT_STATUS_MAILBOX;
1810 goto done;
1811 }
1812 vha->self_login_loop_id = nextlid - 1;
1813
1814 }
1815 /* Assign the self login loop id to fcport */
1816 mutex_unlock(&ha->selflogin_lock);
1817
1818 vha->bidir_fcport.loop_id = vha->self_login_loop_id;
1819
1820 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1821 bsg_job->request_payload.sg_list,
1822 bsg_job->request_payload.sg_cnt,
1823 DMA_TO_DEVICE);
1824
1825 if (!req_sg_cnt) {
1826 rval = EXT_STATUS_NO_MEMORY;
1827 goto done;
1828 }
1829
1830 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1831 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
1832 DMA_FROM_DEVICE);
1833
1834 if (!rsp_sg_cnt) {
1835 rval = EXT_STATUS_NO_MEMORY;
1836 goto done_unmap_req_sg;
1837 }
1838
1839 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1840 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1841 ql_dbg(ql_dbg_user, vha, 0x70a9,
1842 "Dma mapping resulted in different sg counts "
1843 "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
1844 "%x dma_reply_sg_cnt: %x]\n",
1845 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1846 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1847 rval = EXT_STATUS_NO_MEMORY;
1848 goto done_unmap_sg;
1849 }
1850
1851 req_data_len = bsg_job->request_payload.payload_len;
1852 rsp_data_len = bsg_job->reply_payload.payload_len;
1853
1854 if (req_data_len != rsp_data_len) {
1855 rval = EXT_STATUS_BUSY;
1856 ql_log(ql_log_warn, vha, 0x70aa,
1857 "req_data_len != rsp_data_len\n");
1858 goto done_unmap_sg;
1859 }
1860
1861
1862 /* Alloc SRB structure */
1863 sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
1864 if (!sp) {
1865 ql_dbg(ql_dbg_user, vha, 0x70ac,
1866 "Alloc SRB structure failed\n");
1867 rval = EXT_STATUS_NO_MEMORY;
1868 goto done_unmap_sg;
1869 }
1870
1871 /*Populate srb->ctx with bidir ctx*/
1872 sp->u.bsg_job = bsg_job;
1873 sp->free = qla2x00_bsg_sp_free;
1874 sp->type = SRB_BIDI_CMD;
1875 sp->done = qla2x00_bsg_job_done;
1876
1877 /* Add the read and write sg count */
1878 tot_dsds = rsp_sg_cnt + req_sg_cnt;
1879
1880 rval = qla2x00_start_bidir(sp, vha, tot_dsds);
1881 if (rval != EXT_STATUS_OK)
1882 goto done_free_srb;
1883 /* the bsg request will be completed in the interrupt handler */
1884 return rval;
1885
1886 done_free_srb:
1887 mempool_free(sp, ha->srb_mempool);
1888 done_unmap_sg:
1889 dma_unmap_sg(&ha->pdev->dev,
1890 bsg_job->reply_payload.sg_list,
1891 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1892 done_unmap_req_sg:
1893 dma_unmap_sg(&ha->pdev->dev,
1894 bsg_job->request_payload.sg_list,
1895 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1896 done:
1897
1898 /* Return an error vendor specific response
1899 * and complete the bsg request
1900 */
1901 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
1902 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1903 bsg_job->reply->reply_payload_rcv_len = 0;
1904 bsg_job->reply->result = (DID_OK) << 16;
1905 bsg_job->job_done(bsg_job);
1906 /* Always return success, vendor rsp carries correct status */
1907 return 0;
1908 }
1909
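/*
 * qlafx00_mgmt_cmd() - ISPFX00 management IOCB pass-through.
 * The vendor command embeds a qla_mt_iocb_rqst_fx00; request and reply
 * payloads are DMA-mapped only when the corresponding REQ/RESP
 * DMA-valid flags are set.  A dummy fcport carries the loop id (taken
 * from the request's dataword) and the IOCB is started via
 * qla2x00_start_sp().
 */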
1910 static int
1911 qlafx00_mgmt_cmd(struct fc_bsg_job *bsg_job)
1912 {
1913 struct Scsi_Host *host = bsg_job->shost;
1914 scsi_qla_host_t *vha = shost_priv(host);
1915 struct qla_hw_data *ha = vha->hw;
1916 int rval = (DRIVER_ERROR << 16);
1917 struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
1918 srb_t *sp;
1919 int req_sg_cnt = 0, rsp_sg_cnt = 0;
1920 struct fc_port *fcport;
1921 char *type = "FC_BSG_HST_FX_MGMT";
1922
1923 /* Copy the IOCB specific information */
1924 piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
1925 &bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
1926
1927 /* Dump the vendor information */
1928 ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70cf,
1929 (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));
1930
1931 if (!vha->flags.online) {
1932 ql_log(ql_log_warn, vha, 0x70d0,
1933 "Host is not online.\n");
1934 rval = -EIO;
1935 goto done;
1936 }
1937
1938 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
1939 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1940 bsg_job->request_payload.sg_list,
1941 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1942 if (!req_sg_cnt) {
1943 ql_log(ql_log_warn, vha, 0x70c7,
1944 "dma_map_sg return %d for request\n", req_sg_cnt);
1945 rval = -ENOMEM;
1946 goto done;
1947 }
1948 }
1949
1950 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
1951 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1952 bsg_job->reply_payload.sg_list,
1953 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1954 if (!rsp_sg_cnt) {
1955 ql_log(ql_log_warn, vha, 0x70c8,
1956 "dma_map_sg return %d for reply\n", rsp_sg_cnt);
1957 rval = -ENOMEM;
1958 goto done_unmap_req_sg;
1959 }
1960 }
1961
1962 ql_dbg(ql_dbg_user, vha, 0x70c9,
1963 "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
1964 "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
1965 req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
1966
1967 /* Allocate a dummy fcport structure, since the functions preparing the
1968 * IOCB and mailbox command retrieve port-specific information from the
1969 * fcport structure. For host-based ELS commands no fcport structure is
1970 * allocated.
1971 */
1972 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1973 if (!fcport) {
1974 ql_log(ql_log_warn, vha, 0x70ca,
1975 "Failed to allocate fcport.\n");
1976 rval = -ENOMEM;
1977 goto done_unmap_rsp_sg;
1978 }
1979
1980 /* Alloc SRB structure */
1981 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
1982 if (!sp) {
1983 ql_log(ql_log_warn, vha, 0x70cb,
1984 "qla2x00_get_sp failed.\n");
1985 rval = -ENOMEM;
1986 goto done_free_fcport;
1987 }
1988
1989 /* Initialize all required fields of fcport */
1990 fcport->vha = vha;
1991 fcport->loop_id = piocb_rqst->dataword;
1992
1993 sp->type = SRB_FXIOCB_BCMD;
1994 sp->name = "bsg_fx_mgmt";
1995 sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
1996 sp->u.bsg_job = bsg_job;
1997 sp->free = qla2x00_bsg_sp_free;
1998 sp->done = qla2x00_bsg_job_done;
1999
2000 ql_dbg(ql_dbg_user, vha, 0x70cc,
2001 "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
2002 type, piocb_rqst->func_type, fcport->loop_id);
2003
2004 rval = qla2x00_start_sp(sp);
2005 if (rval != QLA_SUCCESS) {
2006 ql_log(ql_log_warn, vha, 0x70cd,
2007 "qla2x00_start_sp failed=%d.\n", rval);
2008 mempool_free(sp, ha->srb_mempool);
2009 rval = -EIO;
2010 goto done_free_fcport;
2011 }
2012 return rval;
2013
2014 done_free_fcport:
2015 kfree(fcport);
2016
2017 done_unmap_rsp_sg:
2018 if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
2019 dma_unmap_sg(&ha->pdev->dev,
2020 bsg_job->reply_payload.sg_list,
2021 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2022 done_unmap_req_sg:
2023 if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
2024 dma_unmap_sg(&ha->pdev->dev,
2025 bsg_job->request_payload.sg_list,
2026 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2027
2028 done:
2029 return rval;
2030 }
2031
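/*
* qla26xx_serdes_op() - Service a QL_VND_SERDES_OP vendor request: read
* or write a single SerDes register described by the qla_serdes_reg
* structure in the request payload. Register reads are copied back into
* the reply payload.
*/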
2032 static int
2033 qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
2034 {
2035 struct Scsi_Host *host = bsg_job->shost;
2036 scsi_qla_host_t *vha = shost_priv(host);
2037 int rval = 0;
2038 struct qla_serdes_reg sr;
2039
2040 memset(&sr, 0, sizeof(sr));
2041
2042 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2043 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2044
2045 switch (sr.cmd) {
2046 case INT_SC_SERDES_WRITE_REG:
2047 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2048 bsg_job->reply->reply_payload_rcv_len = 0;
2049 break;
2050 case INT_SC_SERDES_READ_REG:
2051 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2052 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2053 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2054 bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
2055 break;
2056 default:
2057 ql_dbg(ql_dbg_user, vha, 0x708c,
2058 "Unknown serdes cmd %x.\n", sr.cmd);
2059 rval = -EINVAL;
2060 break;
2061 }
2062
2063 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
2064 rval ? EXT_STATUS_MAILBOX : 0;
2065
2066 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2067 bsg_job->reply->result = DID_OK << 16;
2068 bsg_job->job_done(bsg_job);
2069 return 0;
2070 }
2071
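/*
* qla8044_serdes_op() - Service a QL_VND_SERDES_OP_EX vendor request on
* ISP8044 adapters using the extended qla_serdes_reg_ex structure;
* otherwise identical in flow to qla26xx_serdes_op().
*/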
2072 static int
2073 qla8044_serdes_op(struct fc_bsg_job *bsg_job)
2074 {
2075 struct Scsi_Host *host = bsg_job->shost;
2076 scsi_qla_host_t *vha = shost_priv(host);
2077 int rval = 0;
2078 struct qla_serdes_reg_ex sr;
2079
2080 memset(&sr, 0, sizeof(sr));
2081
2082 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2083 bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2084
2085 switch (sr.cmd) {
2086 case INT_SC_SERDES_WRITE_REG:
2087 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2088 bsg_job->reply->reply_payload_rcv_len = 0;
2089 break;
2090 case INT_SC_SERDES_READ_REG:
2091 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2092 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2093 bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2094 bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
2095 break;
2096 default:
2097 ql_dbg(ql_dbg_user, vha, 0x70cf,
2098 "Unknown serdes cmd %x.\n", sr.cmd);
2099 rval = -EINVAL;
2100 break;
2101 }
2102
2103 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
2104 rval ? EXT_STATUS_MAILBOX : 0;
2105
2106 bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2107 bsg_job->reply->result = DID_OK << 16;
2108 bsg_job->job_done(bsg_job);
2109 return 0;
2110 }
2111
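/*
* qla2x00_process_vendor_specific() - Dispatch an FC_BSG_HST_VENDOR
* request to the handler selected by vendor_cmd[0]; unknown vendor
* commands are rejected with -ENOSYS.
*/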
2112 static int
2113 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2114 {
2115 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2116 case QL_VND_LOOPBACK:
2117 return qla2x00_process_loopback(bsg_job);
2118
2119 case QL_VND_A84_RESET:
2120 return qla84xx_reset(bsg_job);
2121
2122 case QL_VND_A84_UPDATE_FW:
2123 return qla84xx_updatefw(bsg_job);
2124
2125 case QL_VND_A84_MGMT_CMD:
2126 return qla84xx_mgmt_cmd(bsg_job);
2127
2128 case QL_VND_IIDMA:
2129 return qla24xx_iidma(bsg_job);
2130
2131 case QL_VND_FCP_PRIO_CFG_CMD:
2132 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2133
2134 case QL_VND_READ_FLASH:
2135 return qla2x00_read_optrom(bsg_job);
2136
2137 case QL_VND_UPDATE_FLASH:
2138 return qla2x00_update_optrom(bsg_job);
2139
2140 case QL_VND_SET_FRU_VERSION:
2141 return qla2x00_update_fru_versions(bsg_job);
2142
2143 case QL_VND_READ_FRU_STATUS:
2144 return qla2x00_read_fru_status(bsg_job);
2145
2146 case QL_VND_WRITE_FRU_STATUS:
2147 return qla2x00_write_fru_status(bsg_job);
2148
2149 case QL_VND_WRITE_I2C:
2150 return qla2x00_write_i2c(bsg_job);
2151
2152 case QL_VND_READ_I2C:
2153 return qla2x00_read_i2c(bsg_job);
2154
2155 case QL_VND_DIAG_IO_CMD:
2156 return qla24xx_process_bidir_cmd(bsg_job);
2157
2158 case QL_VND_FX00_MGMT_CMD:
2159 return qlafx00_mgmt_cmd(bsg_job);
2160
2161 case QL_VND_SERDES_OP:
2162 return qla26xx_serdes_op(bsg_job);
2163
2164 case QL_VND_SERDES_OP_EX:
2165 return qla8044_serdes_op(bsg_job);
2166
2167 default:
2168 return -ENOSYS;
2169 }
2170 }
2171
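/*
* qla24xx_bsg_request() - Entry point for BSG requests from the FC
* transport. ELS pass-through requests arrive on an rport, all other
* message codes on the host; requests are refused with -EBUSY while an
* ISP abort is active or needed.
*/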
2172 int
2173 qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2174 {
2175 int ret = -EINVAL;
2176 struct fc_rport *rport;
2177 fc_port_t *fcport = NULL;
2178 struct Scsi_Host *host;
2179 scsi_qla_host_t *vha;
2180
2181 /* In case no data transferred. */
2182 bsg_job->reply->reply_payload_rcv_len = 0;
2183
2184 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
2185 rport = bsg_job->rport;
2186 fcport = *(fc_port_t **) rport->dd_data;
2187 host = rport_to_shost(rport);
2188 vha = shost_priv(host);
2189 } else {
2190 host = bsg_job->shost;
2191 vha = shost_priv(host);
2192 }
2193
2194 if (qla2x00_reset_active(vha)) {
2195 ql_dbg(ql_dbg_user, vha, 0x709f,
2196 "BSG: ISP abort active/needed -- cmd=%d.\n",
2197 bsg_job->request->msgcode);
2198 return -EBUSY;
2199 }
2200
2201 ql_dbg(ql_dbg_user, vha, 0x7000,
2202 "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
2203
2204 switch (bsg_job->request->msgcode) {
2205 case FC_BSG_RPT_ELS:
2206 case FC_BSG_HST_ELS_NOLOGIN:
2207 ret = qla2x00_process_els(bsg_job);
2208 break;
2209 case FC_BSG_HST_CT:
2210 ret = qla2x00_process_ct(bsg_job);
2211 break;
2212 case FC_BSG_HST_VENDOR:
2213 ret = qla2x00_process_vendor_specific(bsg_job);
2214 break;
2215 case FC_BSG_HST_ADD_RPORT:
2216 case FC_BSG_HST_DEL_RPORT:
2217 case FC_BSG_RPT_CT:
2218 default:
2219 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2220 break;
2221 }
2222 return ret;
2223 }
2224
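/*
* qla24xx_bsg_timeout() - Abort the SRB backing a timed-out bsg_job.
* The outstanding command arrays of all request queues are scanned under
* the hardware lock; when the matching CT/ELS/FX IOCB is found, it is
* aborted through the mailbox interface and its slot is cleared.
*/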
2225 int
2226 qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2227 {
2228 scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2229 struct qla_hw_data *ha = vha->hw;
2230 srb_t *sp;
2231 int cnt, que;
2232 unsigned long flags;
2233 struct req_que *req;
2234
2235 /* find the bsg job from the active list of commands */
2236 spin_lock_irqsave(&ha->hardware_lock, flags);
2237 for (que = 0; que < ha->max_req_queues; que++) {
2238 req = ha->req_q_map[que];
2239 if (!req)
2240 continue;
2241
2242 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
2243 sp = req->outstanding_cmds[cnt];
2244 if (sp) {
2245 if (((sp->type == SRB_CT_CMD) ||
2246 (sp->type == SRB_ELS_CMD_HST) ||
2247 (sp->type == SRB_FXIOCB_BCMD))
2248 && (sp->u.bsg_job == bsg_job)) {
2249 req->outstanding_cmds[cnt] = NULL;
2250 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2251 if (ha->isp_ops->abort_command(sp)) {
2252 ql_log(ql_log_warn, vha, 0x7089,
2253 "mbx abort_command "
2254 "failed.\n");
2255 bsg_job->req->errors =
2256 bsg_job->reply->result = -EIO;
2257 } else {
2258 ql_dbg(ql_dbg_user, vha, 0x708a,
2259 "mbx abort_command "
2260 "success.\n");
2261 bsg_job->req->errors =
2262 bsg_job->reply->result = 0;
2263 }
2264 spin_lock_irqsave(&ha->hardware_lock, flags);
2265 goto done;
2266 }
2267 }
2268 }
2269 }
2270 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2271 ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
2272 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2273 return 0;
2274
2275 done:
2276 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2277 sp->free(vha, sp);
2278 return 0;
2279 }