target: remove ->put_session method
[deliverable/linux.git] / drivers / scsi / qla2xxx / tcm_qla2xxx.c
1 /*******************************************************************************
2 * This file contains tcm implementation using v4 configfs fabric infrastructure
3 * for QLogic target mode HBAs
4 *
5 * (c) Copyright 2010-2013 Datera, Inc.
6 *
7 * Author: Nicholas A. Bellinger <nab@daterainc.com>
8 *
9 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
10 * the TCM_FC / Open-FCoE.org fabric module.
11 *
12 * Copyright (c) 2010 Cisco Systems, Inc
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 ****************************************************************************/
24
25
26 #include <linux/module.h>
27 #include <linux/moduleparam.h>
28 #include <generated/utsrelease.h>
29 #include <linux/utsname.h>
30 #include <linux/init.h>
31 #include <linux/list.h>
32 #include <linux/slab.h>
33 #include <linux/kthread.h>
34 #include <linux/types.h>
35 #include <linux/string.h>
36 #include <linux/configfs.h>
37 #include <linux/ctype.h>
38 #include <asm/unaligned.h>
39 #include <scsi/scsi.h>
40 #include <scsi/scsi_host.h>
41 #include <scsi/scsi_device.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <target/target_core_base.h>
44 #include <target/target_core_fabric.h>
45 #include <target/target_core_fabric_configfs.h>
46 #include <target/configfs_macros.h>
47
48 #include "qla_def.h"
49 #include "qla_target.h"
50 #include "tcm_qla2xxx.h"
51
52 static struct workqueue_struct *tcm_qla2xxx_free_wq;
53 static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
54
55 static const struct target_core_fabric_ops tcm_qla2xxx_ops;
56 static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops;
57
/*
 * Parse WWN.
 * If strict, we require lower-case hex and colon separators to be sure
 * the name is the same as what would be generated by ft_format_wwn()
 * so the name and wwn are mapped one-to-one.
 *
 * Returns the number of characters consumed on success, or -1 on any
 * parse failure (the failure reason is emitted via pr_debug()).
 */
static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
{
	const char *cp;
	char c;
	u32 nibble;
	u32 byte = 0;	/* completed bytes seen (strict mode bookkeeping) */
	u32 pos = 0;	/* hex-digit position within the current byte */
	u32 err;

	*wwn = 0;
	for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
		c = *cp;
		/* Tolerate a single trailing newline before the NUL */
		if (c == '\n' && cp[1] == '\0')
			continue;
		/* strict: every third character must be a ':' separator */
		if (strict && pos++ == 2 && byte++ < 7) {
			pos = 0;
			if (c == ':')
				continue;
			err = 1;
			goto fail;
		}
		if (c == '\0') {
			err = 2;
			/* strict names must contain exactly 8 bytes */
			if (strict && byte != 8)
				goto fail;
			return cp - name;
		}
		err = 3;
		if (isdigit(c))
			nibble = c - '0';
		else if (isxdigit(c) && (islower(c) || !strict))
			nibble = tolower(c) - 'a' + 10;
		else
			goto fail;
		/* Accumulate one hex digit into the 64-bit WWN */
		*wwn = (*wwn << 4) | nibble;
	}
	err = 4;
fail:
	pr_debug("err %u len %zu pos %u byte %u\n",
			err, cp - name, pos, byte);
	return -1;
}
106
107 static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
108 {
109 u8 b[8];
110
111 put_unaligned_be64(wwn, b);
112 return snprintf(buf, len,
113 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
114 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
115 }
116
/* Fabric name reported to the target core for base (non-NPIV) mode */
static char *tcm_qla2xxx_get_fabric_name(void)
{
	return "qla2xxx";
}
121
122 /*
123 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
124 */
125 static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
126 {
127 unsigned int i, j;
128 u8 wwn[8];
129
130 memset(wwn, 0, sizeof(wwn));
131
132 /* Validate and store the new name */
133 for (i = 0, j = 0; i < 16; i++) {
134 int value;
135
136 value = hex_to_bin(*ns++);
137 if (value >= 0)
138 j = (j << 4) | value;
139 else
140 return -EINVAL;
141
142 if (i % 2) {
143 wwn[i/2] = j & 0xff;
144 j = 0;
145 }
146 }
147
148 *nm = wwn_to_u64(wwn);
149 return 0;
150 }
151
152 /*
153 * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
154 * store_fc_host_vport_create()
155 */
156 static int tcm_qla2xxx_npiv_parse_wwn(
157 const char *name,
158 size_t count,
159 u64 *wwpn,
160 u64 *wwnn)
161 {
162 unsigned int cnt = count;
163 int rc;
164
165 *wwpn = 0;
166 *wwnn = 0;
167
168 /* count may include a LF at end of string */
169 if (name[cnt-1] == '\n' || name[cnt-1] == 0)
170 cnt--;
171
172 /* validate we have enough characters for WWPN */
173 if ((cnt != (16+1+16)) || (name[16] != ':'))
174 return -EINVAL;
175
176 rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
177 if (rc != 0)
178 return rc;
179
180 rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
181 if (rc != 0)
182 return rc;
183
184 return 0;
185 }
186
/* Fabric name reported to the target core for NPIV mode */
static char *tcm_qla2xxx_npiv_get_fabric_name(void)
{
	return "qla2xxx_npiv";
}
191
/* Return the owning lport's WWN name string for this TPG */
static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;

	return lport->lport_naa_name;
}
200
/* Return the TPG tag (tpgt_N configfs index) for this portal group */
static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	return tpg->lport_tpgt;
}
207
/* configfs attrib: whether dynamic NodeACLs are generated (demo mode) */
static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.generate_node_acls;
}
215
/* configfs attrib: whether dynamically generated NodeACLs are cached */
static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.cache_dynamic_acls;
}
223
/* configfs attrib: write-protect setting for demo-mode sessions */
static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.demo_mode_write_protect;
}
231
/* configfs attrib: write-protect setting for explicit (production) ACLs */
static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.prod_mode_write_protect;
}
239
/* configfs attrib: restrict demo-mode initiators to login only */
static int tcm_qla2xxx_check_demo_mode_login_only(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.demo_mode_login_only;
}
247
/* configfs attrib: fabric-only T10-PI protection type for this TPG */
static int tcm_qla2xxx_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->tpg_attrib.fabric_prot_type;
}
255
/* Instance index for this TPG; reuses the TPG tag value */
static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return tpg->lport_tpgt;
}
263
/* Workqueue callback: release the TM command in process context */
static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
{
	struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
			struct qla_tgt_mgmt_cmd, free_work);

	transport_generic_free_cmd(&mcmd->se_cmd, 0);
}
271
/*
 * Called from qla_target_template->free_mcmd(), and will call
 * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
 * release callback. qla_hw_data->hardware_lock is expected to be held
 *
 * Defers the actual free to tcm_qla2xxx_free_wq so it runs outside the
 * hardware_lock / interrupt context.
 */
static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
{
	INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
	queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
}
282
/* Workqueue callback: release a SCSI command in process context */
static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	cmd->cmd_in_wq = 0;

	/* BIT_16 tracks that free has fired; warn on a double free attempt */
	WARN_ON(cmd->cmd_flags & BIT_16);

	cmd->cmd_flags |= BIT_16;
	transport_generic_free_cmd(&cmd->se_cmd, 0);
}
294
/*
 * Called from qla_target_template->free_cmd(), and will call
 * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
 * release callback. qla_hw_data->hardware_lock is expected to be held
 *
 * Defers the free to tcm_qla2xxx_free_wq; cmd_in_wq flags that the
 * command is pending on the workqueue.
 */
static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
{
	cmd->cmd_in_wq = 1;
	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
306
/*
 * Called from struct target_core_fabric_ops->check_stop_free() context
 *
 * Drops the session-command kref; BIT_14 marks that this path ran for
 * regular (non-TMR) commands.  Returns the target_put_sess_cmd() result.
 */
static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd;

	if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) {
		cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
		cmd->cmd_flags |= BIT_14;
	}

	return target_put_sess_cmd(se_cmd);
}
321
322 /* tcm_qla2xxx_release_cmd - Callback from TCM Core to release underlying
323 * fabric descriptor @se_cmd command to release
324 */
325 static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
326 {
327 struct qla_tgt_cmd *cmd;
328
329 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
330 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
331 struct qla_tgt_mgmt_cmd, se_cmd);
332 qlt_free_mcmd(mcmd);
333 return;
334 }
335
336 cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
337 qlt_free_cmd(cmd);
338 }
339
/*
 * Begin session shutdown: mark all outstanding session commands as
 * waiting under the hardware_lock.  Always returns 1 so the target core
 * proceeds with session teardown.
 */
static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct scsi_qla_host *vha;
	unsigned long flags;

	BUG_ON(!sess);
	vha = sess->vha;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	target_sess_cmd_list_set_waiting(se_sess);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

	return 1;
}
355
/* Unregister the qla_tgt session from the LLD under the hardware_lock */
static void tcm_qla2xxx_close_session(struct se_session *se_sess)
{
	struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
	struct scsi_qla_host *vha;
	unsigned long flags;

	BUG_ON(!sess);
	vha = sess->vha;

	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
	qlt_unreg_sess(sess);
	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
369
/* No per-session index is tracked by this fabric module */
static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
{
	return 0;
}
374
/*
 * Hand a WRITE command's data buffers (and any protection SGLs) to the
 * LLD so it can fetch the FCP WRITE payload from the initiator.
 */
static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	cmd->bufflen = se_cmd->data_length;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);

	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;

	/* T10-PI protection scatterlists, if any */
	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
	cmd->prot_sg = se_cmd->t_prot_sg;
	cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
	se_cmd->pi_err = 0;

	/*
	 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
	 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
	 */
	return qlt_rdy_to_xfer(cmd);
}
397
/*
 * If the command is still waiting for WRITE data (or queue-full retry),
 * wait up to 3000 jiffies for the CTIO abort completion before
 * returning.  Always returns 0.
 */
static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
{
	unsigned long flags;
	/*
	 * Check for WRITE_PENDING status to determine if we need to wait for
	 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
	 */
	spin_lock_irqsave(&se_cmd->t_state_lock, flags);
	if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
	    se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
		spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
		wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
						3000);
		return 0;
	}
	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);

	return 0;
}
417
static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
{
	/* No qla2xxx-specific default node attributes to apply */
}
422
/* No fabric-private command state is exposed; always reports 0 */
static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}
427
/*
 * Called from process context in qla_target.c:qlt_do_work() code
 *
 * Submit a new SCSI command to the target core.  Returns 0 on
 * successful submission or -EINVAL when the session lookup fails.
 */
static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
	unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
	int data_dir, int bidi)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct se_session *se_sess;
	struct qla_tgt_sess *sess;
	int flags = TARGET_SCF_ACK_KREF;

	if (bidi)
		flags |= TARGET_SCF_BIDI_OP;

	sess = cmd->sess;
	if (!sess) {
		pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
		return -EINVAL;
	}

	se_sess = sess->se_sess;
	if (!se_sess) {
		pr_err("Unable to locate active struct se_session\n");
		return -EINVAL;
	}

	return target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
				cmd->unpacked_lun, data_length, fcp_task_attr,
				data_dir, flags);
}
459
/*
 * Workqueue callback after FCP WRITE data arrival: either execute the
 * command or fail it if the transfer did not complete cleanly.
 */
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	/*
	 * Ensure that the complete FCP WRITE payload has been received.
	 * Otherwise return an exception via CHECK_CONDITION status.
	 */
	cmd->cmd_in_wq = 0;
	cmd->cmd_flags |= BIT_11;
	if (!cmd->write_data_transferred) {
		/*
		 * Check if se_cmd has already been aborted via LUN_RESET, and
		 * waiting upon completion in tcm_qla2xxx_write_pending_status()
		 */
		if (cmd->se_cmd.transport_state & CMD_T_ABORTED) {
			complete(&cmd->se_cmd.t_transport_stop_comp);
			return;
		}

		/* Prefer a DIF/PI error code if one was recorded */
		if (cmd->se_cmd.pi_err)
			transport_generic_request_failure(&cmd->se_cmd,
				cmd->se_cmd.pi_err);
		else
			transport_generic_request_failure(&cmd->se_cmd,
				TCM_CHECK_CONDITION_ABORT_CMD);

		return;
	}

	return target_execute_cmd(&cmd->se_cmd);
}
492
/*
 * Called from qla_target.c:qlt_do_ctio_completion()
 *
 * Defer post-WRITE-data processing to process context on
 * tcm_qla2xxx_free_wq.
 */
static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
{
	cmd->cmd_flags |= BIT_10;
	cmd->cmd_in_wq = 1;
	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
503
/* Workqueue callback: fail a command that hit a T10-PI (DIF) error */
static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
{
	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);

	/* take an extra kref to prevent cmd free too early.
	 * need to wait for SCSI status/check condition to
	 * finish responding generate by transport_generic_request_failure.
	 */
	kref_get(&cmd->se_cmd.cmd_kref);
	transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
}
515
/*
 * Called from qla_target.c:qlt_do_ctio_completion()
 *
 * Defer DIF error handling to process context on tcm_qla2xxx_free_wq.
 */
static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
{
	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
}
524
/*
 * Called from qla_target.c:qlt_issue_task_mgmt()
 *
 * Submit a task-management request to the target core; returns the
 * target_submit_tmr() result.
 */
static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
	uint8_t tmr_func, uint32_t tag)
{
	struct qla_tgt_sess *sess = mcmd->sess;
	struct se_cmd *se_cmd = &mcmd->se_cmd;

	return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
			tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
}
537
/*
 * Queue completed DATA-IN buffers plus SCSI status back to the
 * initiator in a single response.
 */
static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);

	cmd->cmd_flags |= BIT_4;
	cmd->bufflen = se_cmd->data_length;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);

	cmd->sg_cnt = se_cmd->t_data_nents;
	cmd->sg = se_cmd->t_data_sg;
	cmd->offset = 0;
	cmd->cmd_flags |= BIT_3;

	/* T10-PI protection scatterlists, if any */
	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
	cmd->prot_sg = se_cmd->t_prot_sg;
	cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
	se_cmd->pi_err = 0;

	/*
	 * Now queue completed DATA_IN the qla2xxx LLD and response ring
	 */
	return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
				se_cmd->scsi_status);
}
564
/*
 * Queue a status-only response (no data) to the initiator, adjusting
 * residual accounting for FCP_READ commands that carry no payload.
 */
static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);
	int xmit_type = QLA_TGT_XMIT_STATUS;

	cmd->bufflen = se_cmd->data_length;
	cmd->sg = NULL;
	cmd->sg_cnt = 0;
	cmd->offset = 0;
	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
	/* BIT_5 tracks that queue_status already ran for this command */
	if (cmd->cmd_flags & BIT_5) {
		pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
		dump_stack();
	}
	cmd->cmd_flags |= BIT_5;

	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
		/*
		 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
		 * for qla_tgt_xmit_response LLD code
		 */
		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
			se_cmd->residual_count = 0;
		}
		/* Report the whole expected read length as residual */
		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
		se_cmd->residual_count += se_cmd->data_length;

		cmd->bufflen = 0;
	}
	/*
	 * Now queue status response to qla2xxx LLD code and response ring
	 */
	return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
}
602
603 static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
604 {
605 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
606 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
607 struct qla_tgt_mgmt_cmd, se_cmd);
608
609 pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
610 mcmd, se_tmr->function, se_tmr->response);
611 /*
612 * Do translation between TCM TM response codes and
613 * QLA2xxx FC TM response codes.
614 */
615 switch (se_tmr->response) {
616 case TMR_FUNCTION_COMPLETE:
617 mcmd->fc_tm_rsp = FC_TM_SUCCESS;
618 break;
619 case TMR_TASK_DOES_NOT_EXIST:
620 mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
621 break;
622 case TMR_FUNCTION_REJECTED:
623 mcmd->fc_tm_rsp = FC_TM_REJECT;
624 break;
625 case TMR_LUN_DOES_NOT_EXIST:
626 default:
627 mcmd->fc_tm_rsp = FC_TM_FAILED;
628 break;
629 }
630 /*
631 * Queue the TM response to QLA2xxx LLD to build a
632 * CTIO response packet.
633 */
634 qlt_xmit_tm_rsp(mcmd);
635 }
636
/*
 * Aborted-task callback: undo the PCI scatterlist mapping, if any, so
 * the descriptor can be released without leaking DMA mappings.
 */
static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
	struct qla_tgt_cmd *cmd = container_of(se_cmd,
				struct qla_tgt_cmd, se_cmd);
	struct scsi_qla_host *vha = cmd->vha;
	struct qla_hw_data *ha = vha->hw;

	if (!cmd->sg_mapped)
		return;

	pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
	cmd->sg_mapped = 0;
}
650
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
			struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 *
 * Remove the session's NodeACL from the lport's btree fcport map and
 * clear the S_ID/LOOP_ID lookup entries, so in-flight ATIOs/TMRs can no
 * longer resolve to this (dying) session.
 */
static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
{
	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
	struct se_portal_group *se_tpg = se_nacl->se_tpg;
	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
				struct tcm_qla2xxx_lport, lport_wwn);
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
				struct tcm_qla2xxx_nacl, se_node_acl);
	void *node;

	pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);

	node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
	if (WARN_ON(node && (node != se_nacl))) {
		/*
		 * The nacl no longer matches what we think it should be.
		 * Most likely a new dynamic acl has been added while
		 * someone dropped the hardware lock.  It clearly is a
		 * bug elsewhere, but this bit can't make things worse.
		 */
		btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
			       node, GFP_ATOMIC);
	}

	pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
	    se_nacl, nacl->nport_wwnn, nacl->nport_id);
	/*
	 * Now clear the se_nacl and session pointers from our HW lport lookup
	 * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
	 *
	 * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
	 * target_wait_for_sess_cmds() before the session waits for outstanding
	 * I/O to complete, to avoid a race between session shutdown execution
	 * and incoming ATIOs or TMRs picking up a stale se_node_act reference.
	 */
	tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
}
694
/* kref release callback: unregister the LLD session when refs drop to 0 */
static void tcm_qla2xxx_release_session(struct kref *kref)
{
	struct se_session *se_sess = container_of(kref,
			struct se_session, sess_kref);

	qlt_unreg_sess(se_sess->fabric_sess_ptr);
}
702
/*
 * Drop a session reference; caller must hold the hardware_lock because
 * the final put unregisters the session via tcm_qla2xxx_release_session.
 */
static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
{
	if (!sess)
		return;

	assert_spin_locked(&sess->vha->hw->hardware_lock);
	kref_put(&sess->se_sess->sess_kref, tcm_qla2xxx_release_session);
}
711
/*
 * LLD shutdown_sess callback: flag the session's commands as waiting.
 * Caller must hold the hardware_lock.
 */
static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
{
	assert_spin_locked(&sess->vha->hw->hardware_lock);
	target_sess_cmd_list_set_waiting(sess->se_sess);
}
717
/*
 * Initialize a NodeACL from its configfs name: the name must be a
 * strict colon-separated WWN, which is parsed into nport_wwnn and
 * re-formatted into nport_name.  Returns 0 or -EINVAL.
 */
static int tcm_qla2xxx_init_nodeacl(struct se_node_acl *se_nacl,
	const char *name)
{
	struct tcm_qla2xxx_nacl *nacl =
		container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
	u64 wwnn;

	if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
		return -EINVAL;

	nacl->nport_wwnn = wwnn;
	tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);

	return 0;
}
733
/* Start items for tcm_qla2xxx_tpg_attrib_cit */

/*
 * Generate configfs show/store handlers for TPG attribute "name".
 * The store handler parses an unsigned long and delegates validation
 * and assignment to the matching tcm_qla2xxx_set_attrib_##name().
 */
#define DEF_QLA_TPG_ATTRIB(name)					\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_show_##name(			\
	struct se_portal_group *se_tpg,					\
	char *page)							\
{									\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
									\
	return sprintf(page, "%u\n", tpg->tpg_attrib.name);		\
}									\
									\
static ssize_t tcm_qla2xxx_tpg_attrib_store_##name(			\
	struct se_portal_group *se_tpg,					\
	const char *page,						\
	size_t count)							\
{									\
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,		\
			struct tcm_qla2xxx_tpg, se_tpg);		\
	unsigned long val;						\
	int ret;							\
									\
	ret = kstrtoul(page, 0, &val);					\
	if (ret < 0) {							\
		pr_err("kstrtoul() failed with"				\
				" ret: %d\n", ret);			\
		return -EINVAL;						\
	}								\
	ret = tcm_qla2xxx_set_attrib_##name(tpg, val);			\
									\
	return (!ret) ? count : -EINVAL;				\
}

/*
 * Generate a boolean validator/setter for TPG attribute "_name";
 * rejects any value other than 0 or 1.
 */
#define DEF_QLA_TPG_ATTR_BOOL(_name)					\
									\
static int tcm_qla2xxx_set_attrib_##_name(				\
	struct tcm_qla2xxx_tpg *tpg,					\
	unsigned long val)						\
{									\
	struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib;		\
									\
	if ((val != 0) && (val != 1)) {					\
		pr_err("Illegal boolean value %lu\n", val);		\
		return -EINVAL;						\
	}								\
									\
	a->_name = val;							\
	return 0;							\
}

/* Register the attribute with configfs under the given permissions */
#define QLA_TPG_ATTR(_name, _mode) \
	TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
788
/*
 * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
 */
DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
DEF_QLA_TPG_ATTRIB(generate_node_acls);
QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
 */
DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
 */
DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
 */
DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);

/*
 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_login_only
 */
DEF_QLA_TPG_ATTR_BOOL(demo_mode_login_only);
DEF_QLA_TPG_ATTRIB(demo_mode_login_only);
QLA_TPG_ATTR(demo_mode_login_only, S_IRUGO | S_IWUSR);

/* Attribute table exposed under the TPG's attrib/ configfs directory */
static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
	&tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
	&tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
	&tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
	&tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
	&tcm_qla2xxx_tpg_attrib_demo_mode_login_only.attr,
	NULL,
};

/* End items for tcm_qla2xxx_tpg_attrib_cit */
834
/* configfs "enable" show: report whether this TPG is currently enabled */
static ssize_t tcm_qla2xxx_tpg_show_enable(
	struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);

	return snprintf(page, PAGE_SIZE, "%d\n",
			atomic_read(&tpg->lport_tpg_enabled));
}
845
/*
 * Work item: take a configfs dependency on the TPG, then enable target
 * mode on the base vha.  Signals tpg_base_comp when done so the
 * store_enable path can observe the result via lport_tpg_enabled.
 */
static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
{
	struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
				struct tcm_qla2xxx_tpg, tpg_base_work);
	struct se_portal_group *se_tpg = &base_tpg->se_tpg;
	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;

	if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
		atomic_set(&base_tpg->lport_tpg_enabled, 1);
		qlt_enable_vha(base_vha);
	}
	complete(&base_tpg->tpg_base_comp);
}
859
/*
 * Work item: stop target mode on the base vha (phase 1) and drop the
 * configfs dependency taken by tcm_qla2xxx_depend_tpg().  Signals
 * tpg_base_comp when done.
 */
static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
{
	struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
				struct tcm_qla2xxx_tpg, tpg_base_work);
	struct se_portal_group *se_tpg = &base_tpg->se_tpg;
	struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;

	if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
		atomic_set(&base_tpg->lport_tpg_enabled, 0);
		target_undepend_item(&se_tpg->tpg_group.cg_item);
	}
	complete(&base_tpg->tpg_base_comp);
}
873
/*
 * configfs "enable" store: accept 0/1 and (dis)enable target mode for
 * this TPG.  The actual work runs on the system workqueue (depend/
 * undepend must not run in configfs write context) and the result is
 * checked via lport_tpg_enabled after completion.
 */
static ssize_t tcm_qla2xxx_tpg_store_enable(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	unsigned long op;
	int rc;

	rc = kstrtoul(page, 0, &op);
	if (rc < 0) {
		pr_err("kstrtoul() returned %d\n", rc);
		return -EINVAL;
	}
	if ((op != 1) && (op != 0)) {
		pr_err("Illegal value for tpg_enable: %lu\n", op);
		return -EINVAL;
	}
	if (op) {
		/* Already enabled: nothing to do */
		if (atomic_read(&tpg->lport_tpg_enabled))
			return -EEXIST;

		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
	} else {
		/* Already disabled: treat as success */
		if (!atomic_read(&tpg->lport_tpg_enabled))
			return count;

		INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
	}
	init_completion(&tpg->tpg_base_comp);
	schedule_work(&tpg->tpg_base_work);
	wait_for_completion(&tpg->tpg_base_comp);

	/* Verify the work item actually reached the requested state */
	if (op) {
		if (!atomic_read(&tpg->lport_tpg_enabled))
			return -ENODEV;
	} else {
		if (atomic_read(&tpg->lport_tpg_enabled))
			return -EPERM;
	}
	return count;
}

TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
919
/* configfs read-only view of this TPG's dynamic sessions */
static ssize_t tcm_qla2xxx_tpg_show_dynamic_sessions(
	struct se_portal_group *se_tpg,
	char *page)
{
	return target_show_dynamic_sessions(se_tpg, page);
}

TF_TPG_BASE_ATTR_RO(tcm_qla2xxx, dynamic_sessions);
928
/*
 * configfs "fabric_prot_type" store: set the fabric T10-PI protection
 * type for this TPG; only values 0, 1 and 3 are accepted.
 */
static ssize_t tcm_qla2xxx_tpg_store_fabric_prot_type(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tpg->tpg_attrib.fabric_prot_type = val;

	return count;
}

/* configfs "fabric_prot_type" show */
static ssize_t tcm_qla2xxx_tpg_show_fabric_prot_type(
	struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
				struct tcm_qla2xxx_tpg, se_tpg);

	return sprintf(page, "%d\n", tpg->tpg_attrib.fabric_prot_type);
}
TF_TPG_BASE_ATTR(tcm_qla2xxx, fabric_prot_type, S_IRUGO | S_IWUSR);

/* Base-mode TPG attribute table */
static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
	&tcm_qla2xxx_tpg_enable.attr,
	&tcm_qla2xxx_tpg_dynamic_sessions.attr,
	&tcm_qla2xxx_tpg_fabric_prot_type.attr,
	NULL,
};
969
970 static struct se_portal_group *tcm_qla2xxx_make_tpg(
971 struct se_wwn *wwn,
972 struct config_group *group,
973 const char *name)
974 {
975 struct tcm_qla2xxx_lport *lport = container_of(wwn,
976 struct tcm_qla2xxx_lport, lport_wwn);
977 struct tcm_qla2xxx_tpg *tpg;
978 unsigned long tpgt;
979 int ret;
980
981 if (strstr(name, "tpgt_") != name)
982 return ERR_PTR(-EINVAL);
983 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
984 return ERR_PTR(-EINVAL);
985
986 if ((tpgt != 1)) {
987 pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
988 return ERR_PTR(-ENOSYS);
989 }
990
991 tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
992 if (!tpg) {
993 pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
994 return ERR_PTR(-ENOMEM);
995 }
996 tpg->lport = lport;
997 tpg->lport_tpgt = tpgt;
998 /*
999 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
1000 * NodeACLs
1001 */
1002 tpg->tpg_attrib.generate_node_acls = 1;
1003 tpg->tpg_attrib.demo_mode_write_protect = 1;
1004 tpg->tpg_attrib.cache_dynamic_acls = 1;
1005 tpg->tpg_attrib.demo_mode_login_only = 1;
1006
1007 ret = core_tpg_register(&tcm_qla2xxx_ops, wwn, &tpg->se_tpg,
1008 SCSI_PROTOCOL_FCP);
1009 if (ret < 0) {
1010 kfree(tpg);
1011 return NULL;
1012 }
1013
1014 lport->tpg_1 = tpg;
1015
1016 return &tpg->se_tpg;
1017 }
1018
/*
 * configfs drop_tpg callback: stop target mode on the vha (phase 1),
 * deregister the TPG with the target core and free it.
 */
static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	struct tcm_qla2xxx_lport *lport = tpg->lport;
	struct scsi_qla_host *vha = lport->qla_vha;
	/*
	 * Call into qla2x_target.c LLD logic to shutdown the active
	 * FC Nexuses and disable target mode operation for this qla_hw_data
	 */
	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stop)
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);

	core_tpg_deregister(se_tpg);
	/*
	 * Clear local TPG=1 pointer for non NPIV mode.
	 */
	lport->tpg_1 = NULL;
	kfree(tpg);
}
1039
/* NPIV configfs "enable" show: shares the base-mode implementation */
static ssize_t tcm_qla2xxx_npiv_tpg_show_enable(
	struct se_portal_group *se_tpg,
	char *page)
{
	return tcm_qla2xxx_tpg_show_enable(se_tpg, page);
}
1046
/*
 * NPIV configfs "enable" store: accept 0/1 and (dis)enable target mode
 * on the NPIV vport directly (no workqueue indirection as in base mode).
 */
static ssize_t tcm_qla2xxx_npiv_tpg_store_enable(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
	struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
			struct tcm_qla2xxx_tpg, se_tpg);
	unsigned long op;
	int rc;

	rc = kstrtoul(page, 0, &op);
	if (rc < 0) {
		pr_err("kstrtoul() returned %d\n", rc);
		return -EINVAL;
	}
	if ((op != 1) && (op != 0)) {
		pr_err("Illegal value for tpg_enable: %lu\n", op);
		return -EINVAL;
	}
	if (op) {
		/* Already enabled: nothing to do */
		if (atomic_read(&tpg->lport_tpg_enabled))
			return -EEXIST;

		atomic_set(&tpg->lport_tpg_enabled, 1);
		qlt_enable_vha(vha);
	} else {
		/* Already disabled: treat as success */
		if (!atomic_read(&tpg->lport_tpg_enabled))
			return count;

		atomic_set(&tpg->lport_tpg_enabled, 0);
		qlt_stop_phase1(vha->vha_tgt.qla_tgt);
	}

	return count;
}

TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR);

/* NPIV-mode TPG attribute table */
static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
	&tcm_qla2xxx_npiv_tpg_enable.attr,
	NULL,
};
1093
1094 static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
1095 struct se_wwn *wwn,
1096 struct config_group *group,
1097 const char *name)
1098 {
1099 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1100 struct tcm_qla2xxx_lport, lport_wwn);
1101 struct tcm_qla2xxx_tpg *tpg;
1102 unsigned long tpgt;
1103 int ret;
1104
1105 if (strstr(name, "tpgt_") != name)
1106 return ERR_PTR(-EINVAL);
1107 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
1108 return ERR_PTR(-EINVAL);
1109
1110 tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
1111 if (!tpg) {
1112 pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
1113 return ERR_PTR(-ENOMEM);
1114 }
1115 tpg->lport = lport;
1116 tpg->lport_tpgt = tpgt;
1117
1118 /*
1119 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
1120 * NodeACLs
1121 */
1122 tpg->tpg_attrib.generate_node_acls = 1;
1123 tpg->tpg_attrib.demo_mode_write_protect = 1;
1124 tpg->tpg_attrib.cache_dynamic_acls = 1;
1125 tpg->tpg_attrib.demo_mode_login_only = 1;
1126
1127 ret = core_tpg_register(&tcm_qla2xxx_npiv_ops, wwn, &tpg->se_tpg,
1128 SCSI_PROTOCOL_FCP);
1129 if (ret < 0) {
1130 kfree(tpg);
1131 return NULL;
1132 }
1133 lport->tpg_1 = tpg;
1134 return &tpg->se_tpg;
1135 }
1136
1137 /*
1138 * Expected to be called with struct qla_hw_data->hardware_lock held
1139 */
1140 static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1141 scsi_qla_host_t *vha,
1142 const uint8_t *s_id)
1143 {
1144 struct tcm_qla2xxx_lport *lport;
1145 struct se_node_acl *se_nacl;
1146 struct tcm_qla2xxx_nacl *nacl;
1147 u32 key;
1148
1149 lport = vha->vha_tgt.target_lport_ptr;
1150 if (!lport) {
1151 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1152 dump_stack();
1153 return NULL;
1154 }
1155
1156 key = (((unsigned long)s_id[0] << 16) |
1157 ((unsigned long)s_id[1] << 8) |
1158 (unsigned long)s_id[2]);
1159 pr_debug("find_sess_by_s_id: 0x%06x\n", key);
1160
1161 se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
1162 if (!se_nacl) {
1163 pr_debug("Unable to locate s_id: 0x%06x\n", key);
1164 return NULL;
1165 }
1166 pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
1167 se_nacl, se_nacl->initiatorname);
1168
1169 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1170 if (!nacl->qla_tgt_sess) {
1171 pr_err("Unable to locate struct qla_tgt_sess\n");
1172 return NULL;
1173 }
1174
1175 return nacl->qla_tgt_sess;
1176 }
1177
/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 *
 * Install (new_se_nacl != NULL) or clear (new_se_nacl == NULL) the
 * lport->lport_fcport_map btree entry for the 24-bit FC S_ID, keeping the
 * nacl / qla_tgt_sess / se_sess back-pointers consistent in every case.
 */
static void tcm_qla2xxx_set_sess_by_s_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct qla_tgt_sess *qla_tgt_sess,
	uint8_t *s_id)
{
	u32 key;
	void *slot;
	int rc;

	/* Pack the 3-byte S_ID into the 24-bit btree key. */
	key = (((unsigned long)s_id[0] << 16) |
	       ((unsigned long)s_id[1] << 8) |
	       (unsigned long)s_id[2]);
	pr_debug("set_sess_by_s_id: %06x\n", key);

	slot = btree_lookup32(&lport->lport_fcport_map, key);
	if (!slot) {
		/* No existing mapping: insert one if an ACL was supplied. */
		if (new_se_nacl) {
			pr_debug("Setting up new fc_port entry to new_se_nacl\n");
			nacl->nport_id = key;
			rc = btree_insert32(&lport->lport_fcport_map, key,
					new_se_nacl, GFP_ATOMIC);
			if (rc)
				printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
				    (int)key);
		} else {
			pr_debug("Wiping nonexisting fc_port entry\n");
		}

		qla_tgt_sess->se_sess = se_sess;
		nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (nacl->qla_tgt_sess) {
		/* Mapping exists and the NodeACL has an active session. */
		if (new_se_nacl == NULL) {
			pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
			btree_remove32(&lport->lport_fcport_map, key);
			nacl->qla_tgt_sess = NULL;
			return;
		}
		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
		btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
		qla_tgt_sess->se_sess = se_sess;
		nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	/* Mapping exists but no active nacl->qla_tgt_sess. */
	if (new_se_nacl == NULL) {
		pr_debug("Clearing existing fc_port entry\n");
		btree_remove32(&lport->lport_fcport_map, key);
		return;
	}

	pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
	btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
	qla_tgt_sess->se_sess = se_sess;
	nacl->qla_tgt_sess = qla_tgt_sess;

	pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
	    nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}
1245
1246 /*
1247 * Expected to be called with struct qla_hw_data->hardware_lock held
1248 */
1249 static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
1250 scsi_qla_host_t *vha,
1251 const uint16_t loop_id)
1252 {
1253 struct tcm_qla2xxx_lport *lport;
1254 struct se_node_acl *se_nacl;
1255 struct tcm_qla2xxx_nacl *nacl;
1256 struct tcm_qla2xxx_fc_loopid *fc_loopid;
1257
1258 lport = vha->vha_tgt.target_lport_ptr;
1259 if (!lport) {
1260 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1261 dump_stack();
1262 return NULL;
1263 }
1264
1265 pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
1266
1267 fc_loopid = lport->lport_loopid_map + loop_id;
1268 se_nacl = fc_loopid->se_nacl;
1269 if (!se_nacl) {
1270 pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
1271 loop_id);
1272 return NULL;
1273 }
1274
1275 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1276
1277 if (!nacl->qla_tgt_sess) {
1278 pr_err("Unable to locate struct qla_tgt_sess\n");
1279 return NULL;
1280 }
1281
1282 return nacl->qla_tgt_sess;
1283 }
1284
/*
 * Expected to be called with struct qla_hw_data->hardware_lock held
 *
 * Install (new_se_nacl != NULL) or clear (new_se_nacl == NULL) the
 * lport->lport_loopid_map entry for @loop_id, keeping the
 * nacl / qla_tgt_sess / se_sess back-pointers consistent in every case.
 */
static void tcm_qla2xxx_set_sess_by_loop_id(
	struct tcm_qla2xxx_lport *lport,
	struct se_node_acl *new_se_nacl,
	struct tcm_qla2xxx_nacl *nacl,
	struct se_session *se_sess,
	struct qla_tgt_sess *qla_tgt_sess,
	uint16_t loop_id)
{
	struct se_node_acl *saved_nacl;
	struct tcm_qla2xxx_fc_loopid *fc_loopid;

	pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);

	/* Index the flat loopid map directly by loop_id. */
	fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
			lport->lport_loopid_map)[loop_id];

	saved_nacl = fc_loopid->se_nacl;
	if (!saved_nacl) {
		/* Slot was empty: install the new ACL (may be NULL). */
		pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
		fc_loopid->se_nacl = new_se_nacl;
		if (qla_tgt_sess->se_sess != se_sess)
			qla_tgt_sess->se_sess = se_sess;
		if (nacl->qla_tgt_sess != qla_tgt_sess)
			nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	if (nacl->qla_tgt_sess) {
		/* Slot occupied and the NodeACL has an active session. */
		if (new_se_nacl == NULL) {
			pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
			fc_loopid->se_nacl = NULL;
			nacl->qla_tgt_sess = NULL;
			return;
		}

		pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
		fc_loopid->se_nacl = new_se_nacl;
		if (qla_tgt_sess->se_sess != se_sess)
			qla_tgt_sess->se_sess = se_sess;
		if (nacl->qla_tgt_sess != qla_tgt_sess)
			nacl->qla_tgt_sess = qla_tgt_sess;
		return;
	}

	/* Slot occupied but no active nacl->qla_tgt_sess. */
	if (new_se_nacl == NULL) {
		pr_debug("Clearing fc_loopid->se_nacl\n");
		fc_loopid->se_nacl = NULL;
		return;
	}

	pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
	fc_loopid->se_nacl = new_se_nacl;
	if (qla_tgt_sess->se_sess != se_sess)
		qla_tgt_sess->se_sess = se_sess;
	if (nacl->qla_tgt_sess != qla_tgt_sess)
		nacl->qla_tgt_sess = qla_tgt_sess;

	pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
	    nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
}
1348
1349 /*
1350 * Should always be called with qla_hw_data->hardware_lock held.
1351 */
1352 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
1353 struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
1354 {
1355 struct se_session *se_sess = sess->se_sess;
1356 unsigned char be_sid[3];
1357
1358 be_sid[0] = sess->s_id.b.domain;
1359 be_sid[1] = sess->s_id.b.area;
1360 be_sid[2] = sess->s_id.b.al_pa;
1361
1362 tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
1363 sess, be_sid);
1364 tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
1365 sess, sess->loop_id);
1366 }
1367
/*
 * LLD ->free_session callback: tear down the TCM session backing a
 * qla_tgt_sess.  Waits for all outstanding se_cmd descriptors to
 * complete, then deregisters the se_session from configfs and the
 * target core.  Must run from process context (see BUG_ON below).
 */
static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	struct se_session *se_sess;
	struct se_node_acl *se_nacl;
	struct tcm_qla2xxx_lport *lport;
	struct tcm_qla2xxx_nacl *nacl;

	BUG_ON(in_interrupt());

	se_sess = sess->se_sess;
	if (!se_sess) {
		pr_err("struct qla_tgt_sess->se_sess is NULL\n");
		dump_stack();
		return;
	}
	se_nacl = se_sess->se_node_acl;
	nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);

	lport = vha->vha_tgt.target_lport_ptr;
	if (!lport) {
		pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
		dump_stack();
		return;
	}
	/* Block until all commands on this nexus have completed. */
	target_wait_for_sess_cmds(se_sess);

	transport_deregister_session_configfs(sess->se_sess);
	transport_deregister_session(sess->se_sess);
}
1400
1401 /*
1402 * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
1403 * to locate struct se_node_acl
1404 */
1405 static int tcm_qla2xxx_check_initiator_node_acl(
1406 scsi_qla_host_t *vha,
1407 unsigned char *fc_wwpn,
1408 void *qla_tgt_sess,
1409 uint8_t *s_id,
1410 uint16_t loop_id)
1411 {
1412 struct qla_hw_data *ha = vha->hw;
1413 struct tcm_qla2xxx_lport *lport;
1414 struct tcm_qla2xxx_tpg *tpg;
1415 struct tcm_qla2xxx_nacl *nacl;
1416 struct se_portal_group *se_tpg;
1417 struct se_node_acl *se_nacl;
1418 struct se_session *se_sess;
1419 struct qla_tgt_sess *sess = qla_tgt_sess;
1420 unsigned char port_name[36];
1421 unsigned long flags;
1422 int num_tags = (ha->fw_xcb_count) ? ha->fw_xcb_count :
1423 TCM_QLA2XXX_DEFAULT_TAGS;
1424
1425 lport = vha->vha_tgt.target_lport_ptr;
1426 if (!lport) {
1427 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1428 dump_stack();
1429 return -EINVAL;
1430 }
1431 /*
1432 * Locate the TPG=1 reference..
1433 */
1434 tpg = lport->tpg_1;
1435 if (!tpg) {
1436 pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
1437 return -EINVAL;
1438 }
1439 se_tpg = &tpg->se_tpg;
1440
1441 se_sess = transport_init_session_tags(num_tags,
1442 sizeof(struct qla_tgt_cmd),
1443 TARGET_PROT_ALL);
1444 if (IS_ERR(se_sess)) {
1445 pr_err("Unable to initialize struct se_session\n");
1446 return PTR_ERR(se_sess);
1447 }
1448 /*
1449 * Format the FCP Initiator port_name into colon seperated values to
1450 * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
1451 */
1452 memset(&port_name, 0, 36);
1453 snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn);
1454 /*
1455 * Locate our struct se_node_acl either from an explict NodeACL created
1456 * via ConfigFS, or via running in TPG demo mode.
1457 */
1458 se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
1459 port_name);
1460 if (!se_sess->se_node_acl) {
1461 transport_free_session(se_sess);
1462 return -EINVAL;
1463 }
1464 se_nacl = se_sess->se_node_acl;
1465 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1466 /*
1467 * And now setup the new se_nacl and session pointers into our HW lport
1468 * mappings for fabric S_ID and LOOP_ID.
1469 */
1470 spin_lock_irqsave(&ha->hardware_lock, flags);
1471 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
1472 qla_tgt_sess, s_id);
1473 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
1474 qla_tgt_sess, loop_id);
1475 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1476 /*
1477 * Finally register the new FC Nexus with TCM
1478 */
1479 transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
1480
1481 return 0;
1482 }
1483
/*
 * LLD ->update_sess callback: re-key an existing session when the
 * fabric reassigns its loop_id and/or S_ID, moving the corresponding
 * lport_loopid_map and lport_fcport_map entries, and record the
 * confirmed-completion capability flag.
 */
static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
			uint16_t loop_id, bool conf_compl_supported)
{
	struct qla_tgt *tgt = sess->tgt;
	struct qla_hw_data *ha = tgt->ha;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	struct tcm_qla2xxx_lport *lport = vha->vha_tgt.target_lport_ptr;
	struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
	struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
			struct tcm_qla2xxx_nacl, se_node_acl);
	u32 key;


	if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
		pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
		    sess, sess->port_name,
		    sess->loop_id, loop_id, sess->s_id.b.domain,
		    sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain,
		    s_id.b.area, s_id.b.al_pa);

	if (sess->loop_id != loop_id) {
		/*
		 * Because we can shuffle loop IDs around and we
		 * update different sessions non-atomically, we might
		 * have overwritten this session's old loop ID
		 * already, and we might end up overwriting some other
		 * session that will be updated later.  So we have to
		 * be extra careful and we can't warn about those things...
		 */
		if (lport->lport_loopid_map[sess->loop_id].se_nacl == se_nacl)
			lport->lport_loopid_map[sess->loop_id].se_nacl = NULL;

		lport->lport_loopid_map[loop_id].se_nacl = se_nacl;

		sess->loop_id = loop_id;
	}

	if (sess->s_id.b24 != s_id.b24) {
		/* Remove the btree entry keyed by the old 24-bit S_ID. */
		key = (((u32) sess->s_id.b.domain << 16) |
		       ((u32) sess->s_id.b.area << 8) |
		       ((u32) sess->s_id.b.al_pa));

		if (btree_lookup32(&lport->lport_fcport_map, key))
			WARN(btree_remove32(&lport->lport_fcport_map, key) != se_nacl,
			    "Found wrong se_nacl when updating s_id %x:%x:%x\n",
			    sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);
		else
			WARN(1, "No lport_fcport_map entry for s_id %x:%x:%x\n",
			    sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa);

		/* Insert (or overwrite) the entry for the new S_ID. */
		key = (((u32) s_id.b.domain << 16) |
		       ((u32) s_id.b.area << 8) |
		       ((u32) s_id.b.al_pa));

		if (btree_lookup32(&lport->lport_fcport_map, key)) {
			WARN(1, "Already have lport_fcport_map entry for s_id %x:%x:%x\n",
			    s_id.b.domain, s_id.b.area, s_id.b.al_pa);
			btree_update32(&lport->lport_fcport_map, key, se_nacl);
		} else {
			btree_insert32(&lport->lport_fcport_map, key, se_nacl, GFP_ATOMIC);
		}

		sess->s_id = s_id;
		nacl->nport_id = key;
	}

	sess->conf_compl_supported = conf_compl_supported;
}
1552
/*
 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
 *
 * Installed as ha->tgt.tgt_ops by tcm_qla2xxx_lport_register_cb() below.
 * Note this is the LLD-facing template; the configfs-facing
 * target_core_fabric_ops tables are defined further down.
 */
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
	.handle_cmd		= tcm_qla2xxx_handle_cmd,
	.handle_data		= tcm_qla2xxx_handle_data,
	.handle_dif_err		= tcm_qla2xxx_handle_dif_err,
	.handle_tmr		= tcm_qla2xxx_handle_tmr,
	.free_cmd		= tcm_qla2xxx_free_cmd,
	.free_mcmd		= tcm_qla2xxx_free_mcmd,
	.free_session		= tcm_qla2xxx_free_session,
	.update_sess		= tcm_qla2xxx_update_sess,
	.check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
	.find_sess_by_s_id	= tcm_qla2xxx_find_sess_by_s_id,
	.find_sess_by_loop_id	= tcm_qla2xxx_find_sess_by_loop_id,
	.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
	.put_sess		= tcm_qla2xxx_put_sess,
	.shutdown_sess		= tcm_qla2xxx_shutdown_sess,
};
1572
1573 static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
1574 {
1575 int rc;
1576
1577 rc = btree_init32(&lport->lport_fcport_map);
1578 if (rc) {
1579 pr_err("Unable to initialize lport->lport_fcport_map btree\n");
1580 return rc;
1581 }
1582
1583 lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
1584 65536);
1585 if (!lport->lport_loopid_map) {
1586 pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
1587 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1588 btree_destroy32(&lport->lport_fcport_map);
1589 return -ENOMEM;
1590 }
1591 memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
1592 * 65536);
1593 pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
1594 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1595 return 0;
1596 }
1597
/*
 * qlt_lport_register() callback for physical ports: wire the LLD
 * tgt_ops template and cross-link the vha and lport.  Always succeeds.
 */
static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha,
					 void *target_lport_ptr,
					 u64 npiv_wwpn, u64 npiv_wwnn)
{
	struct qla_hw_data *ha = vha->hw;
	struct tcm_qla2xxx_lport *lport =
			(struct tcm_qla2xxx_lport *)target_lport_ptr;
	/*
	 * Setup tgt_ops, local pointer to vha and target_lport_ptr
	 */
	ha->tgt.tgt_ops = &tcm_qla2xxx_template;
	vha->vha_tgt.target_lport_ptr = target_lport_ptr;
	lport->qla_vha = vha;

	return 0;
}
1614
/*
 * configfs fabric_make_wwn callback: parse the WWPN string, allocate a
 * tcm_qla2xxx_lport, initialize its lookup tables and register it with
 * the qla2xxx LLD.  Returns the new se_wwn or ERR_PTR() on failure.
 */
static struct se_wwn *tcm_qla2xxx_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 wwpn;
	int ret = -ENODEV;

	if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL);

	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
		return ERR_PTR(-ENOMEM);
	}
	lport->lport_wwpn = wwpn;
	tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
				wwpn);
	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) wwpn);

	ret = tcm_qla2xxx_init_lport(lport);
	if (ret != 0)
		goto out;

	ret = qlt_lport_register(lport, wwpn, 0, 0,
				 tcm_qla2xxx_lport_register_cb);
	if (ret != 0)
		goto out_lport;

	return &lport->lport_wwn;
out_lport:
	/* Unwind tcm_qla2xxx_init_lport() allocations. */
	vfree(lport->lport_loopid_map);
	btree_destroy32(&lport->lport_fcport_map);
out:
	kfree(lport);
	return ERR_PTR(ret);
}
1654
/*
 * configfs fabric_drop_wwn callback for physical ports: finish target
 * shutdown in the LLD, deregister the lport and free its lookup tables.
 */
static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *vha = lport->qla_vha;
	struct se_node_acl *node;
	u32 key = 0;

	/*
	 * Call into qla2x_target.c LLD logic to complete the
	 * shutdown of struct qla_tgt after the call to
	 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above..
	 */
	if (vha->vha_tgt.qla_tgt && !vha->vha_tgt.qla_tgt->tgt_stopped)
		qlt_stop_phase2(vha->vha_tgt.qla_tgt);

	qlt_lport_deregister(vha);

	/* Release the lookup structures built by tcm_qla2xxx_init_lport(). */
	vfree(lport->lport_loopid_map);
	btree_for_each_safe32(&lport->lport_fcport_map, key, node)
		btree_remove32(&lport->lport_fcport_map, key);
	btree_destroy32(&lport->lport_fcport_map);
	kfree(lport);
}
1679
/*
 * qlt_lport_register() callback for NPIV ports: validate that the base
 * port is running in enabled target mode, create the FC vport for the
 * NPIV WWPN/WWNN pair, and cross-link the new vha with the lport.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
					      void *target_lport_ptr,
					      u64 npiv_wwpn, u64 npiv_wwnn)
{
	struct fc_vport *vport;
	struct Scsi_Host *sh = base_vha->host;
	struct scsi_qla_host *npiv_vha;
	struct tcm_qla2xxx_lport *lport =
			(struct tcm_qla2xxx_lport *)target_lport_ptr;
	struct tcm_qla2xxx_lport *base_lport =
			(struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
	struct tcm_qla2xxx_tpg *base_tpg;
	struct fc_vport_identifiers vport_id;

	if (!qla_tgt_mode_enabled(base_vha)) {
		pr_err("qla2xxx base_vha not enabled for target mode\n");
		return -EPERM;
	}

	/* NPIV requires the base port's TPG=1 to exist and be enabled. */
	if (!base_lport || !base_lport->tpg_1 ||
	    !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
		pr_err("qla2xxx base_lport or tpg_1 not available\n");
		return -EPERM;
	}
	base_tpg = base_lport->tpg_1;

	memset(&vport_id, 0, sizeof(vport_id));
	vport_id.port_name = npiv_wwpn;
	vport_id.node_name = npiv_wwnn;
	vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
	vport_id.vport_type = FC_PORTTYPE_NPIV;
	vport_id.disable = false;

	vport = fc_vport_create(sh, 0, &vport_id);
	if (!vport) {
		pr_err("fc_vport_create failed for qla2xxx_npiv\n");
		return -ENODEV;
	}
	/*
	 * Setup local pointer to NPIV vhba + target_lport_ptr
	 */
	npiv_vha = (struct scsi_qla_host *)vport->dd_data;
	npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
	lport->qla_vha = npiv_vha;
	/* Hold a host reference; dropped in tcm_qla2xxx_npiv_drop_lport(). */
	scsi_host_get(npiv_vha->host);
	return 0;
}
1727
1728
/*
 * configfs fabric_make_wwn callback for NPIV ports.
 *
 * The name has the form "<phys_wwpn>@<npiv_wwpn>:<npiv_wwnn>"; parse
 * both halves, allocate the lport, set up its lookup tables and
 * register the NPIV port with the qla2xxx LLD.
 */
static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_qla2xxx_lport *lport;
	u64 phys_wwpn, npiv_wwpn, npiv_wwnn;
	char *p, tmp[128];
	int ret;

	/* Work on a local copy so the '@' separator can be cut in place. */
	snprintf(tmp, 128, "%s", name);

	p = strchr(tmp, '@');
	if (!p) {
		pr_err("Unable to locate NPIV '@' seperator\n");
		return ERR_PTR(-EINVAL);
	}
	*p++ = '\0';

	if (tcm_qla2xxx_parse_wwn(tmp, &phys_wwpn, 1) < 0)
		return ERR_PTR(-EINVAL);

	if (tcm_qla2xxx_npiv_parse_wwn(p, strlen(p)+1,
				       &npiv_wwpn, &npiv_wwnn) < 0)
		return ERR_PTR(-EINVAL);

	lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
	if (!lport) {
		pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
		return ERR_PTR(-ENOMEM);
	}
	lport->lport_npiv_wwpn = npiv_wwpn;
	lport->lport_npiv_wwnn = npiv_wwnn;
	sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);

	ret = tcm_qla2xxx_init_lport(lport);
	if (ret != 0)
		goto out;

	ret = qlt_lport_register(lport, phys_wwpn, npiv_wwpn, npiv_wwnn,
				 tcm_qla2xxx_lport_register_npiv_cb);
	if (ret != 0)
		goto out_lport;

	return &lport->lport_wwn;
out_lport:
	/* Unwind tcm_qla2xxx_init_lport() allocations. */
	vfree(lport->lport_loopid_map);
	btree_destroy32(&lport->lport_fcport_map);
out:
	kfree(lport);
	return ERR_PTR(ret);
}
1781
/*
 * configfs fabric_drop_wwn callback for NPIV ports: drop the host
 * references, terminate the FC vport and free the lport.
 */
static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
{
	struct tcm_qla2xxx_lport *lport = container_of(wwn,
			struct tcm_qla2xxx_lport, lport_wwn);
	struct scsi_qla_host *npiv_vha = lport->qla_vha;
	struct qla_hw_data *ha = npiv_vha->hw;
	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

	/* Drops the reference taken in tcm_qla2xxx_lport_register_npiv_cb(). */
	scsi_host_put(npiv_vha->host);
	/*
	 * Notify libfc that we want to release the vha->fc_vport
	 */
	fc_vport_terminate(npiv_vha->fc_vport);
	/* NOTE(review): base host ref presumably taken by qlt_lport_register()
	 * during NPIV setup — confirm against qla_target.c. */
	scsi_host_put(base_vha->host);
	kfree(lport);
}
1798
1799
/*
 * Read-only "version" attribute on the fabric WWN group: report the
 * module version plus the running kernel's identification.
 */
static ssize_t tcm_qla2xxx_wwn_show_attr_version(
	struct target_fabric_configfs *tf,
	char *page)
{
	return sprintf(page,
	    "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
	    utsname()->machine);
}
1809
/* Emit the read-only "version" WWN attribute from the handler above. */
TF_WWN_ATTR_RO(tcm_qla2xxx, version);

/* configfs attributes shared by both fabric WWN groups below. */
static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
	&tcm_qla2xxx_wwn_version.attr,
	NULL,
};
1816
/*
 * target_core_fabric_ops for physical (non-NPIV) qla2xxx ports,
 * registered as the "qla2xxx" fabric in tcm_qla2xxx_register_configfs().
 */
static const struct target_core_fabric_ops tcm_qla2xxx_ops = {
	.module				= THIS_MODULE,
	.name				= "qla2xxx",
	.node_acl_size			= sizeof(struct tcm_qla2xxx_nacl),
	.get_fabric_name		= tcm_qla2xxx_get_fabric_name,
	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
	.tpg_get_tag			= tcm_qla2xxx_get_tag,
	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
					tcm_qla2xxx_check_demo_write_protect,
	.tpg_check_prod_mode_write_protect =
					tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_prot_fabric_only	= tcm_qla2xxx_check_prot_fabric_only,
	.tpg_check_demo_mode_login_only = tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free		= tcm_qla2xxx_check_stop_free,
	.release_cmd			= tcm_qla2xxx_release_cmd,
	.shutdown_session		= tcm_qla2xxx_shutdown_session,
	.close_session			= tcm_qla2xxx_close_session,
	.sess_get_index			= tcm_qla2xxx_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= tcm_qla2xxx_write_pending,
	.write_pending_status		= tcm_qla2xxx_write_pending_status,
	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
	.queue_data_in			= tcm_qla2xxx_queue_data_in,
	.queue_status			= tcm_qla2xxx_queue_status,
	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
	.aborted_task			= tcm_qla2xxx_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= tcm_qla2xxx_make_lport,
	.fabric_drop_wwn		= tcm_qla2xxx_drop_lport,
	.fabric_make_tpg		= tcm_qla2xxx_make_tpg,
	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
	.fabric_init_nodeacl		= tcm_qla2xxx_init_nodeacl,

	.tfc_wwn_attrs			= tcm_qla2xxx_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_qla2xxx_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_qla2xxx_tpg_attrib_attrs,
};
1861
/*
 * target_core_fabric_ops for NPIV virtual ports, registered as the
 * "qla2xxx_npiv" fabric.  Differs from tcm_qla2xxx_ops mainly in the
 * WWN/TPG lifecycle callbacks and the demo-mode defaults.
 */
static const struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
	.module				= THIS_MODULE,
	.name				= "qla2xxx_npiv",
	.node_acl_size			= sizeof(struct tcm_qla2xxx_nacl),
	.get_fabric_name		= tcm_qla2xxx_npiv_get_fabric_name,
	.tpg_get_wwn			= tcm_qla2xxx_get_fabric_wwn,
	.tpg_get_tag			= tcm_qla2xxx_get_tag,
	.tpg_check_demo_mode		= tcm_qla2xxx_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_qla2xxx_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_demo_mode,
	.tpg_check_prod_mode_write_protect =
	    tcm_qla2xxx_check_prod_write_protect,
	.tpg_check_demo_mode_login_only	= tcm_qla2xxx_check_demo_mode_login_only,
	.tpg_get_inst_index		= tcm_qla2xxx_tpg_get_inst_index,
	.check_stop_free                = tcm_qla2xxx_check_stop_free,
	.release_cmd			= tcm_qla2xxx_release_cmd,
	.shutdown_session		= tcm_qla2xxx_shutdown_session,
	.close_session			= tcm_qla2xxx_close_session,
	.sess_get_index			= tcm_qla2xxx_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= tcm_qla2xxx_write_pending,
	.write_pending_status		= tcm_qla2xxx_write_pending_status,
	.set_default_node_attributes	= tcm_qla2xxx_set_default_node_attrs,
	.get_cmd_state			= tcm_qla2xxx_get_cmd_state,
	.queue_data_in			= tcm_qla2xxx_queue_data_in,
	.queue_status			= tcm_qla2xxx_queue_status,
	.queue_tm_rsp			= tcm_qla2xxx_queue_tm_rsp,
	.aborted_task			= tcm_qla2xxx_aborted_task,
	/*
	 * Setup function pointers for generic logic in
	 * target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= tcm_qla2xxx_npiv_make_lport,
	.fabric_drop_wwn		= tcm_qla2xxx_npiv_drop_lport,
	.fabric_make_tpg		= tcm_qla2xxx_npiv_make_tpg,
	.fabric_drop_tpg		= tcm_qla2xxx_drop_tpg,
	.fabric_init_nodeacl		= tcm_qla2xxx_init_nodeacl,

	.tfc_wwn_attrs			= tcm_qla2xxx_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_qla2xxx_npiv_tpg_attrs,
};
1903
/*
 * Register both fabric templates with the target core and allocate the
 * module workqueues.  On any failure, unwind everything registered so
 * far in reverse order and return a negative errno.
 */
static int tcm_qla2xxx_register_configfs(void)
{
	int ret;

	pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
	    UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
	    utsname()->machine);

	ret = target_register_template(&tcm_qla2xxx_ops);
	if (ret)
		return ret;

	ret = target_register_template(&tcm_qla2xxx_npiv_ops);
	if (ret)
		goto out_fabric;

	/* WQ_MEM_RECLAIM: session teardown must make progress under reclaim. */
	tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
						WQ_MEM_RECLAIM, 0);
	if (!tcm_qla2xxx_free_wq) {
		ret = -ENOMEM;
		goto out_fabric_npiv;
	}

	tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
	if (!tcm_qla2xxx_cmd_wq) {
		ret = -ENOMEM;
		goto out_free_wq;
	}

	return 0;

out_free_wq:
	destroy_workqueue(tcm_qla2xxx_free_wq);
out_fabric_npiv:
	target_unregister_template(&tcm_qla2xxx_npiv_ops);
out_fabric:
	target_unregister_template(&tcm_qla2xxx_ops);
	return ret;
}
1943
/*
 * Undo tcm_qla2xxx_register_configfs(): drain and destroy the
 * workqueues first, then unregister both fabric templates.
 */
static void tcm_qla2xxx_deregister_configfs(void)
{
	destroy_workqueue(tcm_qla2xxx_cmd_wq);
	destroy_workqueue(tcm_qla2xxx_free_wq);

	target_unregister_template(&tcm_qla2xxx_ops);
	target_unregister_template(&tcm_qla2xxx_npiv_ops);
}
1952
1953 static int __init tcm_qla2xxx_init(void)
1954 {
1955 int ret;
1956
1957 ret = tcm_qla2xxx_register_configfs();
1958 if (ret < 0)
1959 return ret;
1960
1961 return 0;
1962 }
1963
/* Module exit point: tear down everything set up at init time. */
static void __exit tcm_qla2xxx_exit(void)
{
	tcm_qla2xxx_deregister_configfs();
}
1968
/* Module metadata and init/exit registration. */
MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
MODULE_LICENSE("GPL");
module_init(tcm_qla2xxx_init);
module_exit(tcm_qla2xxx_exit);