/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

static struct se_hba *lun0_hba;
static struct se_subsystem_dev *lun0_su_dev;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
int transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_bytes += se_cmd->data_length;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return -EACCES;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return -ENODEV;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -EACCES;
		}

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	/* TODO: get rid of this and use atomics for stats */
	dev = se_lun->lun_se_dev;
	spin_lock_irqsave(&dev->stats_lock, flags);
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irqrestore(&dev->stats_lock, flags);

	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_node, &se_lun->lun_cmd_list);
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
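
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a fabric module would typically resolve the LUN for a newly received
 * command before dispatching it, roughly along these lines.  The error
 * handling shown is only one plausible pattern.
 *
 *	if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
 *		// se_cmd->scsi_sense_reason has already been set, e.g. to
 *		// TCM_NON_EXISTENT_LUN, so report CHECK_CONDITION instead
 *		// of executing the CDB.
 *		transport_send_check_condition_and_sense(se_cmd,
 *				se_cmd->scsi_sense_reason, 0);
 *		return;
 *	}
 *	// se_cmd->se_lun and se_cmd->se_dev are now valid.
 */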
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}
	/*
	 * Determine if the struct se_lun is online.
	 * FIXME: Check for LUN_RESET + UNIT Attention
	 */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	kfree(nacl->device_list);
	nacl->device_list = NULL;

	return 0;
}
void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	unsigned long flags;

	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
}
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/*	core_update_device_list_for_node():
 *
 *
 */
int core_update_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg,
	int enable)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
	int trans = 0;
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 */
	if (!enable) {
		/*
		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
		 * that have not been explicitly converted to MappedLUNs ->
		 * struct se_lun_acl, but we remove deve->alua_port_list from
		 * port->sep_alua_list. This also means that active UAs and
		 * NodeACL context specific PR metadata for demo-mode
		 * MappedLUN *deve will be released below..
		 */
		spin_lock_bh(&port->sep_alua_lock);
		list_del(&deve->alua_port_list);
		spin_unlock_bh(&port->sep_alua_lock);
	}

	spin_lock_irq(&nacl->device_list_lock);
	if (enable) {
		/*
		 * Check if the call is handling demo mode -> explicit LUN ACL
		 * transition.  This transition must be for the same struct se_lun
		 * + mapped_lun that was setup in demo mode..
		 */
		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
			if (deve->se_lun_acl != NULL) {
				pr_err("struct se_dev_entry->se_lun_acl"
					" already set for demo mode -> explicit"
					" LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			if (deve->se_lun != lun) {
				pr_err("struct se_dev_entry->se_lun does"
					" not match passed struct se_lun for demo mode"
					" -> explicit LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -EINVAL;
			}
			deve->se_lun_acl = lun_acl;
			trans = 1;
		} else {
			deve->se_lun = lun;
			deve->se_lun_acl = lun_acl;
			deve->mapped_lun = mapped_lun;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
		}

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		if (trans) {
			spin_unlock_irq(&nacl->device_list_lock);
			return 0;
		}
		deve->creation_time = get_jiffies_64();
		deve->attach_count++;
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		return 0;
	}
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	spin_unlock_irq(&nacl->device_list_lock);
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();
	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}
/*	core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = &nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_update_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg, 0);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *    Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
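
/*
 * Worked example (editorial note, not in the original source): starting
 * from dev->dev_rpti_counter == 0, the first allocation produces the
 * reserved value 0h, retries via the again: label, and hands out RTPI 1h;
 * subsequent ports receive 2h, 3h, and so on.  After a 16-bit wrap the
 * list walk above keeps retrying until a value not already used by
 * another struct se_port on dev->dev_sep_list is found.
 */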
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = dev->se_sub_dev;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			su_dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}
int target_report_luns(struct se_task *se_task)
{
	struct se_cmd *se_cmd = se_task->task_se_cmd;
	struct se_dev_entry *deve;
	struct se_lun *se_lun;
	struct se_session *se_sess = se_cmd->se_sess;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8, i;

	buf = transport_kmap_data_sg(se_cmd);
	if (!buf)
		return -ENOMEM;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!se_sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &se_sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		se_lun = deve->se_lun;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((offset + 8) > se_cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
	}
	spin_unlock_irq(&se_sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);
	transport_kunmap_data_sg(se_cmd);

	se_task->task_scsi_status = GOOD;
	transport_complete_task(se_task, 1);
	return 0;
}
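
/*
 * Worked example (editorial note, not in the original source): with two
 * mapped LUNs reported, lun_count ends up as 2 and is scaled above to a
 * LUN LIST LENGTH of 16 bytes before being stored big-endian in bytes 0-3
 * of the parameter data:
 *
 *	buf[0] = 0x00, buf[1] = 0x00, buf[2] = 0x00, buf[3] = 0x10
 *
 * The eight-byte LUN entries themselves start at offset 8, which is why
 * offset is initialised to 8 at the top of the function.
 */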
/*	se_release_device_for_hba():
 *
 *
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		kthread_stop(dev->process_thread);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev);
}
void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->se_sub_dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->se_sub_dev->t10_wwn.t10_vpd_lock);
}
/*	se_free_virtual_device():
 *
 *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	if (!list_empty(&dev->dev_sep_list))
		dump_stack();

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}
static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}
static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}
int se_dev_check_online(struct se_device *dev)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->dev_status_lock, flags);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
	spin_unlock_irqrestore(&dev->dev_status_lock, flags);

	return ret;
}
int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}
u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 tmp, aligned_max_sectors;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
	aligned_max_sectors = (tmp / block_size);
	if (max_sectors != aligned_max_sectors) {
		printk(KERN_INFO "Rounding down aligned max_sectors from %u"
				" to %u\n", max_sectors, aligned_max_sectors);
		return aligned_max_sectors;
	}

	return max_sectors;
}
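
/*
 * Worked example (editorial note, not in the original source): assuming
 * PAGE_SIZE == 4096 and a 512-byte block_size, a caller passing
 * max_sectors == 1027 yields
 *
 *	tmp = rounddown(1027 * 512, 4096) = 524288
 *	aligned_max_sectors = 524288 / 512 = 1024
 *
 * so the value is quietly rounded down from 1027 to 1024 sectors and the
 * "Rounding down" message above is logged.
 */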
void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	dev->se_sub_dev->se_dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->se_sub_dev->se_dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->se_sub_dev->se_dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->se_sub_dev->se_dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->se_sub_dev->se_dev_attrib.emulate_reservations = DA_EMULATE_RESERVATIONS;
	dev->se_sub_dev->se_dev_attrib.emulate_alua = DA_EMULATE_ALUA;
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->se_sub_dev->se_dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_block_size = limits->logical_block_size;
	dev->se_sub_dev->se_dev_attrib.block_size = limits->logical_block_size;
	/*
	 * max_sectors is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
						limits->logical_block_size);
	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
	/*
	 * Set fabric_max_sectors, which is reported in block limits
	 * VPD page (B0h).
	 */
	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
	/*
	 * Set optimal_sectors from fabric_max_sectors, which can be
	 * lowered via configfs.
	 */
	dev->se_sub_dev->se_dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	dev->se_sub_dev->se_dev_attrib.hw_queue_depth = dev_limits->hw_queue_depth;
	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;
}
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
	return 0;
}
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo_emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag && dev->transport->fua_write_emulated == 0) {
		pr_err("fua_write_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("ua read emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag && dev->transport->write_cache_emulated == 0) {
		pr_err("write_cache_emulated not supported\n");
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
	return 0;
}
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
			dev, flag);
	return 0;
}
int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}
int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
	int force = 0; /* Force setting for VDEVS */

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" max_sectors\n", dev);
		return -EINVAL;
	}
	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, max_sectors,
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (!force && (max_sectors >
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors"
				": %u, use force=1 to override.\n", dev,
				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	max_sectors = se_dev_align_max_sectors(max_sectors,
				dev->se_sub_dev->se_dev_attrib.block_size);

	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, max_sectors);
	return 0;
}
int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
				dev->se_sub_dev->se_dev_attrib.block_size);

	dev->se_sub_dev->se_dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, fabric_max_sectors);
	return 0;
}
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
				" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->se_sub_dev->se_dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_device: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);
	return 0;
}
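
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * the se_dev_set_*() helpers above are intended to be driven from configfs
 * attribute store methods.  A hypothetical store handler would parse the
 * user string and forward the value, propagating the negative errno that
 * the helper returns on rejection:
 *
 *	static ssize_t example_block_size_store(struct se_device *dev,
 *						const char *page, size_t count)
 *	{
 *		unsigned long val;
 *		int ret;
 *
 *		ret = strict_strtoul(page, 0, &val);
 *		if (ret < 0)
 *			return ret;
 *		ret = se_dev_set_block_size(dev, (u32)val);
 *		return (ret < 0) ? ret : count;
 *	}
 */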
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_hba *hba,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	u32 lun_access = 0;
	int rc;

	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return ERR_PTR(-EACCES);
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if (IS_ERR(lun_p))
		return lun_p;

	if (dev->dev_flags & DF_READ_ONLY)
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

	rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun_p;
}
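
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * a caller wiring a backend device into a portal group as LUN 0 would use
 * core_dev_add_lun() and check the returned pointer with IS_ERR(), e.g.:
 *
 *	struct se_lun *lun_p;
 *
 *	lun_p = core_dev_add_lun(se_tpg, hba, se_dev, 0);
 *	if (IS_ERR(lun_p))
 *		return PTR_ERR(lun_p);
 *
 * In-tree this path is normally reached from the configfs LUN creation
 * code rather than being called directly by fabric modules.
 */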
/*	core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
/*	core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!nacl) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg, 1) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}
/*	core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = NULL;
	struct se_subsystem_api *t;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	lun0_hba = hba;
	t = hba->transport;

	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
	if (!se_dev) {
		pr_err("Unable to allocate memory for"
				" struct se_subsystem_dev\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&se_dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&se_dev->t10_pr.registration_lock);
	spin_lock_init(&se_dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
	spin_lock_init(&se_dev->se_dev_lock);
	se_dev->t10_pr.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	se_dev->t10_wwn.t10_sub_dev = se_dev;
	se_dev->t10_alua.t10_sub_dev = se_dev;
	se_dev->se_dev_attrib.da_sub_dev = se_dev;
	se_dev->se_dev_hba = hba;

	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!se_dev->se_dev_su_ptr) {
		pr_err("Unable to locate subsystem dependent pointer"
			" from allocate_virtdevice()\n");
		ret = -ENOMEM;
		goto out;
	}
	lun0_su_dev = se_dev;

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto out;
	}
	se_dev->se_dev_ptr = dev;
	g_lun0_dev = dev;

	return 0;
out:
	lun0_su_dev = NULL;
	kfree(se_dev);
	if (lun0_hba) {
		core_delete_hba(lun0_hba);
		lun0_hba = NULL;
	}
	return ret;
}
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;
	struct se_subsystem_dev *su_dev = lun0_su_dev;

	if (!hba)
		return;

	if (g_lun0_dev)
		se_free_virtual_device(g_lun0_dev, hba);

	kfree(su_dev);
	core_delete_hba(hba);
}