/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/export.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
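/*
 * Resolve the fabric-provided unpacked LUN to this initiator's mapped
 * se_lun under rcu_read_lock, account per-deve and per-device command
 * statistics, and fall back to the TPG's virtual LUN 0 so REPORT_LUNS,
 * et al still work when no MappedLUN=0 exists for the initiator port.
 */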
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_device *dev;
	struct se_dev_entry *deve;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		atomic_long_inc(&deve->total_cmds);

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			rcu_read_unlock();
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->write_bytes);
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			atomic_long_add(se_cmd->data_length,
					&deve->read_bytes);

		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	rcu_read_unlock();

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	dev = se_lun->lun_se_dev;
	atomic_long_inc(&dev->num_cmds);
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
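/*
 * Resolve the LUN for an incoming task management request, associate
 * the se_tmr_req with its backing device, and queue it on the device's
 * dev_tmr_list.
 */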
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_node_acl *nacl = se_sess->se_node_acl;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_tmr->tmr_lun = rcu_dereference(deve->se_lun);
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	rcu_read_unlock();

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
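/*
 * Returns true when either the LUN itself or this initiator's mapping
 * of it is write-protected.
 */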
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
	struct se_session *se_sess = cmd->se_sess;
	struct se_dev_entry *deve;
	bool ret;

	if (cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY)
		return true;

	rcu_read_lock();
	deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
	ret = (deve && deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = rcu_dereference(deve->se_lun);
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (lun->lun_rtpi != rtpi)
			continue;

		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}
void core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}
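/*
 * Flip an existing mapped LUN entry between READ_WRITE and READ_ONLY
 * under nacl->lun_entry_mutex.
 */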
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, mapped_lun);
	if (deve) {
		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}
	}
	mutex_unlock(&nacl->lun_entry_mutex);
}
/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u32 mapped_lun)
{
	struct se_dev_entry *deve;

	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		if (deve->mapped_lun == mapped_lun)
			return deve;

	return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);
void target_pr_kref_release(struct kref *kref)
{
	struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
						 pr_kref);
	complete(&deve->pr_comp);
}
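/*
 * Install a mapped LUN entry on a NodeACL, or replace an existing entry
 * in-place for dynamic -> explicit NodeACL conversion, publishing the
 * new se_dev_entry via RCU before releasing the old one.
 */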
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *orig, *new;

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new) {
		pr_err("Unable to allocate se_dev_entry memory\n");
		return -ENOMEM;
	}

	atomic_set(&new->ua_count, 0);
	spin_lock_init(&new->ua_lock);
	INIT_LIST_HEAD(&new->alua_port_list);
	INIT_LIST_HEAD(&new->ua_list);

	new->mapped_lun = mapped_lun;
	kref_init(&new->pr_kref);
	init_completion(&new->pr_comp);

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE)
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	else
		new->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;

	new->creation_time = get_jiffies_64();
	new->attach_count++;

	mutex_lock(&nacl->lun_entry_mutex);
	orig = target_nacl_find_deve(nacl, mapped_lun);
	if (orig && orig->se_lun) {
		struct se_lun *orig_lun = rcu_dereference_check(orig->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		if (orig_lun != lun) {
			pr_err("Existing orig->se_lun doesn't match new lun"
			       " for dynamic -> explicit NodeACL conversion:"
			       " %s\n", nacl->initiatorname);
			mutex_unlock(&nacl->lun_entry_mutex);
			kfree(new);
			return -EINVAL;
		}
		BUG_ON(orig->se_lun_acl != NULL);

		rcu_assign_pointer(new->se_lun, lun);
		rcu_assign_pointer(new->se_lun_acl, lun_acl);
		hlist_del_rcu(&orig->link);
		hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
		mutex_unlock(&nacl->lun_entry_mutex);

		spin_lock_bh(&port->sep_alua_lock);
		list_del(&orig->alua_port_list);
		list_add_tail(&new->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		kref_put(&orig->pr_kref, target_pr_kref_release);
		wait_for_completion(&orig->pr_comp);

		kfree_rcu(orig, rcu_head);
		return 0;
	}

	rcu_assign_pointer(new->se_lun, lun);
	rcu_assign_pointer(new->se_lun_acl, lun_acl);
	hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
	mutex_unlock(&nacl->lun_entry_mutex);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&new->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}
/*
 *	Called with se_node_acl->lun_entry_mutex held.
 */
void core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_dev_entry *orig,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&orig->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(orig);

	hlist_del_rcu(&orig->link);
	clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
	rcu_assign_pointer(orig->se_lun, NULL);
	rcu_assign_pointer(orig->se_lun_acl, NULL);

	orig->creation_time = 0;
	orig->attach_count--;
	/*
	 * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
	 * or REGISTER_AND_MOVE PR operation to complete.
	 */
	kref_put(&orig->pr_kref, target_pr_kref_release);
	wait_for_completion(&orig->pr_comp);

	kfree_rcu(orig, rcu_head);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
}
/*
 *	core_clear_lun_from_tpg(): disable *lun from every NodeACL in *tpg.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {

		mutex_lock(&nacl->lun_entry_mutex);
		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
			struct se_lun *tmp_lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

			if (lun != tmp_lun)
				continue;

			core_disable_device_list_for_node(lun, deve, nacl, tpg);
		}
		mutex_unlock(&nacl->lun_entry_mutex);
	}
	mutex_unlock(&tpg->acl_node_mutex);
}
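/*
 * Allocate a struct se_port and pick the next free 16-bit RELATIVE
 * TARGET PORT IDENTIFIER, retrying on wrap until a unique value is
 * found under dev->se_port_lock.
 */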
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code      Description
	 * 0h        Reserved
	 * 1h        Relative port 1, historically known as port A
	 * 2h        Relative port 2, historically known as port B
	 * 3h to FFFFh    Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
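/*
 * Bind a newly allocated port to its LUN and TPG, add it to the
 * device's port list, and attach non-passthrough ports to the default
 * ALUA target port group.
 */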
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
				dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_index = dev->dev_index;
	lun->lun_se_dev = dev;
	lun->lun_rtpi = port->sep_rtpi;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_sep = NULL;
	lun->lun_se_dev = NULL;
}
static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;

	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
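/*
 * Report whether write cache is enabled, preferring the backend's live
 * get_write_cache() callback over the emulate_write_cache attribute.
 */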
bool se_dev_check_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity);

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);

int se_dev_set_max_write_same_len(
	struct se_device *dev,
	u32 max_write_same_len)
{
	dev->dev_attrib.max_write_same_len = max_write_same_len;
	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
			dev, dev->dev_attrib.max_write_same_len);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_write_same_len);
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag)
		dev_set_t10_wwn_model_alias(dev);
	else
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	printk_once(KERN_WARNING
		"ignoring deprecated emulate_dpo attribute\n");
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_dpo);

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->get_write_cache) {
		pr_warn("emulate_fua_write not supported for this device, ignoring\n");
		return 0;
	}
	if (dev->export_count) {
		pr_err("emulate_fua_write cannot be changed with active"
		       " exports: %d\n", dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_write);

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	printk_once(KERN_WARNING
		"ignoring deprecated emulate_fua_read attribute\n");
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}
	if (dev->export_count) {
		pr_err("emulate_write_cache cannot be changed with active"
		       " exports: %d\n", dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_write_cache);

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tas);
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpu);

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpws);

int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_caw = flag;
	pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
		 dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_caw);

int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_3pc = flag;
	pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
		dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_3pc);
int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
	int rc, old_prot = dev->dev_attrib.pi_prot_type;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (dev->dev_attrib.hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return 0;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return 0;

		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	dev->dev_attrib.pi_prot_type = flag;

	if (flag && !old_prot) {
		rc = dev->transport->init_prot(dev);
		if (rc) {
			dev->dev_attrib.pi_prot_type = old_prot;
			return rc;
		}

	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_type);

int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
	int rc;

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d for pi_prot_format\n", flag);
		return -EINVAL;
	}
	if (!flag)
		return 0;

	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	rc = dev->transport->format_prot(dev);
	if (rc)
		return rc;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_format);
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}
EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);

int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	dev->dev_attrib.force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
	       dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_is_nonrot);

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		printk(KERN_ERR "dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (queue_depth > dev->dev_attrib.queue_depth) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
				" %u exceeds TCM/SE_Device MAX"
				" TCQ: %u\n", dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_queue_depth);

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than hw_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.hw_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_optimal_sectors);

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_device: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);

	if (dev->dev_attrib.max_bytes_per_io)
		dev->dev_attrib.hw_max_sectors =
			dev->dev_attrib.max_bytes_per_io / block_size;

	return 0;
}
EXPORT_SYMBOL(se_dev_set_block_size);
int core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	struct se_lun *lun)
{
	int rc;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return rc;

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;

		mutex_lock(&tpg->acl_node_mutex);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				core_tpg_add_node_to_devs(acl, tpg, lun);
			}
		}
		mutex_unlock(&tpg->acl_node_mutex);
	}

	return 0;
}
/*
 *	core_dev_del_lun(): deactivate *lun and remove it from *tpg.
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	struct se_lun *lun,
	u32 lun_access)
{
	struct se_node_acl *nacl = lacl->se_lun_nacl;

	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
					    lacl->mapped_lun);
	return 0;
}
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	mutex_lock(&nacl->lun_entry_mutex);
	deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
	if (deve)
		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	mutex_unlock(&nacl->lun_entry_mutex);

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}
static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
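/*
 * Allocate a backend se_device via hba->backend->ops->alloc_device()
 * and initialize all generic state: lists, locks, default attribute
 * values, and the embedded xcopy_lun.
 */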
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->backend->ops->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->backend->ops;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = 1;
	dev->dev_attrib.emulate_fua_write = 1;
	dev->dev_attrib.emulate_fua_read = 1;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	xcopy_lun->lun_se_dev = dev;
	init_completion(&xcopy_lun->lun_shutdown_comp);
	spin_lock_init(&xcopy_lun->lun_sep_lock);
	init_completion(&xcopy_lun->lun_ref_comp);

	return dev;
}
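/*
 * Second-stage device setup: let the backend configure itself, derive
 * block_size/queue_depth defaults from the hardware values, set up
 * ALUA and the TMR workqueue, and publish the device on g_device_list.
 */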
int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		return ret;
	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	dev->dev_flags |= DF_CONFIGURED;

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}
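/*
 * Release everything set up by target_alloc_device() and
 * target_configure_device(), in reverse order.
 */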
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}
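/*
 * Create the global rd_mcp-backed device used to back each TPG's
 * virtual LUN 0.
 */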
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);
	core_delete_hba(hba);
}
/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;

	/*
	 * Clear a lun set in the cdb if the initiator talking to us spoke
	 * an old standards version, as we can't assume the underlying device
	 * won't choke up on it.
	 */
	switch (cdb[0]) {
	case READ_10: /* SBC - RDProtect */
	case READ_12: /* SBC - RDProtect */
	case READ_16: /* SBC - RDProtect */
	case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
	case VERIFY: /* SBC - VRProtect */
	case VERIFY_16: /* SBC - VRProtect */
	case WRITE_VERIFY: /* SBC - VRProtect */
	case WRITE_VERIFY_12: /* SBC - VRProtect */
	case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
		break;
	default:
		cdb[1] &= 0x1f; /* clear logical unit number */
		break;
	}

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case 0x8e: /* WRITE_VERIFY_16 */
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case 0x0c: /* WRITE_VERIFY_32 */
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);