/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the iSCSI Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);
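
/*
 * Resolve se_cmd's unpacked LUN to the session's struct se_dev_entry and
 * struct se_lun, enforcing the per-MappedLUN INITIATOR_ACCESS and
 * READ_ONLY flags.  When no MappedLUN=0 exists for this initiator, the
 * TPG's read-only virtual LUN 0 is used so REPORT LUNS et al still work.
 */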
int transport_get_lun_for_cmd(
	struct se_cmd *se_cmd,
	unsigned char *cdb,
	u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = SE_SESS(se_cmd);
	unsigned long flags;
	int read_only = 0;

	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
	deve = se_cmd->se_deve =
			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		deve->total_cmds++;
		deve->total_bytes += se_cmd->data_length;

		if (se_cmd->data_direction == DMA_TO_DEVICE) {
			if (deve->lun_flags &
					TRANSPORT_LUNFLAGS_READ_ONLY) {
				read_only = 1;
				goto out;
			}
			deve->write_bytes += se_cmd->data_length;
		} else if (se_cmd->data_direction ==
			   DMA_FROM_DEVICE) {
			deve->read_bytes += se_cmd->data_length;
		}
		deve->deve_cmds++;

		se_lun = se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
out:
	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

	if (!se_lun) {
		if (read_only) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				CMD_TFO(se_cmd)->get_fabric_name(),
				unpacked_lun);
			return -1;
		}
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				CMD_TFO(se_cmd)->get_fabric_name(),
				unpacked_lun);
			return -1;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE)) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			return -1;
		}
#if 0
		printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
			CMD_TFO(se_cmd)->get_fabric_name());
#endif
		se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
	/*
	 * Determine if the struct se_lun is online.
	 */
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -1;
	}

	{
	struct se_device *dev = se_lun->lun_se_dev;
	spin_lock(&dev->stats_lock);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock(&dev->stats_lock);
	}

	/*
	 * Add the struct se_cmd to the struct se_lun's cmd list.  This list
	 * is used for tracking state of struct se_cmds during LUN shutdown
	 * events.
	 */
	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
	atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
#if 0
	printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
		CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
#endif
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_cmd);
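
/*
 * LUN resolution for task management requests: same MappedLUN lookup as
 * transport_get_lun_for_cmd(), but the matching struct se_device is also
 * recorded in se_tmr->tmr_dev and the TMR is queued on dev->dev_tmr_list.
 */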
int transport_get_lun_for_tmr(
	struct se_cmd *se_cmd,
	u32 unpacked_lun)
{
	struct se_device *dev = NULL;
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = SE_SESS(se_cmd);
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
	deve = se_cmd->se_deve =
			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
		dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
/*		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
	}
	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

	if (!se_lun) {
		printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			CMD_TFO(se_cmd)->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -1;
	}
	/*
	 * Determine if the struct se_lun is online.
	 */
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -1;
	}

	spin_lock(&dev->se_tmr_lock);
	list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
	spin_unlock(&dev->se_tmr_lock);

	return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_tmr);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!(lun)) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!(port)) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}
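
/*
 * Release every active MappedLUN for this NodeACL via
 * core_update_device_list_for_node(), then free the nacl->device_list
 * array itself.
 */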
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!(nacl->device_list))
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	kfree(nacl->device_list);
	nacl->device_list = NULL;

	return 0;
}
void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&se_nacl->device_list_lock);
	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irq(&se_nacl->device_list_lock);
}
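
/*
 * Flip an existing MappedLUN between READ_WRITE and READ_ONLY under
 * nacl->device_list_lock.
 */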
void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}
/*	core_update_device_list_for_node():
 *
 *	Enable (enable=1) or disable (enable=0) a MappedLUN ->
 *	struct se_dev_entry mapping for an individual NodeACL.
 */
int core_update_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg,
	int enable)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
	int trans = 0;
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 */
	if (!(enable)) {
		/*
		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
		 * that have not been explicitly converted to MappedLUNs ->
		 * struct se_lun_acl, but we remove deve->alua_port_list from
		 * port->sep_alua_list. This also means that active UAs and
		 * NodeACL context specific PR metadata for demo-mode
		 * MappedLUN *deve will be released below..
		 */
		spin_lock_bh(&port->sep_alua_lock);
		list_del(&deve->alua_port_list);
		spin_unlock_bh(&port->sep_alua_lock);
	}

	spin_lock_irq(&nacl->device_list_lock);
	if (enable) {
		/*
		 * Check if the call is handling demo mode -> explicit LUN ACL
		 * transition.  This transition must be for the same struct
		 * se_lun + mapped_lun that was setup in demo mode..
		 */
		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
			if (deve->se_lun_acl != NULL) {
				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
					" already set for demo mode -> explicit"
					" LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -1;
			}
			if (deve->se_lun != lun) {
				printk(KERN_ERR "struct se_dev_entry->se_lun does"
					" not match passed struct se_lun for demo"
					" mode -> explicit LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -1;
			}
			deve->se_lun_acl = lun_acl;
			trans = 1;
		} else {
			deve->se_lun = lun;
			deve->se_lun_acl = lun_acl;
			deve->mapped_lun = mapped_lun;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
		}

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		if (trans) {
			spin_unlock_irq(&nacl->device_list_lock);
			return 0;
		}
		deve->creation_time = get_jiffies_64();
		deve->attach_count++;
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		return 0;
	}
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	spin_unlock_irq(&nacl->device_list_lock);
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();
	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}
/*	core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_bh(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = &nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_update_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg, 0);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&tpg->acl_node_lock);
	}
	spin_unlock_bh(&tpg->acl_node_lock);
}
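
/*
 * Allocate a struct se_port and assign it a unique, non-zero RELATIVE
 * TARGET PORT IDENTIFIER on the passed struct se_device.
 */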
static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!(port)) {
		printk(KERN_ERR "Unable to allocate struct se_port\n");
		return NULL;
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		printk(KERN_WARNING "Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return NULL;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this
	 * struct se_device.  Here is the table from spc4r17 section 7.7.3.8.
	 *
	 *	Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code			Description
	 * 0h			Reserved
	 * 1h			Relative port 1, historically known as port A
	 * 2h			Relative port 2, historically known as port B
	 * 3h to FFFFh		Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!(port->sep_rtpi))
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
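
/*
 * Wire the new struct se_port into the passed TPG + LUN and onto the
 * device's dev_sep_list.  For SPC-3 ALUA emulation the port is also
 * attached to the default ALUA target port group.
 */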
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = SU_DEV(dev);
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			T10_ALUA(su_dev)->default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
 *	Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}
int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (!(port))
		return -1;

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}
void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}
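
/*
 * Emulate REPORT LUNS.  Per SPC the parameter data begins with a 4-byte
 * LUN LIST LENGTH (8 * number of LUNs, in bytes) and 4 reserved bytes,
 * with the 8-byte LUN entries starting at offset 8.
 */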
int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun;
	struct se_session *se_sess = SE_SESS(se_cmd);
	struct se_task *se_task;
	unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
	u64 lun;

	list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
		break;

	if (!(se_task)) {
		printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!(se_sess)) {
		lun = 0;
		buf[offset++] = ((lun >> 56) & 0xff);
		buf[offset++] = ((lun >> 48) & 0xff);
		buf[offset++] = ((lun >> 40) & 0xff);
		buf[offset++] = ((lun >> 32) & 0xff);
		buf[offset++] = ((lun >> 24) & 0xff);
		buf[offset++] = ((lun >> 16) & 0xff);
		buf[offset++] = ((lun >> 8) & 0xff);
		buf[offset++] = (lun & 0xff);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &SE_NODE_ACL(se_sess)->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		se_lun = deve->se_lun;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((cdb_offset + 8) >= se_cmd->data_length)
			continue;

		lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
		buf[offset++] = ((lun >> 56) & 0xff);
		buf[offset++] = ((lun >> 48) & 0xff);
		buf[offset++] = ((lun >> 40) & 0xff);
		buf[offset++] = ((lun >> 32) & 0xff);
		buf[offset++] = ((lun >> 24) & 0xff);
		buf[offset++] = ((lun >> 16) & 0xff);
		buf[offset++] = ((lun >> 8) & 0xff);
		buf[offset++] = (lun & 0xff);
		cdb_offset += 8;
	}
	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/*	se_release_device_for_hba():
 *
 *
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		kthread_stop(dev->process_thread);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev->dev_status_queue_obj);
	kfree(dev->dev_queue_obj);
	kfree(dev);
}
void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
}
/*
 *	Called with struct se_hba->device_lock held.
 */
void se_clear_dev_ports(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	struct se_lun *lun;
	struct se_portal_group *tpg;
	struct se_port *sep, *sep_tmp;

	spin_lock(&dev->se_port_lock);
	list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
		spin_unlock(&dev->se_port_lock);
		spin_unlock(&hba->device_lock);

		lun = sep->sep_lun;
		tpg = sep->sep_tpg;
		spin_lock(&lun->lun_sep_lock);
		if (lun->lun_se_dev == NULL) {
			spin_unlock(&lun->lun_sep_lock);
			continue;
		}
		spin_unlock(&lun->lun_sep_lock);

		core_dev_del_lun(tpg, lun->unpacked_lun);

		spin_lock(&hba->device_lock);
		spin_lock(&dev->se_port_lock);
	}
	spin_unlock(&dev->se_port_lock);
}
/*	se_free_virtual_device():
 *
 *	Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	spin_lock(&hba->device_lock);
	se_clear_dev_ports(dev);
	spin_unlock(&hba->device_lock);

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}
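
/*
 * se_dev_start()/se_dev_stop() track device exports via
 * dev->dev_obj.obj_access_count, moving dev->dev_status between the
 * (OFFLINE_)DEACTIVATED and (OFFLINE_)ACTIVATED states on the first
 * start and the last stop.
 */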
static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}
static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}
int se_dev_check_online(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}
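
/*
 * Seed dev->se_dev_attrib with the DA_* defaults plus the block size,
 * max_sectors and queue depth limits the subsystem plugin reported in
 * struct se_dev_limits.
 */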
void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
	DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
	DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
	DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
	DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
	DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
	DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
	DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
	DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	DEV_ATTRIB(dev)->max_unmap_block_desc_count =
				DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	DEV_ATTRIB(dev)->unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
	DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
	/*
	 * max_sectors is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
	DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
	/*
	 * Set optimal_sectors from max_sectors, which can be lowered via
	 * configfs.
	 */
	DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
	DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
}
int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
		return -1;
	} else {
		DEV_ATTRIB(dev)->task_timeout = task_timeout;
		printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
			dev, task_timeout);
	}

	return 0;
}
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
	printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
	printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
	printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
			dev, DEV_ATTRIB(dev)->unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
	printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
	return 0;
}
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->dpo_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_dpo = flag;
	printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
		" bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->fua_write_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_fua_write = flag;
	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, DEV_ATTRIB(dev)->emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->fua_read_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_fua_read = flag;
	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
			dev, DEV_ATTRIB(dev)->emulate_fua_read);
	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->write_cache_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_write_cache = flag;
	printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, DEV_ATTRIB(dev)->emulate_write_cache);
	return 0;
}
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
	printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_tas = flag;
	printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
		printk(KERN_ERR "Generic Block Discard not supported\n");
		return -1;
	}

	DEV_ATTRIB(dev)->emulate_tpu = flag;
	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
			dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
		printk(KERN_ERR "Generic Block Discard not supported\n");
		return -1;
	}

	DEV_ATTRIB(dev)->emulate_tpws = flag;
	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
			dev, flag);
	return 0;
}
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	DEV_ATTRIB(dev)->enforce_pr_isids = flag;
	printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}
/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	u32 orig_queue_depth = dev->queue_depth;

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	if (!(queue_depth)) {
		printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -1;
	}

	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
			printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				DEV_ATTRIB(dev)->hw_queue_depth);
			return -1;
		}
	} else {
		if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
			if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
				printk(KERN_ERR "dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					DEV_ATTRIB(dev)->hw_queue_depth);
				return -1;
			}
		}
	}

	DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
	if (queue_depth > orig_queue_depth)
		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
	else if (queue_depth < orig_queue_depth)
		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

	printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}
int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
	int force = 0; /* Force setting for VDEVS */

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	if (!(max_sectors)) {
		printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
			" max_sectors\n", dev);
		return -1;
	}
	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -1;
	}
	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, max_sectors,
				DEV_ATTRIB(dev)->hw_max_sectors);
			return -1;
		}
	} else {
		if (!(force) && (max_sectors >
				 DEV_ATTRIB(dev)->hw_max_sectors)) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors"
				": %u, use force=1 to override.\n", dev,
				max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
			return -1;
		}
		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -1;
		}
	}

	DEV_ATTRIB(dev)->max_sectors = max_sectors;
	printk("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, max_sectors);
	return 0;
}
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" optimal_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
				" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
		printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than max_sectors: %u\n", dev,
			optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
		return -EINVAL;
	}

	DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
	printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
			" while dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -1;
	}

	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -1;
	}

	DEV_ATTRIB(dev)->block_size = block_size;
	printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);
	return 0;
}
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_hba *hba,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	u32 lun_access = 0;

	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return NULL;
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if ((IS_ERR(lun_p)) || !(lun_p))
		return NULL;

	if (dev->dev_flags & DF_READ_ONLY)
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
		return NULL;

	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
		TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_bh(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl) {
				spin_unlock_bh(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_bh(&tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&tpg->acl_node_lock);
	}

	return lun_p;
}
/*	core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int ret = 0;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
	if (!(lun))
		return ret;

	core_tpg_post_dellun(tpg, lun);

	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
		TPG_TFO(tpg)->get_fabric_name());

	return 0;
}
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
/*	core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}
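
/*
 * Allocate and initialize a struct se_lun_acl for the NodeACL matching
 * initiatorname; the MappedLUN itself is wired up afterwards through
 * core_dev_add_initiator_node_lun_acl().
 */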
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
		printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
			TPG_TFO(tpg)->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!(nacl)) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!(lacl)) {
		printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}
int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!(lun)) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg, 1) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
		" InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);

	return 0;
}
/*	core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

	lacl->se_lun = NULL;

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}
void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg),
		TPG_TFO(tpg)->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}
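
/*
 * Set up the global virtual LUN 0 on an internal rd_dr ramdisk HBA, so
 * each TPG can expose tpg_virt_lun0 to initiators that have no explicit
 * MappedLUN=0 configured.
 */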
int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = NULL;
	struct se_subsystem_api *t;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	se_global->g_lun0_hba = hba;
	t = hba->transport;

	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
	if (!(se_dev)) {
		printk(KERN_ERR "Unable to allocate memory for"
				" struct se_subsystem_dev\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&se_dev->g_se_dev_list);
	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
	INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
	spin_lock_init(&se_dev->t10_reservation.registration_lock);
	spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
	spin_lock_init(&se_dev->se_dev_lock);
	se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	se_dev->t10_wwn.t10_sub_dev = se_dev;
	se_dev->t10_alua.t10_sub_dev = se_dev;
	se_dev->se_dev_attrib.da_sub_dev = se_dev;
	se_dev->se_dev_hba = hba;

	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!(se_dev->se_dev_su_ptr)) {
		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
			" from allocate_virtdevice()\n");
		ret = -ENOMEM;
		goto out;
	}
	se_global->g_lun0_su_dev = se_dev;

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
	if (!(dev) || IS_ERR(dev)) {
		ret = -ENOMEM;
		goto out;
	}
	se_dev->se_dev_ptr = dev;
	se_global->g_lun0_dev = dev;

	return 0;
out:
	se_global->g_lun0_su_dev = NULL;
	kfree(se_dev);
	if (se_global->g_lun0_hba) {
		core_delete_hba(se_global->g_lun0_hba);
		se_global->g_lun0_hba = NULL;
	}
	return ret;
}
void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = se_global->g_lun0_hba;
	struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;

	if (!(hba))
		return;

	if (se_global->g_lun0_dev)
		se_free_virtual_device(se_global->g_lun0_dev, hba);

	kfree(su_dev);
	core_delete_hba(hba);
}