/*******************************************************************************
 * Filename: target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the iSCSI Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005-2006 SBE, Inc.  All Rights Reserved.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);

int transport_get_lun_for_cmd(
	struct se_cmd *se_cmd,
	unsigned char *cdb,
	u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = SE_SESS(se_cmd);
	unsigned long flags;
	int read_only = 0;

	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
	deve = se_cmd->se_deve =
			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (se_cmd) {
			deve->total_cmds++;
			deve->total_bytes += se_cmd->data_length;

			if (se_cmd->data_direction == DMA_TO_DEVICE) {
				if (deve->lun_flags &
						TRANSPORT_LUNFLAGS_READ_ONLY) {
					read_only = 1;
					goto out;
				}
				deve->write_bytes += se_cmd->data_length;
			} else if (se_cmd->data_direction ==
					DMA_FROM_DEVICE) {
				deve->read_bytes += se_cmd->data_length;
			}
		}
		deve->deve_cmds++;

		se_lun = se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
	}
out:
	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

	if (!se_lun) {
		if (read_only) {
			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
			printk(KERN_INFO "TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				CMD_TFO(se_cmd)->get_fabric_name(),
				unpacked_lun);
			return -1;
		} else {
			/*
			 * Use the se_portal_group->tpg_virt_lun0 to allow for
			 * REPORT_LUNS, et al to be returned when no active
			 * MappedLUN=0 exists for this Initiator Port.
			 */
			if (unpacked_lun != 0) {
				se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
				printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
					" Access for 0x%08x\n",
					CMD_TFO(se_cmd)->get_fabric_name(),
					unpacked_lun);
				return -1;
			}
			/*
			 * Force WRITE PROTECT for virtual LUN 0
			 */
			if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
			    (se_cmd->data_direction != DMA_NONE)) {
				se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
				se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
				return -1;
			}
#if 0
			printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
				CMD_TFO(se_cmd)->get_fabric_name());
#endif
			se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
			se_cmd->orig_fe_lun = 0;
			se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
			se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
		}
	}
	/*
	 * Determine if the struct se_lun is online.
	 */
	/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -1;
	}

	{
	struct se_device *dev = se_lun->lun_se_dev;
	spin_lock_irq(&dev->stats_lock);
	dev->num_cmds++;
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		dev->write_bytes += se_cmd->data_length;
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		dev->read_bytes += se_cmd->data_length;
	spin_unlock_irq(&dev->stats_lock);
	}

	/*
	 * Add the struct se_cmd to the struct se_lun's cmd list.  This list is
	 * used for tracking state of struct se_cmds during LUN shutdown events.
	 */
	spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
	list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
	atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
#if 0
	printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
		CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
#endif
	spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_cmd);

int transport_get_lun_for_tmr(
	struct se_cmd *se_cmd,
	u32 unpacked_lun)
{
	struct se_device *dev = NULL;
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = SE_SESS(se_cmd);
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
	deve = se_cmd->se_deve =
			&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
		dev = se_lun->lun_se_dev;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
		/* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
	}
	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

	if (!se_lun) {
		printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			CMD_TFO(se_cmd)->get_fabric_name(),
			unpacked_lun);
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -1;
	}
	/*
	 * Determine if the struct se_lun is online.
	 */
	/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
	if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
		se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		return -1;
	}
	se_tmr->tmr_dev = dev;

	spin_lock(&dev->se_tmr_lock);
	list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
	spin_unlock(&dev->se_tmr_lock);

	return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_tmr);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!(lun)) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!(port)) {
			printk(KERN_ERR "%s device entries port pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			printk(KERN_ERR "%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				TPG_TFO(tpg)->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	kfree(nacl->device_list);
	nacl->device_list = NULL;

	return 0;
}

void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&se_nacl->device_list_lock);
	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
	deve->deve_cmds--;
	spin_unlock_irq(&se_nacl->device_list_lock);
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = &nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* core_update_device_list_for_node():
 *
 * Enable (enable=1) or disable (enable=0) a MappedLUN struct se_dev_entry
 * for a struct se_node_acl, handling the demo mode -> explicit LUN ACL
 * transition and the teardown of UA and PR state on disable.
 */
int core_update_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg,
	int enable)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
	int trans = 0;
	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 */
	if (!(enable)) {
		/*
		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
		 * that have not been explicitly converted to MappedLUNs ->
		 * struct se_lun_acl, but we remove deve->alua_port_list from
		 * port->sep_alua_list.  This also means that active UAs and
		 * NodeACL context specific PR metadata for demo-mode
		 * MappedLUN *deve will be released below..
		 */
		spin_lock_bh(&port->sep_alua_lock);
		list_del(&deve->alua_port_list);
		spin_unlock_bh(&port->sep_alua_lock);
	}

	spin_lock_irq(&nacl->device_list_lock);
	if (enable) {
		/*
		 * Check if the call is handling demo mode -> explicit LUN ACL
		 * transition.  This transition must be for the same struct
		 * se_lun + mapped_lun that was setup in demo mode..
		 */
		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
			if (deve->se_lun_acl != NULL) {
				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
					" already set for demo mode -> explicit"
					" LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -1;
			}
			if (deve->se_lun != lun) {
				printk(KERN_ERR "struct se_dev_entry->se_lun does"
					" not match passed struct se_lun for demo"
					" mode -> explicit LUN ACL transition\n");
				spin_unlock_irq(&nacl->device_list_lock);
				return -1;
			}
			deve->se_lun_acl = lun_acl;
			trans = 1;
		} else {
			deve->se_lun = lun;
			deve->se_lun_acl = lun_acl;
			deve->mapped_lun = mapped_lun;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
		}

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		if (trans) {
			spin_unlock_irq(&nacl->device_list_lock);
			return 0;
		}
		deve->creation_time = get_jiffies_64();
		deve->attach_count++;
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&port->sep_alua_lock);
		list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
		spin_unlock_bh(&port->sep_alua_lock);

		return 0;
	}
	/*
	 * Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	spin_unlock_irq(&nacl->device_list_lock);
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();
	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/* core_clear_lun_from_tpg():
 *
 * Walk every struct se_node_acl in the TPG and disable any MappedLUN
 * entry that references the struct se_lun being shut down.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_bh(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_bh(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = &nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_update_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg, 0);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_bh(&tpg->acl_node_lock);
	}
	spin_unlock_bh(&tpg->acl_node_lock);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!(port)) {
		printk(KERN_ERR "Unable to allocate struct se_port\n");
		return NULL;
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		printk(KERN_WARNING "Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		kfree(port);
		return NULL;
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this
	 * struct se_device.  Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code		Description
	 * 0h		Reserved
	 * 1h		Relative port 1, historically known as port A
	 * 2h		Relative port 2, historically known as port B
	 * 3h to FFFFh	Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!(port->sep_rtpi))
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}

static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct se_subsystem_dev *su_dev = SU_DEV(dev);
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
				"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			T10_ALUA(su_dev)->default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt) != 0)
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port;

	port = core_alloc_port(dev);
	if (!(port))
		return -1;

	lun->lun_se_dev = dev;
	se_dev_start(dev);

	atomic_inc(&dev->dev_export_obj.obj_access_count);
	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	atomic_dec(&dev->dev_export_obj.obj_access_count);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	se_dev_stop(dev);
	lun->lun_se_dev = NULL;
}

int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun;
	struct se_session *se_sess = SE_SESS(se_cmd);
	unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
	u32 cdb_offset = 0, lun_count = 0, offset = 8, i;

	if (list_empty(&T_TASK(se_cmd)->t_task_list)) {
		printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
		return PYX_TRANSPORT_LU_COMM_FAILURE;
	}

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!(se_sess)) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = &SE_NODE_ACL(se_sess)->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		se_lun = deve->se_lun;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
		 */
		lun_count++;
		if ((cdb_offset + 8) >= se_cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
		cdb_offset += 8;
	}
	spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);

	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

/* se_release_device_for_hba():
 *
 * Stop the device if still active, halt its processing thread, let the
 * subsystem plugin free its private state, and release the struct
 * se_device from its HBA.
 */
void se_release_device_for_hba(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
	    (dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
		se_dev_stop(dev);

	if (dev->dev_ptr) {
		kthread_stop(dev->process_thread);
		if (dev->transport->free_device)
			dev->transport->free_device(dev->dev_ptr);
	}

	spin_lock(&hba->device_lock);
	list_del(&dev->dev_list);
	hba->dev_count--;
	spin_unlock(&hba->device_lock);

	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	kfree(dev->dev_status_queue_obj);
	kfree(dev->dev_queue_obj);
	kfree(dev);
}

void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
}

/* se_free_virtual_device():
 *
 * Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
 */
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
	if (!list_empty(&dev->dev_sep_list))
		dump_stack();

	core_alua_free_lu_gp_mem(dev);
	se_release_device_for_hba(dev);

	return 0;
}

static void se_dev_start(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_inc(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
		if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
			dev->dev_status &=
				~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

static void se_dev_stop(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	spin_lock(&hba->device_lock);
	atomic_dec(&dev->dev_obj.obj_access_count);
	if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
		if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
		} else if (dev->dev_status &
			   TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
			dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
			dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
		}
	}
	spin_unlock(&hba->device_lock);
}

int se_dev_check_online(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
	       (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

int se_dev_check_shutdown(struct se_device *dev)
{
	int ret;

	spin_lock_irq(&dev->dev_status_lock);
	ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
	spin_unlock_irq(&dev->dev_status_lock);

	return ret;
}

void se_dev_set_default_attribs(
	struct se_device *dev,
	struct se_dev_limits *dev_limits)
{
	struct queue_limits *limits = &dev_limits->limits;

	DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
	DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
	DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
	DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
	DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
	DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
	DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
	DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
	DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	/*
	 * The TPU=1 and TPWS=1 settings will be set in TCM/IBLOCK
	 * iblock_create_virtdevice() from struct queue_limits values
	 * if blk_queue_discard()==1
	 */
	DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	DEV_ATTRIB(dev)->max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	DEV_ATTRIB(dev)->unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	/*
	 * block_size is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
	DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
	/*
	 * max_sectors is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
	DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
	/*
	 * Set optimal_sectors from max_sectors, which can be lowered via
	 * configfs.
	 */
	DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
	/*
	 * queue_depth is based on subsystem plugin dependent requirements.
	 */
	DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
	DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
}

int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger than"
			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
		return -1;
	} else {
		DEV_ATTRIB(dev)->task_timeout = task_timeout;
		printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
			dev, task_timeout);
	}

	return 0;
}

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
	printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
		dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
	printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
		dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
	printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
		dev, DEV_ATTRIB(dev)->unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
	printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
		dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->dpo_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_dpo = flag;
	printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
		" bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->fua_write_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_fua_write = flag;
	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
		dev, DEV_ATTRIB(dev)->emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->fua_read_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_fua_read = flag;
	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
		dev, DEV_ATTRIB(dev)->emulate_fua_read);
	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	if (TRANSPORT(dev)->write_cache_emulated == NULL) {
		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
		return -1;
	}
	if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
		printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_write_cache = flag;
	printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
		dev, DEV_ATTRIB(dev)->emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
			" exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
	printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	DEV_ATTRIB(dev)->emulate_tas = flag;
	printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
		printk(KERN_ERR "Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	DEV_ATTRIB(dev)->emulate_tpu = flag;
	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
		printk(KERN_ERR "Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	DEV_ATTRIB(dev)->emulate_tpws = flag;
	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		dev, flag);
	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		printk(KERN_ERR "Illegal value %d\n", flag);
		return -1;
	}
	DEV_ATTRIB(dev)->enforce_pr_isids = flag;
	printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	u32 orig_queue_depth = dev->queue_depth;

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
			" dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	if (!(queue_depth)) {
		printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
			" queue_depth\n", dev);
		return -1;
	}

	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
			printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				DEV_ATTRIB(dev)->hw_queue_depth);
			return -1;
		}
	} else {
		if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
			if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
				printk(KERN_ERR "dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					DEV_ATTRIB(dev)->hw_queue_depth);
				return -1;
			}
		}
	}

	DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
	if (queue_depth > orig_queue_depth)
		atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
	else if (queue_depth < orig_queue_depth)
		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);

	printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);
	return 0;
}

int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
	int force = 0; /* Force setting for VDEVS */

	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" max_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}
	if (!(max_sectors)) {
		printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
			" max_sectors\n", dev);
		return -1;
	}
	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -1;
	}
	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, max_sectors,
				DEV_ATTRIB(dev)->hw_max_sectors);
			return -1;
		}
	} else {
		if (!(force) && (max_sectors >
				 DEV_ATTRIB(dev)->hw_max_sectors)) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than TCM/SE_Device max_sectors"
				": %u, use force=1 to override.\n", dev,
				max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
			return -1;
		}
		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -1;
		}
	}

	DEV_ATTRIB(dev)->max_sectors = max_sectors;
	printk(KERN_INFO "dev[%p]: SE Device max_sectors changed to %u\n",
		dev, max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
			" optimal_sectors while dev_export_obj: %d count exists\n",
			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
		return -EINVAL;
	}
	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
			" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
		printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than max_sectors: %u\n", dev,
			optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
		return -EINVAL;
	}

	DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
	printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
		dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
			" while dev_export_obj: %d count exists\n", dev,
			atomic_read(&dev->dev_export_obj.obj_access_count));
		return -1;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		printk(KERN_ERR "dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -1;
	}

	if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -1;
	}

	DEV_ATTRIB(dev)->block_size = block_size;
	printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
		dev, block_size);
	return 0;
}

struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_hba *hba,
	struct se_device *dev,
	u32 lun)
{
	struct se_lun *lun_p;
	u32 lun_access = 0;

	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
		printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
			atomic_read(&dev->dev_access_obj.obj_access_count));
		return NULL;
	}

	lun_p = core_tpg_pre_addlun(tpg, lun);
	if ((IS_ERR(lun_p)) || !(lun_p))
		return NULL;

	if (dev->dev_flags & DF_READ_ONLY)
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;

	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
		return NULL;

	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
		TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_bh(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl) {
				spin_unlock_bh(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_bh(&tpg->acl_node_lock);
			}
		}
		spin_unlock_bh(&tpg->acl_node_lock);
	}

	return lun_p;
}

/* core_dev_del_lun():
 *
 * Deactivate and remove a LUN from a TPG via core_tpg_pre_dellun() and
 * core_tpg_post_dellun().
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int ret = 0;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
	if (!(lun))
		return ret;

	core_tpg_post_dellun(tpg, lun);

	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
		TPG_TFO(tpg)->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/* core_dev_get_lun():
 *
 * Like core_get_lun_from_tpg() above, but requires the LUN to already be
 * in the TRANSPORT_LUN_STATUS_ACTIVE state.
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = &tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	u32 mapped_lun,
	char *initiatorname,
	int *ret)
{
	struct se_lun_acl *lacl;
	struct se_node_acl *nacl;

	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
		printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
			TPG_TFO(tpg)->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!(nacl)) {
		*ret = -EINVAL;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!(lacl)) {
		printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!(lun)) {
		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
			TPG_TFO(tpg)->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg, 1) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
	return 0;
}

/* core_dev_del_initiator_node_lun_acl():
 *
 * Remove an active MappedLUN ACL from its struct se_lun and disable the
 * underlying struct se_dev_entry mapping for the initiator node.
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!(nacl))
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

	lacl->se_lun = NULL;

	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	printk(KERN_INFO "%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
		TPG_TFO(tpg)->tpg_get_tag(tpg),
		TPG_TFO(tpg)->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	struct se_subsystem_dev *se_dev = NULL;
	struct se_subsystem_api *t;
	char buf[16];
	int ret;

	hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	se_global->g_lun0_hba = hba;
	t = hba->transport;

	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
	if (!(se_dev)) {
		printk(KERN_ERR "Unable to allocate memory for"
			" struct se_subsystem_dev\n");
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&se_dev->g_se_dev_list);
	INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
	INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
	spin_lock_init(&se_dev->t10_reservation.registration_lock);
	spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
	INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
	spin_lock_init(&se_dev->se_dev_lock);
	se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
	se_dev->t10_wwn.t10_sub_dev = se_dev;
	se_dev->t10_alua.t10_sub_dev = se_dev;
	se_dev->se_dev_attrib.da_sub_dev = se_dev;
	se_dev->se_dev_hba = hba;

	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
	if (!(se_dev->se_dev_su_ptr)) {
		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
			" from allocate_virtdevice()\n");
		ret = -ENOMEM;
		goto out;
	}
	se_global->g_lun0_su_dev = se_dev;

	memset(buf, 0, 16);
	sprintf(buf, "rd_pages=8");
	t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));

	dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
	if (!(dev) || IS_ERR(dev)) {
		ret = -ENOMEM;
		goto out;
	}
	se_dev->se_dev_ptr = dev;
	se_global->g_lun0_dev = dev;

	return 0;
out:
	se_global->g_lun0_su_dev = NULL;
	kfree(se_dev);
	if (se_global->g_lun0_hba) {
		core_delete_hba(se_global->g_lun0_hba);
		se_global->g_lun0_hba = NULL;
	}
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = se_global->g_lun0_hba;
	struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;

	if (!(hba))
		return;

	if (se_global->g_lun0_dev)
		se_free_virtual_device(se_global->g_lun0_dev, hba);

	kfree(su_dev);
	core_delete_hba(hba);
}