/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

extern struct se_device *g_lun0_dev;

static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/* __core_tpg_get_initiator_node_acl():
 *
 * The caller must hold tpg->acl_node_mutex.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/* core_tpg_get_initiator_node_acl():
 *
 * Look up a node ACL by initiator name and take a reference on its
 * acl_kref for the caller.  Returns NULL if no matching ACL exists,
 * or if the ACL is already being released.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	/*
	 * Obtain se_node_acl->acl_kref using fabric driver provided
	 * initiatorname[] during node acl endpoint lookup driven by
	 * new se_session login.
	 *
	 * The reference is held until se_session shutdown -> release
	 * occurs via fabric driver invoked transport_deregister_session()
	 * or transport_free_session() code.
	 */
	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (!kref_get_unless_zero(&acl->acl_kref))
			acl = NULL;
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);
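
/*
 * Usage sketch (illustrative only; the fabric-side names are hypothetical):
 * because the acl_kref taken above is dropped later by target-core via
 * transport_deregister_session()/transport_free_session(), a fabric login
 * path typically only performs the lookup and caches the result:
 *
 *	struct se_node_acl *nacl;
 *
 *	nacl = core_tpg_get_initiator_node_acl(se_tpg, initiatorname);
 *	if (!nacl)
 *		return -ENOENT;		(unknown initiator, reject the login)
 *	se_sess->se_node_acl = nacl;
 *	(reference released at session shutdown, not by the fabric driver)
 */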

void core_allocate_nexus_loss_ua(
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	if (!nacl)
		return;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
		core_scsi3_ua_allocate(deve, 0x29,
				       ASCQ_29H_NEXUS_LOSS_OCCURRED);
	rcu_read_unlock();
}
EXPORT_SYMBOL(core_allocate_nexus_loss_ua);

/* core_tpg_add_node_to_devs():
 *
 * Create demo-mode MappedLUNs for @acl from the active TPG LUNs, or
 * from @lun_orig alone when it is non-NULL.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg,
	struct se_lun *lun_orig)
{
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	mutex_lock(&tpg->tpg_lun_mutex);
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link) {
		if (lun_orig && lun != lun_orig)
			continue;

		dev = rcu_dereference_check(lun->lun_se_dev,
					    lockdep_is_held(&tpg->tpg_lun_mutex));
		/*
		 * By default in a LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, i.e. demo-mode LUNs are
		 * exported READ-ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * In the default read-only demo mode, only non-disk
			 * device types (e.g. optical drives) are allowed R/W.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
						 lun_access, acl, tpg);
		/*
		 * Check to see if there are any existing persistent reservation
		 * APTPL pre-registrations that need to be enabled for this
		 * dynamic LUN ACL now.
		 */
		core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
						    lun->unpacked_lun);
	}
	mutex_unlock(&tpg->tpg_lun_mutex);
}

static void
target_set_nacl_queue_depth(struct se_portal_group *tpg,
			    struct se_node_acl *acl, u32 queue_depth)
{
	acl->queue_depth = queue_depth;

	if (!acl->queue_depth) {
		pr_warn("Queue depth for %s Initiator Node: %s is 0, "
			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}
}

static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
		const unsigned char *initiatorname)
{
	struct se_node_acl *acl;
	u32 queue_depth;

	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
			GFP_KERNEL);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->nacl_sess_lock);
	mutex_init(&acl->lun_entry_mutex);
	atomic_set(&acl->acl_pr_ref_count, 0);

	if (tpg->se_tpg_tfo->tpg_get_default_depth)
		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	else
		queue_depth = 1;
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	return acl;
}

static void target_add_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;

	mutex_lock(&tpg->acl_node_mutex);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	mutex_unlock(&tpg->acl_node_mutex);

	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		acl->dynamic_node_acl ? "DYNAMIC" : "",
		acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(),
		acl->initiatorname);
}

bool target_tpg_has_node_acl(struct se_portal_group *tpg,
			     const char *initiatorname)
{
	struct se_node_acl *acl;
	bool found = false;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = true;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}
EXPORT_SYMBOL(target_tpg_has_node_acl);

struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return NULL;
	/*
	 * When allocating a dynamically generated node_acl, go ahead
	 * and take the extra kref now before returning to the fabric
	 * driver caller.
	 *
	 * Note this reference will be released at session shutdown
	 * time within transport_free_session() code.
	 */
	kref_get(&acl->acl_kref);
	acl->dynamic_node_acl = 1;

	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg, NULL);

	target_add_node_acl(acl);
	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
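
/*
 * Usage sketch (illustrative only): a fabric driver typically calls this
 * from its login path before registering the new session, e.g.:
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
 *	if (!se_nacl)
 *		return -EPERM;	(no explicit ACL and demo mode disabled)
 *	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
 *
 * where fabric_sess_ptr is the fabric driver's own session context.
 */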

void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	mutex_lock(&tpg->acl_node_mutex);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			mutex_unlock(&tpg->acl_node_mutex);
			return acl;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		mutex_unlock(&tpg->acl_node_mutex);
		return ERR_PTR(-EEXIST);
	}
	mutex_unlock(&tpg->acl_node_mutex);

	acl = target_alloc_node_acl(tpg, initiatorname);
	if (!acl)
		return ERR_PTR(-ENOMEM);

	target_add_node_acl(acl);
	return acl;
}

void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
{
	struct se_portal_group *tpg = acl->se_tpg;
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	mutex_lock(&tpg->acl_node_mutex);
	if (acl->dynamic_node_acl)
		acl->dynamic_node_acl = 0;
	list_del(&acl->acl_list);
	mutex_unlock(&tpg->acl_node_mutex);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				 sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		if (!target_get_session(sess))
			continue;
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		/*
		 * shutdown_session() returned non-zero, so in addition to
		 * the target_get_session() reference dropped above, drop
		 * the session reference again to force release.
		 */
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	kfree(acl);
}

/* core_tpg_set_initiator_node_queue_depth():
 *
 * Apply a new queue depth to an Initiator Node ACL, forcing session
 * reinstatement for any of its active sessions so that the new depth
 * takes effect.  Returns 0.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_node_acl *acl,
	u32 queue_depth)
{
	LIST_HEAD(sess_list);
	struct se_portal_group *tpg = acl->se_tpg;
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	/*
	 * The user has requested a queue depth change for this Initiator
	 * Node; target_set_nacl_queue_depth() updates the value in the
	 * Node's struct se_node_acl.
	 */
	target_set_nacl_queue_depth(tpg, acl, queue_depth);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				 sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;
		if (!target_get_session(sess))
			continue;
		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

		/*
		 * Finally call tpg->se_tpg_tfo->shutdown_session() to force
		 * session reinstatement to occur if there is an active
		 * session for the $FABRIC_MOD Initiator Node in question.
		 */
		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc) {
			spin_lock_irqsave(&acl->nacl_sess_lock, flags);
			continue;
		}
		target_put_session(sess);
		spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
		acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
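
/*
 * Usage sketch (illustrative only; a hypothetical configfs attribute
 * store handler for a nodeacl's queue depth):
 *
 *	u32 depth;
 *
 *	if (kstrtou32(page, 0, &depth) < 0)
 *		return -EINVAL;
 *	rc = core_tpg_set_initiator_node_queue_depth(acl, depth);
 *	return rc ? rc : count;
 */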

/* core_tpg_set_initiator_node_tag():
 *
 * Initiator nodeacl tags are not used internally, but may be used by
 * userspace to emulate aliases or groups.
 * Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_ref_comp);
}

int core_tpg_register(
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	int proto_id)
{
	int ret;

	if (!se_tpg)
		return -EINVAL;
	/*
	 * For the typical case where core_tpg_register() is called by a
	 * fabric driver from target_core_fabric_ops->fabric_make_tpg()
	 * configfs context, use the original tf_ops pointer already saved
	 * by target-core in target_fabric_make_wwn().
	 *
	 * Otherwise, for special cases like iscsi-target discovery TPGs
	 * the caller is responsible for setting ->se_tpg_tfo ahead of
	 * calling core_tpg_register().
	 */
	if (se_wwn)
		se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;

	if (!se_tpg->se_tpg_tfo) {
		pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n");
		return -EINVAL;
	}

	INIT_HLIST_HEAD(&se_tpg->tpg_lun_hlist);
	se_tpg->proto_id = proto_id;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->session_lock);
	mutex_init(&se_tpg->tpg_lun_mutex);
	mutex_init(&se_tpg->acl_node_mutex);

	if (se_tpg->proto_id >= 0) {
		se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0);
		if (IS_ERR(se_tpg->tpg_virt_lun0))
			return PTR_ERR(se_tpg->tpg_virt_lun0);

		ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0,
				TRANSPORT_LUNFLAGS_READ_ONLY, g_lun0_dev);
		if (ret < 0) {
			kfree(se_tpg->tpg_virt_lun0);
			return ret;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", se_tpg->se_tpg_tfo->get_fabric_name(),
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) ?
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);
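
/*
 * Usage sketch (illustrative only; struct my_tpg and the surrounding
 * callback are hypothetical): the common pattern from a fabric driver's
 * target_core_fabric_ops->fabric_make_tpg() callback is:
 *
 *	struct my_tpg *tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 *
 *	if (!tpg)
 *		return ERR_PTR(-ENOMEM);
 *	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);
 *	if (ret < 0) {
 *		kfree(tpg);
 *		return ERR_PTR(ret);
 *	}
 *	return &tpg->se_tpg;
 *
 * with core_tpg_deregister() undoing this from ->fabric_drop_tpg().
 */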

int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	struct se_node_acl *nacl, *nacl_tmp;
	LIST_HEAD(node_list);

	pr_debug("TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, "
		"Proto: %d, Portal Tag: %u\n", tfo->get_fabric_name(),
		tfo->tpg_get_wwn(se_tpg) ? tfo->tpg_get_wwn(se_tpg) : NULL,
		se_tpg->proto_id, tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();

	mutex_lock(&se_tpg->acl_node_mutex);
	list_splice_init(&se_tpg->acl_node_list, &node_list);
	mutex_unlock(&se_tpg->acl_node_mutex);
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
		list_del(&nacl->acl_list);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		kfree(nacl);
	}

	if (se_tpg->proto_id >= 0) {
		core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0);
		kfree_rcu(se_tpg->tpg_virt_lun0, rcu_head);
	}

	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

struct se_lun *core_tpg_alloc_lun(
	struct se_portal_group *tpg,
	u64 unpacked_lun)
{
	struct se_lun *lun;

	lun = kzalloc(sizeof(*lun), GFP_KERNEL);
	if (!lun) {
		pr_err("Unable to allocate se_lun memory\n");
		return ERR_PTR(-ENOMEM);
	}
	lun->unpacked_lun = unpacked_lun;
	lun->lun_link_magic = SE_LUN_LINK_MAGIC;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_ref_comp);
	INIT_LIST_HEAD(&lun->lun_deve_list);
	INIT_LIST_HEAD(&lun->lun_dev_link);
	atomic_set(&lun->lun_tg_pt_secondary_offline, 0);
	spin_lock_init(&lun->lun_deve_lock);
	mutex_init(&lun->lun_tg_pt_md_mutex);
	INIT_LIST_HEAD(&lun->lun_tg_pt_gp_link);
	spin_lock_init(&lun->lun_tg_pt_gp_lock);
	lun->lun_tpg = tpg;

	return lun;
}

int core_tpg_add_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	struct se_device *dev)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release, 0,
			      GFP_KERNEL);
	if (ret < 0)
		goto out;

	ret = core_alloc_rtpi(lun, dev);
	if (ret)
		goto out_kill_ref;

	if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);

	mutex_lock(&tpg->tpg_lun_mutex);

	spin_lock(&dev->se_port_lock);
	lun->lun_index = dev->dev_index;
	rcu_assign_pointer(lun->lun_se_dev, dev);
	dev->export_count++;
	list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->dev_flags & DF_READ_ONLY)
		lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	else
		lun->lun_access = lun_access;
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
	mutex_unlock(&tpg->tpg_lun_mutex);

	return 0;

out_kill_ref:
	percpu_ref_exit(&lun->lun_ref);
out:
	return ret;
}
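
/*
 * Usage sketch (illustrative only): callers pair core_tpg_alloc_lun()
 * with core_tpg_add_lun(), exactly as core_tpg_register() does above
 * for the virtual LUN 0:
 *
 *	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 *	ret = core_tpg_add_lun(tpg, lun, TRANSPORT_LUNFLAGS_READ_WRITE, dev);
 *	if (ret < 0)
 *		kfree(lun);
 */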

void core_tpg_remove_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	/*
	 * rcu_dereference_raw protected by se_lun->lun_group symlink
	 * reference to se_device->dev_group.
	 */
	struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

	core_clear_lun_from_tpg(lun, tpg);
	/*
	 * Wait for any active I/O references to percpu se_lun->lun_ref to
	 * be released.  Also, se_lun->lun_ref is now used by PR and ALUA
	 * logic when referencing a remote target port during ALL_TGT_PT=1
	 * and generating UNIT_ATTENTIONs for ALUA access state transition.
	 */
	transport_clear_lun_ref(lun);

	mutex_lock(&tpg->tpg_lun_mutex);
	if (lun->lun_se_dev) {
		target_detach_tg_pt_gp(lun);

		spin_lock(&dev->se_port_lock);
		list_del(&lun->lun_dev_link);
		dev->export_count--;
		rcu_assign_pointer(lun->lun_se_dev, NULL);
		spin_unlock(&dev->se_port_lock);
	}
	if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
		hlist_del_rcu(&lun->link);
	mutex_unlock(&tpg->tpg_lun_mutex);

	percpu_ref_exit(&lun->lun_ref);
}