1 /*******************************************************************************
2 * Filename: target_core_tpg.c
4 * This file contains generic Target Portal Group related functions.
6 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
7 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
8 * Copyright (c) 2007-2010 Rising Tide Systems
9 * Copyright (c) 2008-2010 Linux-iSCSI.org
11 * Nicholas A. Bellinger <nab@kernel.org>
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 ******************************************************************************/
29 #include <linux/net.h>
30 #include <linux/string.h>
31 #include <linux/timer.h>
32 #include <linux/slab.h>
33 #include <linux/spinlock.h>
35 #include <linux/export.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_cmnd.h>
41 #include <target/target_core_base.h>
42 #include <target/target_core_device.h>
43 #include <target/target_core_tpg.h>
44 #include <target/target_core_transport.h>
45 #include <target/target_core_fabric_ops.h>
47 #include "target_core_hba.h"
48 #include "target_core_stat.h"
/* Global virtual LUN0 backing device; defined and set up elsewhere
 * (see core_tpg_setup_virtual_lun0() below, which reads it). */
50 extern struct se_device *g_lun0_dev;
/* tpg_lock protects tpg_list, the global list of all registered
 * struct se_portal_group instances (see core_tpg_register/deregister). */
52 static DEFINE_SPINLOCK(tpg_lock);
53 static LIST_HEAD(tpg_list);
55 /* core_clear_initiator_node_from_tpg():
 *
 * Revoke all active MappedLUN access for @nacl within @tpg: every
 * device_list[] entry still flagged TRANSPORT_LUNFLAGS_INITIATOR_ACCESS
 * is torn down via core_update_device_list_for_node(...NO_ACCESS...).
 * Runs under nacl->device_list_lock, which is dropped and re-acquired
 * around the update call.
 * NOTE(review): this listing is missing several original source lines
 * (loop-index declaration, `continue`, braces, the `lun` assignment) —
 * read against the complete file.
 */
59 static void core_clear_initiator_node_from_tpg(
60 	struct se_node_acl *nacl,
61 	struct se_portal_group *tpg)
64 	struct se_dev_entry *deve;
67 	spin_lock_irq(&nacl->device_list_lock);
68 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
69 		deve = &nacl->device_list[i];
		/* Skip entries the initiator never had access to. */
71 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
		/* Inconsistent state: access flag set but no backing se_lun. */
75 			pr_err("%s device entries device pointer is"
76 				" NULL, but Initiator has access.\n",
77 				tpg->se_tpg_tfo->get_fabric_name());
		/* Drop the spinlock across the de-registration helper,
		 * then retake it before continuing the scan. */
82 		spin_unlock_irq(&nacl->device_list_lock);
83 		core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
84 				TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
86 		spin_lock_irq(&nacl->device_list_lock);
88 	spin_unlock_irq(&nacl->device_list_lock);
91 /* __core_tpg_get_initiator_node_acl():
 *
 * Unlocked linear search of tpg->acl_node_list for an ACL whose
 * initiatorname matches @initiatorname exactly (strcmp). Returns the
 * matching ACL; the NULL-return path is not visible in this listing.
 *
93 * spin_lock_bh(&tpg->acl_node_lock); must be held when calling
 */
95 struct se_node_acl *__core_tpg_get_initiator_node_acl(
96 	struct se_portal_group *tpg,
97 	const char *initiatorname)
99 	struct se_node_acl *acl;
101 	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
102 		if (!strcmp(acl->initiatorname, initiatorname))
109 /* core_tpg_get_initiator_node_acl():
 *
 * Locked wrapper around __core_tpg_get_initiator_node_acl(): performs
 * the lookup under tpg->acl_node_lock (irq-disabling variant).
 * NOTE(review): the helper's header comment says spin_lock_bh while all
 * callers here use spin_lock_irq — confirm which discipline is current.
 */
113 struct se_node_acl *core_tpg_get_initiator_node_acl(
114 	struct se_portal_group *tpg,
115 	unsigned char *initiatorname)
117 	struct se_node_acl *acl;
119 	spin_lock_irq(&tpg->acl_node_lock);
120 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
121 	spin_unlock_irq(&tpg->acl_node_lock);
126 /* core_tpg_add_node_to_devs():
 *
 * For a demo-mode (dynamically generated) node ACL, create MappedLUNs
 * for every TPG LUN currently in TRANSPORT_LUN_STATUS_ACTIVE. The LUN
 * access mode (READ_WRITE vs READ_ONLY) is chosen from the fabric's
 * tpg_check_demo_mode_write_protect() callback, the device's
 * DF_READ_ONLY flag, and the device type. tpg_lun_lock is dropped
 * around core_update_device_list_for_node() and re-acquired per
 * iteration.
 * NOTE(review): declarations of i, lun and lun_access, plus several
 * braces/`continue` lines, are missing from this listing.
 */
130 void core_tpg_add_node_to_devs(
131 	struct se_node_acl *acl,
132 	struct se_portal_group *tpg)
137 	struct se_device *dev;
139 	spin_lock(&tpg->tpg_lun_lock);
140 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
141 		lun = &tpg->tpg_lun_list[i];
		/* Only export LUNs that are currently active. */
142 		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
145 		spin_unlock(&tpg->tpg_lun_lock);
147 		dev = lun->lun_se_dev;
149 		 * By default in LIO-Target $FABRIC_MOD,
150 		 * demo_mode_write_protect is ON, or READ_ONLY;
152 		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			/* Write-protect disabled: honor the device's own RO flag. */
153 			if (dev->dev_flags & DF_READ_ONLY)
154 				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
156 				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
159 			 * Allow only optical drives to issue R/W in default RO
162 			if (dev->transport->get_device_type(dev) == TYPE_DISK)
163 				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
165 				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
168 		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
169 			" access for LUN in Demo Mode\n",
170 			tpg->se_tpg_tfo->get_fabric_name(),
171 			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
172 			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
173 			"READ-WRITE" : "READ-ONLY");
		/* Final arg 1 == create/enable the mapping (vs 0 to clear). */
175 		core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
176 				lun_access, acl, tpg, 1);
177 		spin_lock(&tpg->tpg_lun_lock);
179 	spin_unlock(&tpg->tpg_lun_lock);
182 /* core_set_queue_depth_for_node():
 *
 * Enforce a sane TCQ depth for @acl: a zero queue_depth is logged and
 * clamped up to 1. Return value path is not visible in this listing;
 * callers treat < 0 as failure.
 */
186 static int core_set_queue_depth_for_node(
187 	struct se_portal_group *tpg,
188 	struct se_node_acl *acl)
190 	if (!acl->queue_depth) {
191 		pr_err("Queue depth for %s Initiator Node: %s is 0,"
192 			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
194 		acl->queue_depth = 1;
200 /* core_create_device_list_for_node():
 *
 * Allocate a zeroed array of TRANSPORT_MAX_LUNS_PER_TPG
 * struct se_dev_entry for @nacl->device_list and initialize each
 * entry's refcounts, spinlock and list heads. Returns non-zero /
 * negative on allocation failure (error-return line not visible here).
 */
204 static int core_create_device_list_for_node(struct se_node_acl *nacl)
206 	struct se_dev_entry *deve;
209 	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
210 				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
211 	if (!nacl->device_list) {
212 		pr_err("Unable to allocate memory for"
213 			" struct se_node_acl->device_list\n");
216 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
217 		deve = &nacl->device_list[i];
		/* Per-entry init: UA/PR counters, UA lock, ALUA + UA lists. */
219 		atomic_set(&deve->ua_count, 0);
220 		atomic_set(&deve->pr_ref_count, 0);
221 		spin_lock_init(&deve->ua_lock);
222 		INIT_LIST_HEAD(&deve->alua_port_list);
223 		INIT_LIST_HEAD(&deve->ua_list);
229 /* core_tpg_check_initiator_node_acl()
 *
 * Look up an existing node ACL for @initiatorname; when none exists and
 * the fabric reports demo mode enabled, allocate a dynamic ACL via
 * tpg_alloc_fabric_acl(), initialize it (lists, locks, queue depth,
 * name, ACL index), build its device list, optionally create demo-mode
 * MappedLUNs, and link it onto tpg->acl_node_list. Called from fabric
 * session-establishment paths (exported below).
 */
233 struct se_node_acl *core_tpg_check_initiator_node_acl(
234 	struct se_portal_group *tpg,
235 	unsigned char *initiatorname)
237 	struct se_node_acl *acl;
239 	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	/* No ACL and demo mode disabled: reject (return path not shown). */
243 	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
246 	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
250 	INIT_LIST_HEAD(&acl->acl_list);
251 	INIT_LIST_HEAD(&acl->acl_sess_list);
252 	spin_lock_init(&acl->device_list_lock);
253 	spin_lock_init(&acl->nacl_sess_lock);
254 	atomic_set(&acl->acl_pr_ref_count, 0);
255 	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
256 	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
258 	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
259 	spin_lock_init(&acl->stats_lock);
	/* Mark as demo-mode generated so it can later be converted by
	 * core_tpg_add_initiator_node_acl(). */
260 	acl->dynamic_node_acl = 1;
262 	tpg->se_tpg_tfo->set_default_node_attributes(acl);
264 	if (core_create_device_list_for_node(acl) < 0) {
265 		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
269 	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
270 		core_free_device_list_for_node(acl, tpg);
271 		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
275 	 * Here we only create demo-mode MappedLUNs from the active
276 	 * TPG LUNs if the fabric is not explicitly asking for
277 	 * tpg_check_demo_mode_login_only() == 1.
279 	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
280 	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
283 	core_tpg_add_node_to_devs(acl, tpg);
285 	spin_lock_irq(&tpg->acl_node_lock);
286 	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
287 	tpg->num_node_acls++;
288 	spin_unlock_irq(&tpg->acl_node_lock);
290 	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
291 		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
292 		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
293 		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
297 EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
/* Spin until all outstanding Persistent Reservation references on
 * @nacl are dropped (acl_pr_ref_count reaches zero). The loop body
 * (presumably cpu_relax()) is not visible in this listing. */
299 void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
301 	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
/* Delete every active TPG LUN that has a backing se_device, via
 * core_dev_del_lun(). tpg_lun_lock is dropped around each deletion
 * and re-acquired, so the table may change between iterations. */
305 void core_tpg_clear_object_luns(struct se_portal_group *tpg)
310 	spin_lock(&tpg->tpg_lun_lock);
311 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
312 		lun = &tpg->tpg_lun_list[i];
		/* Skip inactive slots and LUNs with no backing device. */
314 		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
315 		    (lun->lun_se_dev == NULL))
318 		spin_unlock(&tpg->tpg_lun_lock);
319 		ret = core_dev_del_lun(tpg, lun->unpacked_lun);
320 		spin_lock(&tpg->tpg_lun_lock);
322 	spin_unlock(&tpg->tpg_lun_lock);
324 EXPORT_SYMBOL(core_tpg_clear_object_luns);
326 /* core_tpg_add_initiator_node_acl():
 *
 * Register an explicit (configfs-created) node ACL for @initiatorname.
 * If a matching dynamic (demo-mode) ACL already exists it is converted
 * in place (dynamic_node_acl cleared) and the caller-supplied @se_nacl
 * is released back to the fabric; if a non-dynamic ACL exists, returns
 * ERR_PTR(-EEXIST). Otherwise initializes @se_nacl and links it onto
 * tpg->acl_node_list. A queue_depth parameter is expected in the
 * signature but its declaration line is missing from this listing.
 */
330 struct se_node_acl *core_tpg_add_initiator_node_acl(
331 	struct se_portal_group *tpg,
332 	struct se_node_acl *se_nacl,
333 	const char *initiatorname,
336 	struct se_node_acl *acl = NULL;
338 	spin_lock_irq(&tpg->acl_node_lock);
339 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
341 		if (acl->dynamic_node_acl) {
			/* Promote an existing demo-mode ACL to an explicit one. */
342 			acl->dynamic_node_acl = 0;
343 			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
344 				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
345 				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
346 			spin_unlock_irq(&tpg->acl_node_lock);
348 			 * Release the locally allocated struct se_node_acl
349 			 * because * core_tpg_add_initiator_node_acl() returned
350 			 * a pointer to an existing demo mode node ACL.
353 				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
		/* Non-dynamic ACL already present: refuse duplicate. */
358 		pr_err("ACL entry for %s Initiator"
359 			" Node %s already exists for TPG %u, ignoring"
360 			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
361 			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
362 		spin_unlock_irq(&tpg->acl_node_lock);
363 		return ERR_PTR(-EEXIST);
365 	spin_unlock_irq(&tpg->acl_node_lock);
368 		pr_err("struct se_node_acl pointer is NULL\n");
369 		return ERR_PTR(-EINVAL);
372 	 * For v4.x logic the se_node_acl_s is hanging off a fabric
373 	 * dependent structure allocated via
374 	 * struct target_core_fabric_ops->fabric_make_nodeacl()
378 	INIT_LIST_HEAD(&acl->acl_list);
379 	INIT_LIST_HEAD(&acl->acl_sess_list);
380 	spin_lock_init(&acl->device_list_lock);
381 	spin_lock_init(&acl->nacl_sess_lock);
382 	atomic_set(&acl->acl_pr_ref_count, 0);
383 	acl->queue_depth = queue_depth;
384 	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
386 	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
387 	spin_lock_init(&acl->stats_lock);
389 	tpg->se_tpg_tfo->set_default_node_attributes(acl);
391 	if (core_create_device_list_for_node(acl) < 0) {
392 		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
393 		return ERR_PTR(-ENOMEM);
396 	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
397 		core_free_device_list_for_node(acl, tpg);
398 		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
399 		return ERR_PTR(-EINVAL);
402 	spin_lock_irq(&tpg->acl_node_lock);
403 	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
404 	tpg->num_node_acls++;
405 	spin_unlock_irq(&tpg->acl_node_lock);
408 	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
409 		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
410 		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
411 		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
415 EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
417 /* core_tpg_del_initiator_node_acl():
 *
 * Unlink @acl from tpg->acl_node_list, shut down any sessions still
 * attached to it (asking the fabric via shutdown_session() whether our
 * context must call close_session()), wait for PR references to drain,
 * then clear the MappedLUNs and free the per-node device list.
 * session_lock is dropped around each close_session() call.
 */
421 int core_tpg_del_initiator_node_acl(
422 	struct se_portal_group *tpg,
423 	struct se_node_acl *acl,
426 	struct se_session *sess, *sess_tmp;
429 	spin_lock_irq(&tpg->acl_node_lock);
430 	if (acl->dynamic_node_acl) {
431 		acl->dynamic_node_acl = 0;
434 	list_del(&acl->acl_list);
435 	tpg->num_node_acls--;
436 	spin_unlock_irq(&tpg->acl_node_lock);
438 	spin_lock_bh(&tpg->session_lock);
439 	list_for_each_entry_safe(sess, sess_tmp,
440 				&tpg->tpg_sess_list, sess_list) {
		/* Only sessions belonging to this ACL are affected. */
441 		if (sess->se_node_acl != acl)
444 		 * Determine if the session needs to be closed by our context.
446 		if (!tpg->se_tpg_tfo->shutdown_session(sess))
449 		spin_unlock_bh(&tpg->session_lock);
451 		 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
452 		 * forcefully shutdown the $FABRIC_MOD session/nexus.
454 		tpg->se_tpg_tfo->close_session(sess);
456 		spin_lock_bh(&tpg->session_lock);
458 	spin_unlock_bh(&tpg->session_lock);
460 	core_tpg_wait_for_nacl_pr_ref(acl);
461 	core_clear_initiator_node_from_tpg(acl, tpg);
462 	core_free_device_list_for_node(acl, tpg);
464 	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
465 		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
466 		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
467 		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
471 EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
473 /* core_tpg_set_initiator_node_queue_depth():
 *
 * Change the TCQ depth for the named initiator's ACL. If a session for
 * the ACL is active and force was not requested (force-parameter line
 * missing from this listing), the change is refused and any temporarily
 * cleared dynamic_node_acl flag is restored. With force, the existing
 * session is shut down (shutdown_session()/close_session()) so the
 * initiator re-logs-in with the new depth, applied through
 * core_set_queue_depth_for_node().
 * NOTE(review): several lines (force check, init_sess assignment,
 * error returns) are missing here; locking order below must be read
 * against the complete file.
 */
477 int core_tpg_set_initiator_node_queue_depth(
478 	struct se_portal_group *tpg,
479 	unsigned char *initiatorname,
483 	struct se_session *sess, *init_sess = NULL;
484 	struct se_node_acl *acl;
487 	spin_lock_irq(&tpg->acl_node_lock);
488 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
		/* No such ACL: log and bail out (error return not shown). */
490 		pr_err("Access Control List entry for %s Initiator"
491 			" Node %s does not exists for TPG %hu, ignoring"
492 			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
493 			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
494 		spin_unlock_irq(&tpg->acl_node_lock);
	/* Temporarily clear the dynamic flag; restored on every failure
	 * path below via `acl->dynamic_node_acl = 1`. */
497 	if (acl->dynamic_node_acl) {
498 		acl->dynamic_node_acl = 0;
501 	spin_unlock_irq(&tpg->acl_node_lock);
503 	spin_lock_bh(&tpg->session_lock);
504 	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
505 		if (sess->se_node_acl != acl)
			/* Active session and no force: refuse the change. */
509 			pr_err("Unable to change queue depth for %s"
510 				" Initiator Node: %s while session is"
511 				" operational.  To forcefully change the queue"
512 				" depth and force session reinstatement"
513 				" use the \"force=1\" parameter.\n",
514 				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
515 			spin_unlock_bh(&tpg->session_lock);
517 			spin_lock_irq(&tpg->acl_node_lock);
519 				acl->dynamic_node_acl = 1;
520 			spin_unlock_irq(&tpg->acl_node_lock);
524 		 * Determine if the session needs to be closed by our context.
526 		if (!tpg->se_tpg_tfo->shutdown_session(sess))
534 	 * User has requested to change the queue depth for a Initiator Node.
535 	 * Change the value in the Node's struct se_node_acl, and call
536 	 * core_set_queue_depth_for_node() to add the requested queue depth.
538 	 * Finally call tpg->se_tpg_tfo->close_session() to force session
539 	 * reinstatement to occur if there is an active session for the
540 	 * $FABRIC_MOD Initiator Node in question.
542 	acl->queue_depth = queue_depth;
544 	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
545 		spin_unlock_bh(&tpg->session_lock);
547 		 * Force session reinstatement if
548 		 * core_set_queue_depth_for_node() failed, because we assume
549 		 * the $FABRIC_MOD has already the set session reinstatement
550 		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
553 			tpg->se_tpg_tfo->close_session(init_sess);
555 		spin_lock_irq(&tpg->acl_node_lock);
557 			acl->dynamic_node_acl = 1;
558 		spin_unlock_irq(&tpg->acl_node_lock);
561 	spin_unlock_bh(&tpg->session_lock);
563 	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
564 	 * forcefully shutdown the $FABRIC_MOD session/nexus.
567 		tpg->se_tpg_tfo->close_session(init_sess);
569 	pr_debug("Successfully changed queue depth to: %d for Initiator"
570 		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
571 		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
572 		tpg->se_tpg_tfo->tpg_get_tag(tpg));
574 	spin_lock_irq(&tpg->acl_node_lock);
576 		acl->dynamic_node_acl = 1;
577 	spin_unlock_irq(&tpg->acl_node_lock);
581 EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
/* Initialize the TPG's embedded virtual LUN0 (se_tpg->tpg_virt_lun0),
 * backed by the global g_lun0_dev, and activate it read-only via
 * core_tpg_post_addlun(). Returns the post_addlun result (return line
 * not visible in this listing). */
583 static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
585 	/* Set in core_dev_setup_virtual_lun0() */
586 	struct se_device *dev = g_lun0_dev;
587 	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	/* Virtual LUN0 is always exported read-only. */
588 	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
591 	lun->unpacked_lun = 0;
592 	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
593 	atomic_set(&lun->lun_acl_count, 0);
594 	init_completion(&lun->lun_shutdown_comp);
595 	INIT_LIST_HEAD(&lun->lun_acl_list);
596 	INIT_LIST_HEAD(&lun->lun_cmd_list);
597 	spin_lock_init(&lun->lun_acl_lock);
598 	spin_lock_init(&lun->lun_cmd_lock);
599 	spin_lock_init(&lun->lun_sep_lock);
601 	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
/* Tear down the TPG's embedded virtual LUN0 via core_tpg_post_dellun().
 * Counterpart of core_tpg_setup_virtual_lun0(). */
608 static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
610 	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
612 	core_tpg_post_dellun(se_tpg, lun);
/* core_tpg_register():
 *
 * Allocate and initialize a portal group: a zeroed tpg_lun_list of
 * TRANSPORT_MAX_LUNS_PER_TPG se_lun slots (each initialized FREE with
 * its locks/lists), fill in the se_tpg fields from the fabric's
 * arguments, set up virtual LUN0 for NORMAL TPGs, and add the TPG to
 * the global tpg_list under tpg_lock. An se_tpg_type parameter is
 * expected in the signature; its declaration line is missing from this
 * listing.
 */
615 int core_tpg_register(
616 	struct target_core_fabric_ops *tfo,
617 	struct se_wwn *se_wwn,
618 	struct se_portal_group *se_tpg,
619 	void *tpg_fabric_ptr,
625 	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
626 				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
627 	if (!se_tpg->tpg_lun_list) {
628 		pr_err("Unable to allocate struct se_portal_group->"
633 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
634 		lun = &se_tpg->tpg_lun_list[i];
635 		lun->unpacked_lun = i;
636 		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
637 		atomic_set(&lun->lun_acl_count, 0);
638 		init_completion(&lun->lun_shutdown_comp);
639 		INIT_LIST_HEAD(&lun->lun_acl_list);
640 		INIT_LIST_HEAD(&lun->lun_cmd_list);
641 		spin_lock_init(&lun->lun_acl_lock);
642 		spin_lock_init(&lun->lun_cmd_lock);
643 		spin_lock_init(&lun->lun_sep_lock);
646 	se_tpg->se_tpg_type = se_tpg_type;
647 	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
648 	se_tpg->se_tpg_tfo = tfo;
649 	se_tpg->se_tpg_wwn = se_wwn;
650 	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
651 	INIT_LIST_HEAD(&se_tpg->acl_node_list);
652 	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
653 	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
654 	spin_lock_init(&se_tpg->acl_node_lock);
655 	spin_lock_init(&se_tpg->session_lock);
656 	spin_lock_init(&se_tpg->tpg_lun_lock);
	/* Only NORMAL (non-discovery) TPGs carry a virtual LUN0. */
658 	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
659 		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
	/* Publish the new TPG on the global list. */
665 	spin_lock_bh(&tpg_lock);
666 	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
667 	spin_unlock_bh(&tpg_lock);
669 	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
670 		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
671 		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
672 		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
673 		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
677 EXPORT_SYMBOL(core_tpg_register);
/* core_tpg_deregister():
 *
 * Remove @se_tpg from the global tpg_list, wait for outstanding PR
 * references to drain, release any leftover demo-mode node ACLs (with
 * acl_node_lock dropped around each per-ACL teardown), release virtual
 * LUN0 for NORMAL TPGs, and free tpg_lun_list.
 */
679 int core_tpg_deregister(struct se_portal_group *se_tpg)
681 	struct se_node_acl *nacl, *nacl_tmp;
683 	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
684 		" for endpoint: %s Portal Tag %u\n",
685 		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
686 		"Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
687 		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
688 		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
690 	spin_lock_bh(&tpg_lock);
691 	list_del(&se_tpg->se_tpg_node);
692 	spin_unlock_bh(&tpg_lock);
	/* Busy-wait for PR references (loop body not visible here). */
694 	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
697 	 * Release any remaining demo-mode generated se_node_acl that have
698 	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
699 	 * in transport_deregister_session().
701 	spin_lock_irq(&se_tpg->acl_node_lock);
702 	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
704 		list_del(&nacl->acl_list);
705 		se_tpg->num_node_acls--;
		/* Drop the lock across the sleeping/locking teardown calls. */
706 		spin_unlock_irq(&se_tpg->acl_node_lock);
708 		core_tpg_wait_for_nacl_pr_ref(nacl);
709 		core_free_device_list_for_node(nacl, se_tpg);
710 		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
712 		spin_lock_irq(&se_tpg->acl_node_lock);
714 	spin_unlock_irq(&se_tpg->acl_node_lock);
716 	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
717 		core_tpg_release_virtual_lun0(se_tpg);
719 	se_tpg->se_tpg_fabric_ptr = NULL;
720 	kfree(se_tpg->tpg_lun_list);
723 EXPORT_SYMBOL(core_tpg_deregister);
/* core_tpg_pre_addlun():
 *
 * Validate and reserve a LUN slot before activation: reject an
 * unpacked_lun beyond TRANSPORT_MAX_LUNS_PER_TPG-1 (-EOVERFLOW) or a
 * slot already ACTIVE (-EINVAL); otherwise return the se_lun slot.
 * The unpacked_lun parameter declaration is missing from this listing.
 */
725 struct se_lun *core_tpg_pre_addlun(
726 	struct se_portal_group *tpg,
731 	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
732 		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
733 			"-1: %u for Target Portal Group: %u\n",
734 			tpg->se_tpg_tfo->get_fabric_name(),
735 			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
736 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
737 		return ERR_PTR(-EOVERFLOW);
740 	spin_lock(&tpg->tpg_lun_lock);
741 	lun = &tpg->tpg_lun_list[unpacked_lun];
742 	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
743 		pr_err("TPG Logical Unit Number: %u is already active"
744 			" on %s Target Portal Group: %u, ignoring request.\n",
745 			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
746 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
747 		spin_unlock(&tpg->tpg_lun_lock);
748 		return ERR_PTR(-EINVAL);
750 	spin_unlock(&tpg->tpg_lun_lock);
/* core_tpg_post_addlun():
 *
 * Complete LUN activation: export the backing device via
 * core_dev_export(), then record lun_access and flip lun_status to
 * ACTIVE under tpg_lun_lock. The lun/lun_access/lun_ptr parameter
 * declarations are missing from this listing.
 */
755 int core_tpg_post_addlun(
756 	struct se_portal_group *tpg,
763 	ret = core_dev_export(lun_ptr, tpg, lun);
767 	spin_lock(&tpg->tpg_lun_lock);
768 	lun->lun_access = lun_access;
769 	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
770 	spin_unlock(&tpg->tpg_lun_lock);
/* Quiesce a LUN before deletion: clear it from the TPG's node ACL
 * mappings, then detach it from any active sessions/commands. */
775 static void core_tpg_shutdown_lun(
776 	struct se_portal_group *tpg,
779 	core_clear_lun_from_tpg(lun, tpg);
780 	transport_clear_lun_from_sessions(lun);
/* core_tpg_pre_dellun():
 *
 * Validate a LUN deletion request: reject an out-of-range unpacked_lun
 * (-EOVERFLOW) or a slot that is not ACTIVE (-ENODEV); otherwise return
 * the se_lun slot. The unpacked_lun parameter declaration is missing
 * from this listing.
 */
783 struct se_lun *core_tpg_pre_dellun(
784 	struct se_portal_group *tpg,
790 	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
791 		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
792 			"-1: %u for Target Portal Group: %u\n",
793 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
794 			TRANSPORT_MAX_LUNS_PER_TPG-1,
795 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
796 		return ERR_PTR(-EOVERFLOW);
799 	spin_lock(&tpg->tpg_lun_lock);
800 	lun = &tpg->tpg_lun_list[unpacked_lun];
801 	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
802 		pr_err("%s Logical Unit Number: %u is not active on"
803 			" Target Portal Group: %u, ignoring request.\n",
804 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
805 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
806 		spin_unlock(&tpg->tpg_lun_lock);
807 		return ERR_PTR(-ENODEV);
809 	spin_unlock(&tpg->tpg_lun_lock);
/* core_tpg_post_dellun():
 *
 * Complete LUN removal: quiesce via core_tpg_shutdown_lun(), unexport
 * the backing device, then mark the slot FREE under tpg_lun_lock.
 * The lun parameter declaration and return are missing from this
 * listing.
 */
814 int core_tpg_post_dellun(
815 	struct se_portal_group *tpg,
818 	core_tpg_shutdown_lun(tpg, lun);
820 	core_dev_unexport(lun->lun_se_dev, tpg, lun);
822 	spin_lock(&tpg->tpg_lun_lock);
823 	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
824 	spin_unlock(&tpg->tpg_lun_lock);