/*******************************************************************************
 * Filename:  target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
 * Copyright (c) 2005, 2006, 2007 SBE, Inc.
 * Copyright (c) 2007-2010 Rising Tide Systems
 * Copyright (c) 2008-2010 Linux-iSCSI.org
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>

#include "target_core_hba.h"

/*      core_clear_initiator_node_from_tpg():
 *
 *      Revoke every mapped LUN that an initiator node ACL still has access
 *      to, removing and freeing the matching struct se_lun_acl from each
 *      LUN's ACL list along the way.
 */
static void core_clear_initiator_node_from_tpg(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        int i;
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_lun_acl *acl, *acl_tmp, *found_acl;

        spin_lock_irq(&nacl->device_list_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
                        continue;

                if (!deve->se_lun) {
                        printk(KERN_ERR "%s device entry's se_lun pointer is"
                                " NULL, but Initiator has access.\n",
                                TPG_TFO(tpg)->get_fabric_name());
                        continue;
                }

                lun = deve->se_lun;
                spin_unlock_irq(&nacl->device_list_lock);
                core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
                        TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);

                /*
                 * Use an explicit found pointer: after a complete list walk
                 * the list_for_each_entry_safe() iterator is never NULL, so
                 * testing the iterator itself cannot detect a failed lookup.
                 */
                found_acl = NULL;
                spin_lock(&lun->lun_acl_lock);
                list_for_each_entry_safe(acl, acl_tmp,
                                        &lun->lun_acl_list, lacl_list) {
                        if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
                            (acl->mapped_lun == deve->mapped_lun)) {
                                found_acl = acl;
                                break;
                        }
                }

                if (!found_acl) {
                        printk(KERN_ERR "Unable to locate struct se_lun_acl"
                                " for %s, mapped_lun: %u\n", nacl->initiatorname,
                                deve->mapped_lun);
                        spin_unlock(&lun->lun_acl_lock);
                        spin_lock_irq(&nacl->device_list_lock);
                        continue;
                }

                list_del(&found_acl->lacl_list);
                spin_unlock(&lun->lun_acl_lock);

                spin_lock_irq(&nacl->device_list_lock);
                kfree(found_acl);
        }
        spin_unlock_irq(&nacl->device_list_lock);
}

/*      __core_tpg_get_initiator_node_acl():
 *
 *      spin_lock_bh(&tpg->acl_node_lock); must be held when calling.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        const char *initiatorname)
{
        struct se_node_acl *acl;

        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname))
                        return acl;
        }

        return NULL;
}

/*      core_tpg_get_initiator_node_acl():
 *
 *      Locate an explicitly configured (non-dynamic) node ACL by initiator
 *      name, taking tpg->acl_node_lock internally.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        spin_lock_bh(&tpg->acl_node_lock);
        list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                if (!strcmp(acl->initiatorname, initiatorname) &&
                    !acl->dynamic_node_acl) {
                        spin_unlock_bh(&tpg->acl_node_lock);
                        return acl;
                }
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        return NULL;
}

/*      core_tpg_add_node_to_devs():
 *
 *      Grant a new (demo mode) node ACL access to every active LUN in the
 *      TPG, choosing READ-ONLY or READ-WRITE access based on the fabric's
 *      demo_mode_write_protect setting.
 */
void core_tpg_add_node_to_devs(
        struct se_node_acl *acl,
        struct se_portal_group *tpg)
{
        int i = 0;
        u32 lun_access = 0;
        struct se_lun *lun;
        struct se_device *dev;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = &tpg->tpg_lun_list[i];
                if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);

                dev = lun->lun_se_dev;
                /*
                 * By default in a LIO-Target $FABRIC_MOD,
                 * demo_mode_write_protect is enabled, i.e. LUNs default to
                 * READ-ONLY access.
                 */
                if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
                        if (dev->dev_flags & DF_READ_ONLY)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                } else {
                        /*
                         * Allow only non-disk (e.g. optical) devices to issue
                         * R/W in the default read-only demo mode.
                         */
                        if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
                                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
                        else
                                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
                }

                printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
                        " access for LUN in Demo Mode\n",
                        TPG_TFO(tpg)->get_fabric_name(),
                        TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
                        (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
                        "READ-WRITE" : "READ-ONLY");

                core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
                                lun_access, acl, tpg, 1);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}

/*      core_set_queue_depth_for_node():
 *
 *      Sanity-check a node ACL's queue depth, forcing a minimum of 1.
 */
static int core_set_queue_depth_for_node(
        struct se_portal_group *tpg,
        struct se_node_acl *acl)
{
        if (!acl->queue_depth) {
                printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
                        " defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
                        acl->initiatorname);
                acl->queue_depth = 1;
        }

        return 0;
}

/*      core_create_device_list_for_node():
 *
 *      Allocate and initialize a node ACL's per-LUN device list of
 *      TRANSPORT_MAX_LUNS_PER_TPG struct se_dev_entry entries.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;
        int i;

        nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
                                TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
        if (!nacl->device_list) {
                printk(KERN_ERR "Unable to allocate memory for"
                        " struct se_node_acl->device_list\n");
                return -1;
        }
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                deve = &nacl->device_list[i];

                atomic_set(&deve->ua_count, 0);
                atomic_set(&deve->pr_ref_count, 0);
                spin_lock_init(&deve->ua_lock);
                INIT_LIST_HEAD(&deve->alua_port_list);
                INIT_LIST_HEAD(&deve->ua_list);
        }

        return 0;
}

/*      core_tpg_check_initiator_node_acl():
 *
 *      Return an existing node ACL for initiatorname, or, if the fabric
 *      allows demo mode, allocate and fully initialize a dynamic node ACL
 *      with access to all active LUNs in the TPG.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
        struct se_portal_group *tpg,
        unsigned char *initiatorname)
{
        struct se_node_acl *acl;

        acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl)
                return acl;

        if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
                return NULL;

        acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
        if (!acl)
                return NULL;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        atomic_set(&acl->mib_ref_count, 0);
        acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);
        acl->dynamic_node_acl = 1;

        TPG_TFO(tpg)->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return NULL;
        }

        core_tpg_add_node_to_devs(acl, tpg);

        spin_lock_bh(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_bh(&tpg->acl_node_lock);

        printk(KERN_INFO "%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d"
                " for %s Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
                TPG_TFO(tpg)->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

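/*
 * Illustrative usage sketch (not part of the original file): a fabric
 * module would typically call core_tpg_check_initiator_node_acl() from
 * its login path before allocating a session, for example:
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(&tpg->se_tpg,
 *						initiator_name);
 *	if (!se_nacl)
 *		fail the login (demo mode disabled and no explicit ACL)
 *
 * The tpg and initiator_name names above are hypothetical.
 */

/*      core_tpg_wait_for_nacl_pr_ref():
 *
 *      Busy-wait until all SPC-3 persistent reservation references to this
 *      node ACL have been dropped.
 */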
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->acl_pr_ref_count) != 0)
                cpu_relax();
}

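/*      core_tpg_wait_for_mib_ref():
 *
 *      Busy-wait until all MIB statistics references to this node ACL have
 *      been dropped.
 */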
void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
{
        while (atomic_read(&nacl->mib_ref_count) != 0)
                cpu_relax();
}

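/*      core_tpg_clear_object_luns():
 *
 *      Delete every active LUN in the TPG that has a backing device, via
 *      core_dev_del_lun().
 */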
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
        int i, ret;
        struct se_lun *lun;

        spin_lock(&tpg->tpg_lun_lock);
        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = &tpg->tpg_lun_list[i];

                if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
                    (lun->lun_se_dev == NULL))
                        continue;

                spin_unlock(&tpg->tpg_lun_lock);
                ret = core_dev_del_lun(tpg, lun->unpacked_lun);
                spin_lock(&tpg->tpg_lun_lock);
        }
        spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/*      core_tpg_add_initiator_node_acl():
 *
 *      Add an explicitly configured node ACL to a TPG, converting an
 *      existing dynamic (demo mode) ACL in place when one is present.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *se_nacl,
        const char *initiatorname,
        u32 queue_depth)
{
        struct se_node_acl *acl = NULL;

        spin_lock_bh(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (acl) {
                if (acl->dynamic_node_acl) {
                        acl->dynamic_node_acl = 0;
                        printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
                                " for %s\n", TPG_TFO(tpg)->get_fabric_name(),
                                TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
                        spin_unlock_bh(&tpg->acl_node_lock);
                        /*
                         * Release the locally allocated struct se_node_acl
                         * because core_tpg_add_initiator_node_acl() returned
                         * a pointer to an existing demo mode node ACL.
                         */
                        if (se_nacl)
                                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
                                                        se_nacl);
                        goto done;
                }

                printk(KERN_ERR "ACL entry for %s Initiator"
                        " Node %s already exists for TPG %u, ignoring"
                        " request.\n", TPG_TFO(tpg)->get_fabric_name(),
                        initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock_bh(&tpg->acl_node_lock);
                return ERR_PTR(-EEXIST);
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        if (!se_nacl) {
                printk(KERN_ERR "struct se_node_acl pointer is NULL\n");
                return ERR_PTR(-EINVAL);
        }
        /*
         * For v4.x logic the struct se_node_acl is hanging off a fabric
         * dependent structure allocated via
         * struct target_core_fabric_ops->fabric_make_nodeacl()
         */
        acl = se_nacl;

        INIT_LIST_HEAD(&acl->acl_list);
        INIT_LIST_HEAD(&acl->acl_sess_list);
        spin_lock_init(&acl->device_list_lock);
        spin_lock_init(&acl->nacl_sess_lock);
        atomic_set(&acl->acl_pr_ref_count, 0);
        acl->queue_depth = queue_depth;
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
        spin_lock_init(&acl->stats_lock);

        TPG_TFO(tpg)->set_default_node_attributes(acl);

        if (core_create_device_list_for_node(acl) < 0) {
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-ENOMEM);
        }

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                core_free_device_list_for_node(acl, tpg);
                TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
                return ERR_PTR(-EINVAL);
        }

        spin_lock_bh(&tpg->acl_node_lock);
        list_add_tail(&acl->acl_list, &tpg->acl_node_list);
        tpg->num_node_acls++;
        spin_unlock_bh(&tpg->acl_node_lock);

done:
        printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
                TPG_TFO(tpg)->get_fabric_name(), initiatorname);

        return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
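
/*
 * Illustrative usage sketch (not part of the original file): a fabric
 * module's configfs ->fabric_make_nodeacl() callback would typically
 * allocate its own nodeacl container and pass the embedded
 * struct se_node_acl here, for example:
 *
 *	se_nacl = core_tpg_add_initiator_node_acl(&tpg->se_tpg,
 *			&my_nacl->se_node_acl, name, my_default_depth);
 *	if (IS_ERR(se_nacl))
 *		handle the error
 *
 * The my_nacl and my_default_depth names above are hypothetical.
 */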

/*      core_tpg_del_initiator_node_acl():
 *
 *      Remove a node ACL from a TPG, shutting down any active sessions for
 *      the initiator and releasing the ACL's device list.
 */
int core_tpg_del_initiator_node_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *acl,
        int force)
{
        struct se_session *sess, *sess_tmp;
        int dynamic_acl = 0;

        spin_lock_bh(&tpg->acl_node_lock);
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        list_del(&acl->acl_list);
        tpg->num_node_acls--;
        spin_unlock_bh(&tpg->acl_node_lock);

        spin_lock_bh(&tpg->session_lock);
        list_for_each_entry_safe(sess, sess_tmp,
                                &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!TPG_TFO(tpg)->shutdown_session(sess))
                        continue;

                spin_unlock_bh(&tpg->session_lock);
                /*
                 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
                 * forcefully shutdown the $FABRIC_MOD session/nexus.
                 */
                TPG_TFO(tpg)->close_session(sess);

                spin_lock_bh(&tpg->session_lock);
        }
        spin_unlock_bh(&tpg->session_lock);

        core_tpg_wait_for_nacl_pr_ref(acl);
        core_tpg_wait_for_mib_ref(acl);
        core_clear_initiator_node_from_tpg(acl, tpg);
        core_free_device_list_for_node(acl, tpg);

        printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
                " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
                TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);

        return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/*      core_tpg_set_initiator_node_queue_depth():
 *
 *      Change the queue depth for an initiator node ACL, optionally forcing
 *      session reinstatement when the initiator has an active session.
 */
int core_tpg_set_initiator_node_queue_depth(
        struct se_portal_group *tpg,
        unsigned char *initiatorname,
        u32 queue_depth,
        int force)
{
        struct se_session *sess, *init_sess = NULL;
        struct se_node_acl *acl;
        int dynamic_acl = 0;

        spin_lock_bh(&tpg->acl_node_lock);
        acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
        if (!acl) {
                printk(KERN_ERR "Access Control List entry for %s Initiator"
                        " Node %s does not exist for TPG %hu, ignoring"
                        " request.\n", TPG_TFO(tpg)->get_fabric_name(),
                        initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock_bh(&tpg->acl_node_lock);
                return -ENODEV;
        }
        if (acl->dynamic_node_acl) {
                acl->dynamic_node_acl = 0;
                dynamic_acl = 1;
        }
        spin_unlock_bh(&tpg->acl_node_lock);

        spin_lock_bh(&tpg->session_lock);
        list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
                if (sess->se_node_acl != acl)
                        continue;

                if (!force) {
                        printk(KERN_ERR "Unable to change queue depth for %s"
                                " Initiator Node: %s while session is"
                                " operational.  To forcefully change the queue"
                                " depth and force session reinstatement"
                                " use the \"force=1\" parameter.\n",
                                TPG_TFO(tpg)->get_fabric_name(), initiatorname);
                        spin_unlock_bh(&tpg->session_lock);

                        spin_lock_bh(&tpg->acl_node_lock);
                        if (dynamic_acl)
                                acl->dynamic_node_acl = 1;
                        spin_unlock_bh(&tpg->acl_node_lock);
                        return -EEXIST;
                }
                /*
                 * Determine if the session needs to be closed by our context.
                 */
                if (!TPG_TFO(tpg)->shutdown_session(sess))
                        continue;

                init_sess = sess;
                break;
        }

        /*
         * User has requested to change the queue depth for an Initiator Node.
         * Change the value in the Node's struct se_node_acl, and call
         * core_set_queue_depth_for_node() to apply the requested queue depth.
         *
         * Finally call TPG_TFO(tpg)->close_session() to force session
         * reinstatement to occur if there is an active session for the
         * $FABRIC_MOD Initiator Node in question.
         */
        acl->queue_depth = queue_depth;

        if (core_set_queue_depth_for_node(tpg, acl) < 0) {
                spin_unlock_bh(&tpg->session_lock);
                /*
                 * Force session reinstatement if
                 * core_set_queue_depth_for_node() failed, because we assume
                 * the $FABRIC_MOD has already set the session reinstatement
                 * bit from TPG_TFO(tpg)->shutdown_session() called above.
                 */
                if (init_sess)
                        TPG_TFO(tpg)->close_session(init_sess);

                spin_lock_bh(&tpg->acl_node_lock);
                if (dynamic_acl)
                        acl->dynamic_node_acl = 1;
                spin_unlock_bh(&tpg->acl_node_lock);
                return -EINVAL;
        }
        spin_unlock_bh(&tpg->session_lock);
        /*
         * If the $FABRIC_MOD session for the Initiator Node ACL exists,
         * forcefully shutdown the $FABRIC_MOD session/nexus.
         */
        if (init_sess)
                TPG_TFO(tpg)->close_session(init_sess);

        printk(KERN_INFO "Successfully changed queue depth to: %d for Initiator"
                " Node: %s on %s Target Portal Group: %u\n", queue_depth,
                initiatorname, TPG_TFO(tpg)->get_fabric_name(),
                TPG_TFO(tpg)->tpg_get_tag(tpg));

        spin_lock_bh(&tpg->acl_node_lock);
        if (dynamic_acl)
                acl->dynamic_node_acl = 1;
        spin_unlock_bh(&tpg->acl_node_lock);

        return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

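/*      core_tpg_setup_virtual_lun0():
 *
 *      Initialize and export the TPG's virtual LUN 0, backed by the global
 *      se_global->g_lun0_dev device.
 */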
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
        /* Set in core_dev_setup_virtual_lun0() */
        struct se_device *dev = se_global->g_lun0_dev;
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;
        u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        int ret;

        lun->unpacked_lun = 0;
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        atomic_set(&lun->lun_acl_count, 0);
        init_completion(&lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&lun->lun_acl_list);
        INIT_LIST_HEAD(&lun->lun_cmd_list);
        spin_lock_init(&lun->lun_acl_lock);
        spin_lock_init(&lun->lun_cmd_lock);
        spin_lock_init(&lun->lun_sep_lock);

        ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
        if (ret < 0)
                return -1;

        return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
        struct se_lun *lun = &se_tpg->tpg_virt_lun0;

        core_tpg_post_dellun(se_tpg, lun);
}

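/*      core_tpg_register():
 *
 *      Initialize a struct se_portal_group for a fabric module: allocate
 *      the TPG LUN table, set up virtual LUN 0 for normal TPGs, and add
 *      the TPG to the global g_se_tpg_list.
 */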
int core_tpg_register(
        struct target_core_fabric_ops *tfo,
        struct se_wwn *se_wwn,
        struct se_portal_group *se_tpg,
        void *tpg_fabric_ptr,
        int se_tpg_type)
{
        struct se_lun *lun;
        u32 i;

        se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
                                TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
        if (!se_tpg->tpg_lun_list) {
                printk(KERN_ERR "Unable to allocate struct se_portal_group->"
                                "tpg_lun_list\n");
                return -ENOMEM;
        }

        for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
                lun = &se_tpg->tpg_lun_list[i];
                lun->unpacked_lun = i;
                lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
                atomic_set(&lun->lun_acl_count, 0);
                init_completion(&lun->lun_shutdown_comp);
                INIT_LIST_HEAD(&lun->lun_acl_list);
                INIT_LIST_HEAD(&lun->lun_cmd_list);
                spin_lock_init(&lun->lun_acl_lock);
                spin_lock_init(&lun->lun_cmd_lock);
                spin_lock_init(&lun->lun_sep_lock);
        }

        se_tpg->se_tpg_type = se_tpg_type;
        se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
        se_tpg->se_tpg_tfo = tfo;
        se_tpg->se_tpg_wwn = se_wwn;
        atomic_set(&se_tpg->tpg_pr_ref_count, 0);
        INIT_LIST_HEAD(&se_tpg->acl_node_list);
        INIT_LIST_HEAD(&se_tpg->se_tpg_list);
        INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
        spin_lock_init(&se_tpg->acl_node_lock);
        spin_lock_init(&se_tpg->session_lock);
        spin_lock_init(&se_tpg->tpg_lun_lock);

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
                if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
                        /*
                         * se_tpg is owned by the fabric module; release only
                         * the LUN table allocated above.
                         */
                        kfree(se_tpg->tpg_lun_list);
                        return -ENOMEM;
                }
        }

        spin_lock_bh(&se_global->se_tpg_lock);
        list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
        spin_unlock_bh(&se_global->se_tpg_lock);

        printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
                " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
                "None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

        return 0;
}
EXPORT_SYMBOL(core_tpg_register);

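/*
 * Illustrative usage sketch (not part of the original file): a fabric
 * module's configfs ->fabric_make_tpg() callback would typically register
 * its newly allocated TPG like:
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &tpg->se_tpg,
 *				tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 *
 * The my_fabric_ops and tpg names above are hypothetical.
 */

/*      core_tpg_deregister():
 *
 *      Reverse core_tpg_register(): remove the TPG from the global list,
 *      wait for outstanding PR references to drop, release virtual LUN 0
 *      for normal TPGs, and free the TPG LUN table.
 */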
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
        /*
         * Pass the arguments in format-string order: the fabric name fills
         * TARGET_CORE[%s], the TPG type fills the second %s.
         */
        printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
                " for endpoint: %s Portal Tag %u\n",
                TPG_TFO(se_tpg)->get_fabric_name(),
                (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
                "Normal" : "Discovery",
                TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
                TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));

        spin_lock_bh(&se_global->se_tpg_lock);
        list_del(&se_tpg->se_tpg_list);
        spin_unlock_bh(&se_global->se_tpg_lock);

        while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
                cpu_relax();

        if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
                core_tpg_release_virtual_lun0(se_tpg);

        se_tpg->se_tpg_fabric_ptr = NULL;
        kfree(se_tpg->tpg_lun_list);
        return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

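/*      core_tpg_pre_addlun():
 *
 *      Validate unpacked_lun and return the matching inactive struct se_lun
 *      slot from the TPG LUN table, or an ERR_PTR on failure.
 */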
struct se_lun *core_tpg_pre_addlun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        TPG_TFO(tpg)->get_fabric_name(),
                        unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = &tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
                printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
                        " on %s Target Portal Group: %u, ignoring request.\n",
                        unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-EINVAL);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

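/*      core_tpg_post_addlun():
 *
 *      Export the backing device at the new LUN and mark the LUN active
 *      with the requested access flags.
 */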
int core_tpg_post_addlun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
        void *lun_ptr)
{
        if (core_dev_export(lun_ptr, tpg, lun) < 0)
                return -1;

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_access = lun_access;
        lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}

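/*      core_tpg_shutdown_lun():
 *
 *      Clear the LUN from all node ACLs and sessions before the LUN is
 *      removed from the TPG.
 */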
static void core_tpg_shutdown_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_clear_lun_from_tpg(lun, tpg);
        transport_clear_lun_from_sessions(lun);
}

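/*      core_tpg_pre_dellun():
 *
 *      Validate unpacked_lun and return the matching active struct se_lun
 *      to be removed, or an ERR_PTR on failure.
 */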
struct se_lun *core_tpg_pre_dellun(
        struct se_portal_group *tpg,
        u32 unpacked_lun,
        int *ret)
{
        struct se_lun *lun;

        if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
                printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
                        "-1: %u for Target Portal Group: %u\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TRANSPORT_MAX_LUNS_PER_TPG-1,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                return ERR_PTR(-EOVERFLOW);
        }

        spin_lock(&tpg->tpg_lun_lock);
        lun = &tpg->tpg_lun_list[unpacked_lun];
        if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
                printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
                        " Target Portal Group: %u, ignoring request.\n",
                        TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
                        TPG_TFO(tpg)->tpg_get_tag(tpg));
                spin_unlock(&tpg->tpg_lun_lock);
                return ERR_PTR(-ENODEV);
        }
        spin_unlock(&tpg->tpg_lun_lock);

        return lun;
}

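/*      core_tpg_post_dellun():
 *
 *      Shut down the LUN, unexport its backing device, and mark the LUN
 *      slot free again.
 */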
int core_tpg_post_dellun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        core_tpg_shutdown_lun(tpg, lun);

        core_dev_unexport(lun->lun_se_dev, tpg, lun);

        spin_lock(&tpg->tpg_lun_lock);
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        spin_unlock(&tpg->tpg_lun_lock);

        return 0;
}