about summary refs log tree commit diff stats
path: root/drivers/target/target_core_tpg.c
diff options
context:
space:
mode:
authorAndy Grover <agrover@redhat.com>2011-07-19 04:55:10 -0400
committerNicholas Bellinger <nab@linux-iscsi.org>2011-07-22 05:37:43 -0400
commite3d6f909ed803d92a5ac9b4a2c087e0eae9b90d0 (patch)
tree2eb65e958a2cc35c896a0e184ec09edcb9076b3b /drivers/target/target_core_tpg.c
parenta8c6da90b823fb94ca76ca0df6bb44e6e205dc87 (diff)
target: Core cleanups from AGrover (round 1)
This patch contains the squashed version of a number of cleanups and minor fixes from Andy's initial series (round 1) for target core this past spring. The condensed log looks like:

target: use errno values instead of returning -1 for everything
target: Rename transport_calc_sg_num to transport_init_task_sg
target: Fix leak in error path in transport_init_task_sg
target/pscsi: Remove pscsi_get_sh() usage
target: Make two runtime checks into WARN_ONs
target: Remove hba queue depth and convert to spin_lock_irq usage
target: dev->dev_status_queue_obj is unused
target: Make struct se_queue_req.cmd type struct se_cmd *
target: Remove __transport_get_qr_from_queue()
target: Rename se_dev->g_se_dev_list to se_dev_node
target: Remove struct se_global
target: Simplify scsi mib index table code
target: Make dev_queue_obj a member of se_device instead of a pointer
target: remove extraneous returns at end of void functions
target: Ensure transport_dump_vpd_ident_type returns null-terminated str
target: Function pointers don't need to use '&' to be assigned
target: Fix comment in __transport_execute_tasks()
target: Misc style cleanups
target: rename struct pr_reservation_template to pr_reservation
target: Remove #defines that just perform indirection
target: Inline transport_get_task_from_execute_queue()
target: Minor header comment fixes

Signed-off-by: Andy Grover <agrover@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target/target_core_tpg.c')
-rw-r--r--drivers/target/target_core_tpg.c139
1 file changed, 74 insertions, 65 deletions
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 5ec745fed931..448129f74cf9 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -44,6 +44,12 @@
44#include <target/target_core_fabric_ops.h> 44#include <target/target_core_fabric_ops.h>
45 45
46#include "target_core_hba.h" 46#include "target_core_hba.h"
47#include "target_core_stat.h"
48
49extern struct se_device *g_lun0_dev;
50
51static DEFINE_SPINLOCK(tpg_lock);
52static LIST_HEAD(tpg_list);
47 53
48/* core_clear_initiator_node_from_tpg(): 54/* core_clear_initiator_node_from_tpg():
49 * 55 *
@@ -68,7 +74,7 @@ static void core_clear_initiator_node_from_tpg(
68 if (!deve->se_lun) { 74 if (!deve->se_lun) {
69 printk(KERN_ERR "%s device entries device pointer is" 75 printk(KERN_ERR "%s device entries device pointer is"
70 " NULL, but Initiator has access.\n", 76 " NULL, but Initiator has access.\n",
71 TPG_TFO(tpg)->get_fabric_name()); 77 tpg->se_tpg_tfo->get_fabric_name());
72 continue; 78 continue;
73 } 79 }
74 80
@@ -171,7 +177,7 @@ void core_tpg_add_node_to_devs(
171 * By default in LIO-Target $FABRIC_MOD, 177 * By default in LIO-Target $FABRIC_MOD,
172 * demo_mode_write_protect is ON, or READ_ONLY; 178 * demo_mode_write_protect is ON, or READ_ONLY;
173 */ 179 */
174 if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) { 180 if (!(tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg))) {
175 if (dev->dev_flags & DF_READ_ONLY) 181 if (dev->dev_flags & DF_READ_ONLY)
176 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 182 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
177 else 183 else
@@ -181,7 +187,7 @@ void core_tpg_add_node_to_devs(
181 * Allow only optical drives to issue R/W in default RO 187 * Allow only optical drives to issue R/W in default RO
182 * demo mode. 188 * demo mode.
183 */ 189 */
184 if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK) 190 if (dev->transport->get_device_type(dev) == TYPE_DISK)
185 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 191 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
186 else 192 else
187 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE; 193 lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
@@ -189,8 +195,8 @@ void core_tpg_add_node_to_devs(
189 195
190 printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s" 196 printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
191 " access for LUN in Demo Mode\n", 197 " access for LUN in Demo Mode\n",
192 TPG_TFO(tpg)->get_fabric_name(), 198 tpg->se_tpg_tfo->get_fabric_name(),
193 TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun, 199 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
194 (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ? 200 (lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
195 "READ-WRITE" : "READ-ONLY"); 201 "READ-WRITE" : "READ-ONLY");
196 202
@@ -211,7 +217,7 @@ static int core_set_queue_depth_for_node(
211{ 217{
212 if (!acl->queue_depth) { 218 if (!acl->queue_depth) {
213 printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0," 219 printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
214 "defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(), 220 "defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
215 acl->initiatorname); 221 acl->initiatorname);
216 acl->queue_depth = 1; 222 acl->queue_depth = 1;
217 } 223 }
@@ -233,7 +239,7 @@ static int core_create_device_list_for_node(struct se_node_acl *nacl)
233 if (!(nacl->device_list)) { 239 if (!(nacl->device_list)) {
234 printk(KERN_ERR "Unable to allocate memory for" 240 printk(KERN_ERR "Unable to allocate memory for"
235 " struct se_node_acl->device_list\n"); 241 " struct se_node_acl->device_list\n");
236 return -1; 242 return -ENOMEM;
237 } 243 }
238 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { 244 for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
239 deve = &nacl->device_list[i]; 245 deve = &nacl->device_list[i];
@@ -262,10 +268,10 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
262 if ((acl)) 268 if ((acl))
263 return acl; 269 return acl;
264 270
265 if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg))) 271 if (!(tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)))
266 return NULL; 272 return NULL;
267 273
268 acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg); 274 acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
269 if (!(acl)) 275 if (!(acl))
270 return NULL; 276 return NULL;
271 277
@@ -274,23 +280,23 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
274 spin_lock_init(&acl->device_list_lock); 280 spin_lock_init(&acl->device_list_lock);
275 spin_lock_init(&acl->nacl_sess_lock); 281 spin_lock_init(&acl->nacl_sess_lock);
276 atomic_set(&acl->acl_pr_ref_count, 0); 282 atomic_set(&acl->acl_pr_ref_count, 0);
277 acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); 283 acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
278 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 284 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
279 acl->se_tpg = tpg; 285 acl->se_tpg = tpg;
280 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 286 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
281 spin_lock_init(&acl->stats_lock); 287 spin_lock_init(&acl->stats_lock);
282 acl->dynamic_node_acl = 1; 288 acl->dynamic_node_acl = 1;
283 289
284 TPG_TFO(tpg)->set_default_node_attributes(acl); 290 tpg->se_tpg_tfo->set_default_node_attributes(acl);
285 291
286 if (core_create_device_list_for_node(acl) < 0) { 292 if (core_create_device_list_for_node(acl) < 0) {
287 TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); 293 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
288 return NULL; 294 return NULL;
289 } 295 }
290 296
291 if (core_set_queue_depth_for_node(tpg, acl) < 0) { 297 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
292 core_free_device_list_for_node(acl, tpg); 298 core_free_device_list_for_node(acl, tpg);
293 TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); 299 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
294 return NULL; 300 return NULL;
295 } 301 }
296 302
@@ -302,9 +308,9 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
302 spin_unlock_bh(&tpg->acl_node_lock); 308 spin_unlock_bh(&tpg->acl_node_lock);
303 309
304 printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s" 310 printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
305 " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), 311 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
306 TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, 312 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
307 TPG_TFO(tpg)->get_fabric_name(), initiatorname); 313 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
308 314
309 return acl; 315 return acl;
310} 316}
@@ -355,8 +361,8 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
355 if (acl->dynamic_node_acl) { 361 if (acl->dynamic_node_acl) {
356 acl->dynamic_node_acl = 0; 362 acl->dynamic_node_acl = 0;
357 printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL" 363 printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
358 " for %s\n", TPG_TFO(tpg)->get_fabric_name(), 364 " for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
359 TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname); 365 tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
360 spin_unlock_bh(&tpg->acl_node_lock); 366 spin_unlock_bh(&tpg->acl_node_lock);
361 /* 367 /*
362 * Release the locally allocated struct se_node_acl 368 * Release the locally allocated struct se_node_acl
@@ -364,15 +370,15 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
364 * a pointer to an existing demo mode node ACL. 370 * a pointer to an existing demo mode node ACL.
365 */ 371 */
366 if (se_nacl) 372 if (se_nacl)
367 TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, 373 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
368 se_nacl); 374 se_nacl);
369 goto done; 375 goto done;
370 } 376 }
371 377
372 printk(KERN_ERR "ACL entry for %s Initiator" 378 printk(KERN_ERR "ACL entry for %s Initiator"
373 " Node %s already exists for TPG %u, ignoring" 379 " Node %s already exists for TPG %u, ignoring"
374 " request.\n", TPG_TFO(tpg)->get_fabric_name(), 380 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
375 initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); 381 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
376 spin_unlock_bh(&tpg->acl_node_lock); 382 spin_unlock_bh(&tpg->acl_node_lock);
377 return ERR_PTR(-EEXIST); 383 return ERR_PTR(-EEXIST);
378 } 384 }
@@ -400,16 +406,16 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
400 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX); 406 acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
401 spin_lock_init(&acl->stats_lock); 407 spin_lock_init(&acl->stats_lock);
402 408
403 TPG_TFO(tpg)->set_default_node_attributes(acl); 409 tpg->se_tpg_tfo->set_default_node_attributes(acl);
404 410
405 if (core_create_device_list_for_node(acl) < 0) { 411 if (core_create_device_list_for_node(acl) < 0) {
406 TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); 412 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
407 return ERR_PTR(-ENOMEM); 413 return ERR_PTR(-ENOMEM);
408 } 414 }
409 415
410 if (core_set_queue_depth_for_node(tpg, acl) < 0) { 416 if (core_set_queue_depth_for_node(tpg, acl) < 0) {
411 core_free_device_list_for_node(acl, tpg); 417 core_free_device_list_for_node(acl, tpg);
412 TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl); 418 tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
413 return ERR_PTR(-EINVAL); 419 return ERR_PTR(-EINVAL);
414 } 420 }
415 421
@@ -420,9 +426,9 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
420 426
421done: 427done:
422 printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" 428 printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
423 " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), 429 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
424 TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, 430 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
425 TPG_TFO(tpg)->get_fabric_name(), initiatorname); 431 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
426 432
427 return acl; 433 return acl;
428} 434}
@@ -457,7 +463,7 @@ int core_tpg_del_initiator_node_acl(
457 /* 463 /*
458 * Determine if the session needs to be closed by our context. 464 * Determine if the session needs to be closed by our context.
459 */ 465 */
460 if (!(TPG_TFO(tpg)->shutdown_session(sess))) 466 if (!(tpg->se_tpg_tfo->shutdown_session(sess)))
461 continue; 467 continue;
462 468
463 spin_unlock_bh(&tpg->session_lock); 469 spin_unlock_bh(&tpg->session_lock);
@@ -465,7 +471,7 @@ int core_tpg_del_initiator_node_acl(
465 * If the $FABRIC_MOD session for the Initiator Node ACL exists, 471 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
466 * forcefully shutdown the $FABRIC_MOD session/nexus. 472 * forcefully shutdown the $FABRIC_MOD session/nexus.
467 */ 473 */
468 TPG_TFO(tpg)->close_session(sess); 474 tpg->se_tpg_tfo->close_session(sess);
469 475
470 spin_lock_bh(&tpg->session_lock); 476 spin_lock_bh(&tpg->session_lock);
471 } 477 }
@@ -476,9 +482,9 @@ int core_tpg_del_initiator_node_acl(
476 core_free_device_list_for_node(acl, tpg); 482 core_free_device_list_for_node(acl, tpg);
477 483
478 printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s" 484 printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
479 " Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(), 485 " Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
480 TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth, 486 tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
481 TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname); 487 tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
482 488
483 return 0; 489 return 0;
484} 490}
@@ -503,8 +509,8 @@ int core_tpg_set_initiator_node_queue_depth(
503 if (!(acl)) { 509 if (!(acl)) {
504 printk(KERN_ERR "Access Control List entry for %s Initiator" 510 printk(KERN_ERR "Access Control List entry for %s Initiator"
505 " Node %s does not exists for TPG %hu, ignoring" 511 " Node %s does not exists for TPG %hu, ignoring"
506 " request.\n", TPG_TFO(tpg)->get_fabric_name(), 512 " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
507 initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg)); 513 initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
508 spin_unlock_bh(&tpg->acl_node_lock); 514 spin_unlock_bh(&tpg->acl_node_lock);
509 return -ENODEV; 515 return -ENODEV;
510 } 516 }
@@ -525,7 +531,7 @@ int core_tpg_set_initiator_node_queue_depth(
525 " operational. To forcefully change the queue" 531 " operational. To forcefully change the queue"
526 " depth and force session reinstatement" 532 " depth and force session reinstatement"
527 " use the \"force=1\" parameter.\n", 533 " use the \"force=1\" parameter.\n",
528 TPG_TFO(tpg)->get_fabric_name(), initiatorname); 534 tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
529 spin_unlock_bh(&tpg->session_lock); 535 spin_unlock_bh(&tpg->session_lock);
530 536
531 spin_lock_bh(&tpg->acl_node_lock); 537 spin_lock_bh(&tpg->acl_node_lock);
@@ -537,7 +543,7 @@ int core_tpg_set_initiator_node_queue_depth(
537 /* 543 /*
538 * Determine if the session needs to be closed by our context. 544 * Determine if the session needs to be closed by our context.
539 */ 545 */
540 if (!(TPG_TFO(tpg)->shutdown_session(sess))) 546 if (!(tpg->se_tpg_tfo->shutdown_session(sess)))
541 continue; 547 continue;
542 548
543 init_sess = sess; 549 init_sess = sess;
@@ -549,7 +555,7 @@ int core_tpg_set_initiator_node_queue_depth(
549 * Change the value in the Node's struct se_node_acl, and call 555 * Change the value in the Node's struct se_node_acl, and call
550 * core_set_queue_depth_for_node() to add the requested queue depth. 556 * core_set_queue_depth_for_node() to add the requested queue depth.
551 * 557 *
552 * Finally call TPG_TFO(tpg)->close_session() to force session 558 * Finally call tpg->se_tpg_tfo->close_session() to force session
553 * reinstatement to occur if there is an active session for the 559 * reinstatement to occur if there is an active session for the
554 * $FABRIC_MOD Initiator Node in question. 560 * $FABRIC_MOD Initiator Node in question.
555 */ 561 */
@@ -561,10 +567,10 @@ int core_tpg_set_initiator_node_queue_depth(
561 * Force session reinstatement if 567 * Force session reinstatement if
562 * core_set_queue_depth_for_node() failed, because we assume 568 * core_set_queue_depth_for_node() failed, because we assume
563 * the $FABRIC_MOD has already the set session reinstatement 569 * the $FABRIC_MOD has already the set session reinstatement
564 * bit from TPG_TFO(tpg)->shutdown_session() called above. 570 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
565 */ 571 */
566 if (init_sess) 572 if (init_sess)
567 TPG_TFO(tpg)->close_session(init_sess); 573 tpg->se_tpg_tfo->close_session(init_sess);
568 574
569 spin_lock_bh(&tpg->acl_node_lock); 575 spin_lock_bh(&tpg->acl_node_lock);
570 if (dynamic_acl) 576 if (dynamic_acl)
@@ -578,12 +584,12 @@ int core_tpg_set_initiator_node_queue_depth(
578 * forcefully shutdown the $FABRIC_MOD session/nexus. 584 * forcefully shutdown the $FABRIC_MOD session/nexus.
579 */ 585 */
580 if (init_sess) 586 if (init_sess)
581 TPG_TFO(tpg)->close_session(init_sess); 587 tpg->se_tpg_tfo->close_session(init_sess);
582 588
583 printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator" 589 printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
584 " Node: %s on %s Target Portal Group: %u\n", queue_depth, 590 " Node: %s on %s Target Portal Group: %u\n", queue_depth,
585 initiatorname, TPG_TFO(tpg)->get_fabric_name(), 591 initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
586 TPG_TFO(tpg)->tpg_get_tag(tpg)); 592 tpg->se_tpg_tfo->tpg_get_tag(tpg));
587 593
588 spin_lock_bh(&tpg->acl_node_lock); 594 spin_lock_bh(&tpg->acl_node_lock);
589 if (dynamic_acl) 595 if (dynamic_acl)
@@ -597,7 +603,7 @@ EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
597static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg) 603static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
598{ 604{
599 /* Set in core_dev_setup_virtual_lun0() */ 605 /* Set in core_dev_setup_virtual_lun0() */
600 struct se_device *dev = se_global->g_lun0_dev; 606 struct se_device *dev = g_lun0_dev;
601 struct se_lun *lun = &se_tpg->tpg_virt_lun0; 607 struct se_lun *lun = &se_tpg->tpg_virt_lun0;
602 u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY; 608 u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
603 int ret; 609 int ret;
@@ -614,7 +620,7 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
614 620
615 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev); 621 ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
616 if (ret < 0) 622 if (ret < 0)
617 return -1; 623 return ret;
618 624
619 return 0; 625 return 0;
620} 626}
@@ -663,7 +669,7 @@ int core_tpg_register(
663 se_tpg->se_tpg_wwn = se_wwn; 669 se_tpg->se_tpg_wwn = se_wwn;
664 atomic_set(&se_tpg->tpg_pr_ref_count, 0); 670 atomic_set(&se_tpg->tpg_pr_ref_count, 0);
665 INIT_LIST_HEAD(&se_tpg->acl_node_list); 671 INIT_LIST_HEAD(&se_tpg->acl_node_list);
666 INIT_LIST_HEAD(&se_tpg->se_tpg_list); 672 INIT_LIST_HEAD(&se_tpg->se_tpg_node);
667 INIT_LIST_HEAD(&se_tpg->tpg_sess_list); 673 INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
668 spin_lock_init(&se_tpg->acl_node_lock); 674 spin_lock_init(&se_tpg->acl_node_lock);
669 spin_lock_init(&se_tpg->session_lock); 675 spin_lock_init(&se_tpg->session_lock);
@@ -676,9 +682,9 @@ int core_tpg_register(
676 } 682 }
677 } 683 }
678 684
679 spin_lock_bh(&se_global->se_tpg_lock); 685 spin_lock_bh(&tpg_lock);
680 list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list); 686 list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
681 spin_unlock_bh(&se_global->se_tpg_lock); 687 spin_unlock_bh(&tpg_lock);
682 688
683 printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for" 689 printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
684 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(), 690 " endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
@@ -697,13 +703,13 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
697 printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" 703 printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
698 " for endpoint: %s Portal Tag %u\n", 704 " for endpoint: %s Portal Tag %u\n",
699 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 705 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
700 "Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(), 706 "Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
701 TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg), 707 se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
702 TPG_TFO(se_tpg)->tpg_get_tag(se_tpg)); 708 se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
703 709
704 spin_lock_bh(&se_global->se_tpg_lock); 710 spin_lock_bh(&tpg_lock);
705 list_del(&se_tpg->se_tpg_list); 711 list_del(&se_tpg->se_tpg_node);
706 spin_unlock_bh(&se_global->se_tpg_lock); 712 spin_unlock_bh(&tpg_lock);
707 713
708 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) 714 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
709 cpu_relax(); 715 cpu_relax();
@@ -721,7 +727,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
721 727
722 core_tpg_wait_for_nacl_pr_ref(nacl); 728 core_tpg_wait_for_nacl_pr_ref(nacl);
723 core_free_device_list_for_node(nacl, se_tpg); 729 core_free_device_list_for_node(nacl, se_tpg);
724 TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl); 730 se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);
725 731
726 spin_lock_bh(&se_tpg->acl_node_lock); 732 spin_lock_bh(&se_tpg->acl_node_lock);
727 } 733 }
@@ -745,9 +751,9 @@ struct se_lun *core_tpg_pre_addlun(
745 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 751 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
746 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" 752 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
747 "-1: %u for Target Portal Group: %u\n", 753 "-1: %u for Target Portal Group: %u\n",
748 TPG_TFO(tpg)->get_fabric_name(), 754 tpg->se_tpg_tfo->get_fabric_name(),
749 unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, 755 unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
750 TPG_TFO(tpg)->tpg_get_tag(tpg)); 756 tpg->se_tpg_tfo->tpg_get_tag(tpg));
751 return ERR_PTR(-EOVERFLOW); 757 return ERR_PTR(-EOVERFLOW);
752 } 758 }
753 759
@@ -756,8 +762,8 @@ struct se_lun *core_tpg_pre_addlun(
756 if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) { 762 if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
757 printk(KERN_ERR "TPG Logical Unit Number: %u is already active" 763 printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
758 " on %s Target Portal Group: %u, ignoring request.\n", 764 " on %s Target Portal Group: %u, ignoring request.\n",
759 unpacked_lun, TPG_TFO(tpg)->get_fabric_name(), 765 unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
760 TPG_TFO(tpg)->tpg_get_tag(tpg)); 766 tpg->se_tpg_tfo->tpg_get_tag(tpg));
761 spin_unlock(&tpg->tpg_lun_lock); 767 spin_unlock(&tpg->tpg_lun_lock);
762 return ERR_PTR(-EINVAL); 768 return ERR_PTR(-EINVAL);
763 } 769 }
@@ -772,8 +778,11 @@ int core_tpg_post_addlun(
772 u32 lun_access, 778 u32 lun_access,
773 void *lun_ptr) 779 void *lun_ptr)
774{ 780{
775 if (core_dev_export(lun_ptr, tpg, lun) < 0) 781 int ret;
776 return -1; 782
783 ret = core_dev_export(lun_ptr, tpg, lun);
784 if (ret < 0)
785 return ret;
777 786
778 spin_lock(&tpg->tpg_lun_lock); 787 spin_lock(&tpg->tpg_lun_lock);
779 lun->lun_access = lun_access; 788 lun->lun_access = lun_access;
@@ -801,9 +810,9 @@ struct se_lun *core_tpg_pre_dellun(
801 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { 810 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
802 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG" 811 printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
803 "-1: %u for Target Portal Group: %u\n", 812 "-1: %u for Target Portal Group: %u\n",
804 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, 813 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
805 TRANSPORT_MAX_LUNS_PER_TPG-1, 814 TRANSPORT_MAX_LUNS_PER_TPG-1,
806 TPG_TFO(tpg)->tpg_get_tag(tpg)); 815 tpg->se_tpg_tfo->tpg_get_tag(tpg));
807 return ERR_PTR(-EOVERFLOW); 816 return ERR_PTR(-EOVERFLOW);
808 } 817 }
809 818
@@ -812,8 +821,8 @@ struct se_lun *core_tpg_pre_dellun(
812 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) { 821 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
813 printk(KERN_ERR "%s Logical Unit Number: %u is not active on" 822 printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
814 " Target Portal Group: %u, ignoring request.\n", 823 " Target Portal Group: %u, ignoring request.\n",
815 TPG_TFO(tpg)->get_fabric_name(), unpacked_lun, 824 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
816 TPG_TFO(tpg)->tpg_get_tag(tpg)); 825 tpg->se_tpg_tfo->tpg_get_tag(tpg));
817 spin_unlock(&tpg->tpg_lun_lock); 826 spin_unlock(&tpg->tpg_lun_lock);
818 return ERR_PTR(-ENODEV); 827 return ERR_PTR(-ENODEV);
819 } 828 }