| author | Nicholas Bellinger <nab@linux-iscsi.org> | 2016-01-08 01:15:06 -0500 |
|---|---|---|
| committer | Nicholas Bellinger <nab@linux-iscsi.org> | 2016-01-20 04:34:14 -0500 |
| commit | d36ad77f702356afb1009d2987b0ab55da4c7d57 (patch) | |
| tree | 54b5d1a821c215439380dbb09a581a5d67c93706 /drivers/target/target_core_tpg.c | |
| parent | 26a99c19f810b2593410899a5b304b21b47428a6 (diff) | |
target: Convert ACL change queue_depth se_session reference usage
This patch converts core_tpg_set_initiator_node_queue_depth()
to use struct se_node_acl->acl_sess_list when performing
explicit se_tpg_tfo->shutdown_session() for active sessions,
in order for the new se_node_acl->queue_depth to take effect.

This follows how core_tpg_del_initiator_node_acl() currently
works when invoking se_tpg_tfo->shutdown_session(), and ahead
of the next patch to take se_node_acl->acl_kref during lookup,
the extra get_initiator_node_acl() can go away.

To achieve this, change target_get_session() to use
kref_get_unless_zero() and propagate the return value up to
callers, so they can tell when a session is already being
released (see the sketch below).

This is safe because configfs already holds a reference on
se_node_acl->acl_group, and shutdown within
core_tpg_del_initiator_node_acl() won't occur until the sys_write()
to the core_tpg_set_initiator_node_queue_depth() attribute returns
back to user-space.
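The target_get_session() conversion itself lands in target_core_transport.c, which is outside this page's diffstat. A minimal sketch of the kref_get_unless_zero() pattern described above (the exact helper body is assumed, not shown in this diff):

```c
/*
 * Sketch only -- target_core_transport.c is not part of this diffstat.
 * kref_get_unless_zero() fails once sess_kref has already dropped to
 * zero, i.e. once the final target_put_session() has begun releasing
 * the session, so callers can simply skip such sessions.
 */
int target_get_session(struct se_session *se_sess)
{
	return kref_get_unless_zero(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);
```

This is what lets both core_tpg_del_initiator_node_acl() and the reworked core_tpg_set_initiator_node_queue_depth() in the diff below treat a failed get as "session already being released" and continue.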
Also, drop the left-over iscsi-target hack and obtain
se_portal_group->session_lock inside lio_tpg_shutdown_session()
itself. Remove the iscsi-target wrapper along with the now-unused
se_tpg and force parameters and their associated code.
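The iscsi-target side of that cleanup is also outside this diffstat. A minimal sketch of the resulting shape, assuming the pre-existing session reinstatement/logout checks stay as they were (elided below):

```c
/* Sketch only -- iscsi_target_configfs.c is not part of this diffstat. */
static int lio_tpg_shutdown_session(struct se_session *se_sess)
{
	struct iscsi_session *sess = se_sess->fabric_sess_ptr;
	struct se_portal_group *se_tpg = se_sess->se_tpg;

	/*
	 * The callback now takes se_portal_group->session_lock itself,
	 * instead of a core-side wrapper holding it across the call.
	 */
	spin_lock_bh(&se_tpg->session_lock);
	/* ... existing session reinstatement / logout checks ... */
	spin_unlock_bh(&se_tpg->session_lock);

	return 1;
}
```

A nonzero return tells the core that this call initiated the shutdown; the rewritten loop in the diff below uses that to decide whether to drop an extra session reference.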
Reported-by: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagig@mellanox.com>
Cc: Hannes Reinecke <hare@suse.de>
Cc: Andy Grover <agrover@redhat.com>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Diffstat (limited to 'drivers/target/target_core_tpg.c')
| -rw-r--r-- | drivers/target/target_core_tpg.c | 152 |
1 file changed, 45 insertions(+), 107 deletions(-)
```diff
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 62103a8cbe72..67be44da29ff 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -157,28 +157,25 @@ void core_tpg_add_node_to_devs(
 	mutex_unlock(&tpg->tpg_lun_mutex);
 }
 
-/* core_set_queue_depth_for_node():
- *
- *
- */
-static int core_set_queue_depth_for_node(
-	struct se_portal_group *tpg,
-	struct se_node_acl *acl)
+static void
+target_set_nacl_queue_depth(struct se_portal_group *tpg,
+		struct se_node_acl *acl, u32 queue_depth)
 {
+	acl->queue_depth = queue_depth;
+
 	if (!acl->queue_depth) {
-		pr_err("Queue depth for %s Initiator Node: %s is 0,"
+		pr_warn("Queue depth for %s Initiator Node: %s is 0,"
 			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
 			acl->initiatorname);
 		acl->queue_depth = 1;
 	}
-
-	return 0;
 }
 
 static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
 		const unsigned char *initiatorname)
 {
 	struct se_node_acl *acl;
+	u32 queue_depth;
 
 	acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
 			GFP_KERNEL);
@@ -193,24 +190,20 @@ static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
 	spin_lock_init(&acl->nacl_sess_lock);
 	mutex_init(&acl->lun_entry_mutex);
 	atomic_set(&acl->acl_pr_ref_count, 0);
+
 	if (tpg->se_tpg_tfo->tpg_get_default_depth)
-		acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
+		queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
 	else
-		acl->queue_depth = 1;
+		queue_depth = 1;
+	target_set_nacl_queue_depth(tpg, acl, queue_depth);
+
 	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
 	acl->se_tpg = tpg;
 	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
 
 	tpg->se_tpg_tfo->set_default_node_attributes(acl);
 
-	if (core_set_queue_depth_for_node(tpg, acl) < 0)
-		goto out_free_acl;
-
 	return acl;
-
-out_free_acl:
-	kfree(acl);
-	return NULL;
 }
 
 static void target_add_node_acl(struct se_node_acl *acl)
@@ -327,7 +320,8 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
 		if (sess->sess_tearing_down != 0)
 			continue;
 
-		target_get_session(sess);
+		if (!target_get_session(sess))
+			continue;
 		list_move(&sess->sess_acl_list, &sess_list);
 	}
 	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
@@ -364,108 +358,52 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
  *
  */
 int core_tpg_set_initiator_node_queue_depth(
-	struct se_portal_group *tpg,
-	unsigned char *initiatorname,
-	u32 queue_depth,
-	int force)
+	struct se_node_acl *acl,
+	u32 queue_depth)
 {
-	struct se_session *sess, *init_sess = NULL;
-	struct se_node_acl *acl;
+	LIST_HEAD(sess_list);
+	struct se_portal_group *tpg = acl->se_tpg;
+	struct se_session *sess, *sess_tmp;
 	unsigned long flags;
-	int dynamic_acl = 0;
-
-	mutex_lock(&tpg->acl_node_mutex);
-	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
-	if (!acl) {
-		pr_err("Access Control List entry for %s Initiator"
-			" Node %s does not exists for TPG %hu, ignoring"
-			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
-			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
-		mutex_unlock(&tpg->acl_node_mutex);
-		return -ENODEV;
-	}
-	if (acl->dynamic_node_acl) {
-		acl->dynamic_node_acl = 0;
-		dynamic_acl = 1;
-	}
-	mutex_unlock(&tpg->acl_node_mutex);
-
-	spin_lock_irqsave(&tpg->session_lock, flags);
-	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
-		if (sess->se_node_acl != acl)
-			continue;
-
-		if (!force) {
-			pr_err("Unable to change queue depth for %s"
-				" Initiator Node: %s while session is"
-				" operational. To forcefully change the queue"
-				" depth and force session reinstatement"
-				" use the \"force=1\" parameter.\n",
-				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
-			spin_unlock_irqrestore(&tpg->session_lock, flags);
-
-			mutex_lock(&tpg->acl_node_mutex);
-			if (dynamic_acl)
-				acl->dynamic_node_acl = 1;
-			mutex_unlock(&tpg->acl_node_mutex);
-			return -EEXIST;
-		}
-		/*
-		 * Determine if the session needs to be closed by our context.
-		 */
-		if (!tpg->se_tpg_tfo->shutdown_session(sess))
-			continue;
-
-		init_sess = sess;
-		break;
-	}
+	int rc;
 
 	/*
 	 * User has requested to change the queue depth for a Initiator Node.
 	 * Change the value in the Node's struct se_node_acl, and call
-	 * core_set_queue_depth_for_node() to add the requested queue depth.
-	 *
-	 * Finally call tpg->se_tpg_tfo->close_session() to force session
-	 * reinstatement to occur if there is an active session for the
-	 * $FABRIC_MOD Initiator Node in question.
+	 * target_set_nacl_queue_depth() to set the new queue depth.
 	 */
-	acl->queue_depth = queue_depth;
+	target_set_nacl_queue_depth(tpg, acl, queue_depth);
+
+	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
+				 sess_acl_list) {
+		if (sess->sess_tearing_down != 0)
+			continue;
+		if (!target_get_session(sess))
+			continue;
+		spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
 
-	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
-		spin_unlock_irqrestore(&tpg->session_lock, flags);
 		/*
-		 * Force session reinstatement if
-		 * core_set_queue_depth_for_node() failed, because we assume
-		 * the $FABRIC_MOD has already the set session reinstatement
-		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
+		 * Finally call tpg->se_tpg_tfo->close_session() to force session
+		 * reinstatement to occur if there is an active session for the
+		 * $FABRIC_MOD Initiator Node in question.
 		 */
-		if (init_sess)
-			tpg->se_tpg_tfo->close_session(init_sess);
-
-		mutex_lock(&tpg->acl_node_mutex);
-		if (dynamic_acl)
-			acl->dynamic_node_acl = 1;
-		mutex_unlock(&tpg->acl_node_mutex);
-		return -EINVAL;
-	}
-	spin_unlock_irqrestore(&tpg->session_lock, flags);
-	/*
-	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
-	 * forcefully shutdown the $FABRIC_MOD session/nexus.
-	 */
-	if (init_sess)
-		tpg->se_tpg_tfo->close_session(init_sess);
+		rc = tpg->se_tpg_tfo->shutdown_session(sess);
+		target_put_session(sess);
+		if (!rc) {
+			spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+			continue;
+		}
+		target_put_session(sess);
+		spin_lock_irqsave(&acl->nacl_sess_lock, flags);
 	}
+	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
 
 	pr_debug("Successfully changed queue depth to: %d for Initiator"
-		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
-		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
+		" Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
+		acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg));
 
-	mutex_lock(&tpg->acl_node_mutex);
-	if (dynamic_acl)
-		acl->dynamic_node_acl = 1;
-	mutex_unlock(&tpg->acl_node_mutex);
-
 	return 0;
 }
 EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
```
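With the se_tpg, initiatorname, and force parameters gone, a fabric only needs the se_node_acl it already has when its configfs attribute is written. A hypothetical store callback using the new two-argument API (the function and variable names here are invented for illustration; the real caller lives in iscsi-target's cmdsn_depth attribute store):

```c
/* Hypothetical fabric-side configfs store; names are illustrative. */
static ssize_t example_nacl_queue_depth_store(struct se_node_acl *acl,
					      const char *page, size_t count)
{
	u32 depth;
	int ret;

	/* Parse the user-supplied queue depth value. */
	ret = kstrtou32(page, 0, &depth);
	if (ret < 0)
		return ret;

	ret = core_tpg_set_initiator_node_queue_depth(acl, depth);
	return ret ? ret : count;
}
```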
