author	Roland Dreier <roland@purestorage.com>	2011-09-29 01:12:07 -0400
committer	Nicholas Bellinger <nab@linux-iscsi.org>	2011-10-23 23:17:51 -0400
commit	79a7fef26431830e22e282053d050af790117db8 (patch)
tree	85919eb3addb0b488f9d5361a375c961e5e311a3 /drivers/target
parent	9375b1bfd2555c8bc828d394a4419a212b46ba71 (diff)
target: Prevent cmd->se_queue_node double add
This patch addresses a bug introduced by the lio-core-2.6.git conversion
of transport_add_cmd_to_queue() to use a single embedded list_head,
instead of individual struct se_queue_req allocations that had allowed a
single se_cmd to be added to the queue multiple times. The conversion
happened in the following commit:
commit 2a9e4d5ca5d99f4c600578d6285d45142e7e5208
Author: Andy Grover <agrover@redhat.com>
Date: Tue Apr 26 17:45:51 2011 -0700
target: Embed qr in struct se_cmd
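As a rough before/after sketch of that conversion (the se_queue_req field
names here are approximations reconstructed from the commit text, not
copied from the tree):

	/* Before: a node was allocated per queueing event, so the same
	 * se_cmd could legitimately sit on qobj_list several times. */
	struct se_queue_req {
		int			state;
		struct se_cmd		*cmd;
		struct list_head	qr_list;	/* entry on qobj->qobj_list */
	};

	/* After: the node is embedded, so each se_cmd owns exactly one
	 * se_queue_node and may be on qobj_list at most once. */
	struct se_cmd {
		/* ... */
		struct list_head	se_queue_node;	/* entry on qobj->qobj_list */
		/* ... */
	};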
The problem is that some target code still assumes that multiple adds
via transport_add_cmd_to_queue() are allowed, which ends up corrupting
qobj->qobj_list. This patch addresses the issue by removing a struct
se_cmd that is already on the list before adding it again, and also
removes an unnecessary list walk in transport_remove_cmd_from_queue().
It further changes cmd->t_transport_queue_active to use explicit sets
instead of increment/decrement, to prevent confusion during exception
path handling.
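To see why a second add corrupts the list, here is a minimal userspace
sketch, not kernel code: it re-implements just enough of
include/linux/list.h to show the failure mode, and the variable names
qobj_list/se_queue_node simply mirror the fields touched by this patch:

	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

	static void list_add_tail(struct list_head *new, struct list_head *head)
	{
		new->prev = head->prev;
		new->next = head;
		head->prev->next = new;
		head->prev = new;
	}

	static int list_empty(const struct list_head *h) { return h->next == h; }

	static void list_del(struct list_head *e)
	{
		e->prev->next = e->next;
		e->next->prev = e->prev;
	}

	int main(void)
	{
		struct list_head qobj_list, se_queue_node;

		INIT_LIST_HEAD(&qobj_list);
		INIT_LIST_HEAD(&se_queue_node);	/* what transport_init_se_cmd() now does */

		list_add_tail(&se_queue_node, &qobj_list);

		/*
		 * A second list_add_tail() here, without removing the node
		 * first, would link se_queue_node to itself and leave
		 * qobj_list pointing at a node that no longer points back:
		 * traversal then loops forever, and a later list_del()
		 * writes through stale pointers.  The guard this patch adds:
		 */
		if (!list_empty(&se_queue_node))
			list_del(&se_queue_node);	/* safe: re-added just below */
		list_add_tail(&se_queue_node, &qobj_list);

		printf("on the list exactly once: %s\n",
		       (qobj_list.next == &se_queue_node &&
			se_queue_node.next == &qobj_list) ? "yes" : "no");
		return 0;
	}

Note that the list_empty() guard only works because
transport_init_se_cmd() now initializes cmd->se_queue_node, and because
the other removal sites are converted from list_del() to
list_del_init(), which re-initializes the entry so a later list_empty()
check sees it as unlinked.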
Signed-off-by: Roland Dreier <roland@purestorage.com>
Cc: Andy Grover <agrover@redhat.com>
Cc: stable@kernel.org
Signed-off-by: Nicholas Bellinger <nab@risingtidesystems.com>
Diffstat (limited to 'drivers/target')
 drivers/target/target_core_tmr.c       |  2 +-
 drivers/target/target_core_transport.c | 31 +++++++++++++++----------------
 2 files changed, 16 insertions(+), 17 deletions(-)
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 662473c5404..98b12a8923f 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -339,7 +339,7 @@ int core_tmr_lun_reset(
 
 		atomic_dec(&cmd->t_transport_queue_active);
 		atomic_dec(&qobj->queue_cnt);
-		list_del(&cmd->se_queue_node);
+		list_del_init(&cmd->se_queue_node);
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
 		pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 4b2d6bf87a1..046da7f3823 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -620,8 +620,6 @@ static void transport_add_cmd_to_queue(
 	struct se_queue_obj *qobj = &dev->dev_queue_obj;
 	unsigned long flags;
 
-	INIT_LIST_HEAD(&cmd->se_queue_node);
-
 	if (t_state) {
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 		cmd->t_state = t_state;
@@ -630,15 +628,21 @@ static void transport_add_cmd_to_queue(
 	}
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
+
+	/* If the cmd is already on the list, remove it before we add it */
+	if (!list_empty(&cmd->se_queue_node))
+		list_del(&cmd->se_queue_node);
+	else
+		atomic_inc(&qobj->queue_cnt);
+
 	if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) {
 		cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL;
 		list_add(&cmd->se_queue_node, &qobj->qobj_list);
 	} else
 		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list);
-	atomic_inc(&cmd->t_transport_queue_active);
+	atomic_set(&cmd->t_transport_queue_active, 1);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
-	atomic_inc(&qobj->queue_cnt);
 	wake_up_interruptible(&qobj->thread_wq);
 }
 
@@ -655,9 +659,9 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
 	}
 	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node);
 
-	atomic_dec(&cmd->t_transport_queue_active);
+	atomic_set(&cmd->t_transport_queue_active, 0);
 
-	list_del(&cmd->se_queue_node);
+	list_del_init(&cmd->se_queue_node);
 	atomic_dec(&qobj->queue_cnt);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
@@ -667,7 +671,6 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)
 static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 	struct se_queue_obj *qobj)
 {
-	struct se_cmd *t;
 	unsigned long flags;
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
@@ -675,14 +678,9 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 		return;
 	}
-
-	list_for_each_entry(t, &qobj->qobj_list, se_queue_node)
-		if (t == cmd) {
-			atomic_dec(&cmd->t_transport_queue_active);
-			atomic_dec(&qobj->queue_cnt);
-			list_del(&cmd->se_queue_node);
-			break;
-		}
+	atomic_set(&cmd->t_transport_queue_active, 0);
+	atomic_dec(&qobj->queue_cnt);
+	list_del_init(&cmd->se_queue_node);
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
 	if (atomic_read(&cmd->t_transport_queue_active)) {
@@ -1066,7 +1064,7 @@ static void transport_release_all_cmds(struct se_device *dev)
 	list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list,
 				se_queue_node) {
 		t_state = cmd->t_state;
-		list_del(&cmd->se_queue_node);
+		list_del_init(&cmd->se_queue_node);
 		spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
 				flags);
 
@@ -1597,6 +1595,7 @@ void transport_init_se_cmd(
 	INIT_LIST_HEAD(&cmd->se_delayed_node);
 	INIT_LIST_HEAD(&cmd->se_ordered_node);
 	INIT_LIST_HEAD(&cmd->se_qf_node);
+	INIT_LIST_HEAD(&cmd->se_queue_node);
 
 	INIT_LIST_HEAD(&cmd->t_task_list);
 	init_completion(&cmd->transport_lun_fe_stop_comp);