author     Ishai Rabinovitz <ishai@mellanox.co.il>    2006-09-07 08:00:01 -0400
committer  Roland Dreier <rolandd@cisco.com>          2006-09-14 16:51:40 -0400
commit     add7afc756eddd5d02fd986d19e6300b3e1a5ae8
tree       78b79105cae9166ed194180dc9d6f3c8d4dc8fb3 /drivers/infiniband
parent     63b98080daa35f0d682db04f4fb7ada010888752
IB/srp: Don't schedule reconnect from srp_completion()
If there is a problem in the connection, the SCSI mid-layer will
eventually call srp_reset_host(), which will call srp_reconnect_target(), so
we do not need to schedule a call to srp_reconnect_work() from
srp_completion().
Removing this prevents srp_reset_host() from failing if a reconnect
scheduled from srp_completion() is already in progress, which in turn
was causing crashes because both the SCSI midlayer and
srp_reconnect_target() were cancelling commands concurrently.
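
For context, the error-handler path referred to above looks roughly like
this (a simplified sketch of srp_reset_host() in ib_srp.c around this
kernel version, not quoted verbatim; host_to_target() is the driver helper
that maps the Scsi_Host back to its srp_target_port):

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	/* Recover the SRP target behind this SCSI host. */
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	printk(KERN_ERR PFX "SRP reset_host called\n");

	/*
	 * srp_reconnect_target() tears the connection down, fails any
	 * outstanding requests and then re-establishes the connection,
	 * so nothing extra has to be scheduled from srp_completion().
	 */
	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}

Because this path already cleans up outstanding commands, scheduling a
second reconnect from the completion handler only raced with it.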
Signed-off-by: Ishai Rabinovitz <ishai@mellanox.co.il>
Signed-off-by: Michael S. Tsirkin <mst@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
 -rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 14
 1 file changed, 0 insertions(+), 14 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8257d5a2c8f8..fd8344cdc0db 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -799,13 +799,6 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 }
 
-static void srp_reconnect_work(void *target_ptr)
-{
-	struct srp_target_port *target = target_ptr;
-
-	srp_reconnect_target(target);
-}
-
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
 	struct srp_iu *iu;
@@ -858,7 +851,6 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
 {
 	struct srp_target_port *target = target_ptr;
 	struct ib_wc wc;
-	unsigned long flags;
 
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
@@ -866,10 +858,6 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
 			printk(KERN_ERR PFX "failed %s status %d\n",
 			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
 			       wc.status);
-			spin_lock_irqsave(target->scsi_host->host_lock, flags);
-			if (target->state == SRP_TARGET_LIVE)
-				schedule_work(&target->work);
-			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 			break;
 		}
 
@@ -1705,8 +1693,6 @@ static ssize_t srp_create_target(struct class_device *class_dev,
 	target->scsi_host = target_host;
 	target->srp_host = host;
 
-	INIT_WORK(&target->work, srp_reconnect_work, target);
-
 	INIT_LIST_HEAD(&target->free_reqs);
 	INIT_LIST_HEAD(&target->req_queue);
 	for (i = 0; i < SRP_SQ_SIZE; ++i) {