author		Bart Van Assche <bvanassche@acm.org>	2010-11-26 15:08:38 -0500
committer	David Dillow <dillowda@ornl.gov>	2011-01-10 15:44:50 -0500
commit		e9684678221441f886b4d7c74f8770bb0981737a
tree		76ed83fb7dd41c84c2b8e08b785a802e99c6e790 /drivers/infiniband
parent		94a9174c630c8465ed9e97ecd242993429930c05
IB/srp: stop sharing the host lock with SCSI
We don't need protection against the SCSI stack, so use our own lock to
allow parallel progress on separate CPUs.
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
[ broken out and small cleanups by David Dillow ]
Signed-off-by: David Dillow <dillowda@ornl.gov>
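In short, every use of target->scsi_host->host_lock (shared with the SCSI midlayer) becomes a spinlock embedded in struct srp_target_port, so work for different SRP targets on the same host no longer serializes on one lock. Below is a minimal sketch of the pattern, with hypothetical example_* names standing in for the driver's structures; it is illustrative only, not code from this patch:

        #include <linux/spinlock.h>
        #include <linux/list.h>

        /* Illustrative stand-in for struct srp_target_port: each target
         * carries its own lock instead of borrowing scsi_host->host_lock. */
        struct example_target {
                spinlock_t lock;                /* protects req_lim and free_tx */
                int req_lim;
                struct list_head free_tx;
        };

        static void example_target_init(struct example_target *t)
        {
                /* A spinlock must be initialized before first use, which is
                 * why the patch adds spin_lock_init() in srp_create_target(). */
                spin_lock_init(&t->lock);
                INIT_LIST_HEAD(&t->free_tx);
        }

        static void example_credit_return(struct example_target *t, int delta)
        {
                unsigned long flags;

                /* The per-target lock serializes only paths touching this
                 * target's state; other targets on the same SCSI host can
                 * make progress in parallel on other CPUs. */
                spin_lock_irqsave(&t->lock, flags);
                t->req_lim += delta;
                spin_unlock_irqrestore(&t->lock, flags);
        }

Note the patch follows the usual kernel convention: spin_lock_irqsave() on paths that may be reached with interrupts already disabled (e.g. completion handling), and plain spin_lock_irq() where interrupts are known to be enabled.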
Diffstat (limited to 'drivers/infiniband')
 drivers/infiniband/ulp/srp/ib_srp.c | 46
 drivers/infiniband/ulp/srp/ib_srp.h |  2
 2 files changed, 25 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index e76fe54faeea..8691fc83f70b 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -447,12 +447,12 @@ static bool srp_change_state(struct srp_target_port *target,
 {
        bool changed = false;
 
-       spin_lock_irq(target->scsi_host->host_lock);
+       spin_lock_irq(&target->lock);
        if (target->state == old) {
                target->state = new;
                changed = true;
        }
-       spin_unlock_irq(target->scsi_host->host_lock);
+       spin_unlock_irq(&target->lock);
        return changed;
 }
 
@@ -555,11 +555,11 @@ static void srp_remove_req(struct srp_target_port *target,
        unsigned long flags;
 
        srp_unmap_data(req->scmnd, target, req);
-       spin_lock_irqsave(target->scsi_host->host_lock, flags);
+       spin_lock_irqsave(&target->lock, flags);
        target->req_lim += req_lim_delta;
        req->scmnd = NULL;
        list_add_tail(&req->list, &target->free_reqs);
-       spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+       spin_unlock_irqrestore(&target->lock, flags);
 }
 
 static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
@@ -634,13 +634,13 @@ err:
         * Schedule our work inside the lock to avoid a race with
         * the flush_scheduled_work() in srp_remove_one().
         */
-       spin_lock_irq(target->scsi_host->host_lock);
+       spin_lock_irq(&target->lock);
        if (target->state == SRP_TARGET_CONNECTING) {
                target->state = SRP_TARGET_DEAD;
                INIT_WORK(&target->work, srp_remove_work);
                schedule_work(&target->work);
        }
-       spin_unlock_irq(target->scsi_host->host_lock);
+       spin_unlock_irq(&target->lock);
 
        return ret;
 }
@@ -829,17 +829,16 @@ static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
 {
        unsigned long flags;
 
-       spin_lock_irqsave(target->scsi_host->host_lock, flags);
+       spin_lock_irqsave(&target->lock, flags);
        list_add(&iu->list, &target->free_tx);
        if (iu_type != SRP_IU_RSP)
                ++target->req_lim;
-       spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+       spin_unlock_irqrestore(&target->lock, flags);
 }
 
 /*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and free_tx. If IU is not sent, it must be returned using
- * srp_put_tx_iu().
+ * Must be called with target->lock held to protect req_lim and free_tx.
+ * If IU is not sent, it must be returned using srp_put_tx_iu().
  *
  * Note:
  * An upper limit for the number of allocated information units for each
@@ -920,9 +919,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
        unsigned long flags;
 
        if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
-               spin_lock_irqsave(target->scsi_host->host_lock, flags);
+               spin_lock_irqsave(&target->lock, flags);
                target->req_lim += be32_to_cpu(rsp->req_lim_delta);
-               spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+               spin_unlock_irqrestore(&target->lock, flags);
 
                target->tsk_mgmt_status = -1;
                if (be32_to_cpu(rsp->resp_data_len) >= 4)
@@ -963,10 +962,10 @@ static int srp_response_common(struct srp_target_port *target, s32 req_delta,
        struct srp_iu *iu;
        int err;
 
-       spin_lock_irqsave(target->scsi_host->host_lock, flags);
+       spin_lock_irqsave(&target->lock, flags);
        target->req_lim += req_delta;
        iu = __srp_get_tx_iu(target, SRP_IU_RSP);
-       spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+       spin_unlock_irqrestore(&target->lock, flags);
 
        if (!iu) {
                shost_printk(KERN_ERR, target->scsi_host, PFX
@@ -1131,14 +1130,14 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
                return 0;
        }
 
-       spin_lock_irqsave(shost->host_lock, flags);
+       spin_lock_irqsave(&target->lock, flags);
        iu = __srp_get_tx_iu(target, SRP_IU_CMD);
        if (iu) {
                req = list_first_entry(&target->free_reqs, struct srp_request,
                                       list);
                list_del(&req->list);
        }
-       spin_unlock_irqrestore(shost->host_lock, flags);
+       spin_unlock_irqrestore(&target->lock, flags);
 
        if (!iu)
                goto err;
@@ -1184,9 +1183,9 @@ err_unmap:
 err_iu:
        srp_put_tx_iu(target, iu, SRP_IU_CMD);
 
-       spin_lock_irqsave(shost->host_lock, flags);
+       spin_lock_irqsave(&target->lock, flags);
        list_add(&req->list, &target->free_reqs);
-       spin_unlock_irqrestore(shost->host_lock, flags);
+       spin_unlock_irqrestore(&target->lock, flags);
 
 err:
        return SCSI_MLQUEUE_HOST_BUSY;
@@ -1451,9 +1450,9 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 
        init_completion(&target->tsk_mgmt_done);
 
-       spin_lock_irq(target->scsi_host->host_lock);
+       spin_lock_irq(&target->lock);
        iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
-       spin_unlock_irq(target->scsi_host->host_lock);
+       spin_unlock_irq(&target->lock);
 
        if (!iu)
                return -1;
@@ -1957,6 +1956,7 @@ static ssize_t srp_create_target(struct device *dev,
        target->scsi_host = target_host;
        target->srp_host = host;
 
+       spin_lock_init(&target->lock);
        INIT_LIST_HEAD(&target->free_tx);
        INIT_LIST_HEAD(&target->free_reqs);
        for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
@@ -2186,9 +2186,9 @@ static void srp_remove_one(struct ib_device *device)
         */
        spin_lock(&host->target_lock);
        list_for_each_entry(target, &host->target_list, list) {
-               spin_lock_irq(target->scsi_host->host_lock);
+               spin_lock_irq(&target->lock);
                target->state = SRP_TARGET_REMOVED;
-               spin_unlock_irq(target->scsi_host->host_lock);
+               spin_unlock_irq(&target->lock);
        }
        spin_unlock(&host->target_lock);
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 81686eee7e62..acb435d3c1e3 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -144,6 +144,8 @@ struct srp_target_port {
 
        struct srp_iu *rx_ring[SRP_RQ_SIZE];
 
+       spinlock_t lock;
+
        struct list_head free_tx;
        struct srp_iu *tx_ring[SRP_SQ_SIZE];
 
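
Tying the pieces together: the updated comment above __srp_get_tx_iu() (the hunk at line 839 of ib_srp.c) states the caller contract that target->lock protects req_lim and free_tx, and that an unsent IU must be handed back via srp_put_tx_iu(). A condensed sketch of that pattern as it appears in srp_queuecommand() after this patch, simplified for illustration (data mapping and the actual send are elided):

        /* Condensed from srp_queuecommand() and its error paths;
         * illustrative only, not a verbatim copy of the driver. */
        static int example_queuecommand(struct srp_target_port *target)
        {
                struct srp_request *req = NULL;
                struct srp_iu *iu;
                unsigned long flags;

                /* Allocate a TX IU and a request slot under the per-target lock. */
                spin_lock_irqsave(&target->lock, flags);
                iu = __srp_get_tx_iu(target, SRP_IU_CMD); /* requires target->lock */
                if (iu) {
                        req = list_first_entry(&target->free_reqs,
                                               struct srp_request, list);
                        list_del(&req->list);
                }
                spin_unlock_irqrestore(&target->lock, flags);

                if (!iu)
                        goto err;

                /* ... map data and post the send; on success, return 0 ... */

                /* On a send failure the IU and request slot are recycled: */
                srp_put_tx_iu(target, iu, SRP_IU_CMD);

                spin_lock_irqsave(&target->lock, flags);
                list_add(&req->list, &target->free_reqs);
                spin_unlock_irqrestore(&target->lock, flags);

        err:
                return SCSI_MLQUEUE_HOST_BUSY;
        }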