author     Bart Van Assche <bvanassche@acm.org>        2010-11-26 14:37:47 -0500
committer  David Dillow <dillowda@ornl.gov>            2011-01-10 15:44:49 -0500
commit     76c75b258f1fe6abac6af2356989ad4d6518886e (patch)
tree       754db154ad76e44b5ecc6da71097a6f1c096ec6f /drivers/infiniband
parent     536ae14e7588e85203d4b4147c041309be5b3efb (diff)
IB/srp: reduce lock coverage for command submission and EH
We only need locks to protect our lists and number of credits available.
By pre-consuming the credit for the request, we can reduce our lock
coverage to just those areas. If we don't actually send the request,
we'll need to put the credit back into the pool.
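To make the scheme concrete, here is a minimal, self-contained sketch of the
pre-consumed-credit pattern in plain C. It is illustrative only: the names
(ctx, credits, free_list, get_tx_iu, put_tx_iu) are hypothetical stand-ins for
the driver's target->req_lim, target->free_tx, __srp_get_tx_iu() and
srp_put_tx_iu(); a pthread mutex stands in for scsi_host->host_lock, and the
driver's credit reserve for task management and the credit-free response case
are omitted for brevity.

/*
 * Sketch of the pre-consumed-credit pattern (hypothetical names; the
 * real driver protects target->req_lim and target->free_tx with
 * scsi_host->host_lock and posts work via ib_post_send()).
 */
#include <pthread.h>
#include <stddef.h>

struct iu {
        struct iu *next;
};

struct ctx {
        pthread_mutex_t lock;   /* protects credits and free_list only */
        int credits;            /* request limit granted by the target */
        struct iu *free_list;   /* free transmit IUs */
};

/* Dequeue an IU and consume one credit inside a short critical section. */
static struct iu *get_tx_iu(struct ctx *c)
{
        struct iu *iu = NULL;

        pthread_mutex_lock(&c->lock);
        if (c->credits > 0 && c->free_list != NULL) {
                --c->credits;                   /* pre-consume the credit */
                iu = c->free_list;
                c->free_list = iu->next;        /* take IU off the free list */
        }
        pthread_mutex_unlock(&c->lock);
        return iu;              /* the send itself happens with no lock held */
}

/* If the request is never actually sent, refund both the IU and the credit. */
static void put_tx_iu(struct ctx *c, struct iu *iu)
{
        pthread_mutex_lock(&c->lock);
        iu->next = c->free_list;
        c->free_list = iu;
        ++c->credits;
        pthread_mutex_unlock(&c->lock);
}

The payoff is that only the resource accounting is serialized: the expensive
post operation runs unlocked, and every error path simply refunds the credit
and the IU, which is exactly what the new srp_put_tx_iu() below does.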
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
[ broken out and small cleanups by David Dillow ]
Signed-off-by: David Dillow <dillowda@ornl.gov>
Diffstat (limited to 'drivers/infiniband')
 drivers/infiniband/ulp/srp/ib_srp.c | 124 ++++++++++++++++++++----------------
 drivers/infiniband/ulp/srp/ib_srp.h |   1 -
 2 files changed, 67 insertions(+), 58 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 2aff8814f2c5..e5bd181dbce5 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -818,9 +818,24 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 }
 
 /*
+ * Return an IU and possible credit to the free pool
+ */
+static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
+                          enum srp_iu_type iu_type)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(target->scsi_host->host_lock, flags);
+        list_add(&iu->list, &target->free_tx);
+        if (iu_type != SRP_IU_RSP)
+                ++target->req_lim;
+        spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+}
+
+/*
  * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and free_tx. Lock cannot be dropped between call here and
- * call to __srp_post_send().
+ * req_lim and free_tx. If IU is not sent, it must be returned using
+ * srp_put_tx_iu().
  *
  * Note:
  * An upper limit for the number of allocated information units for each
@@ -843,26 +858,25 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
                 return NULL;
 
         /* Initiator responses to target requests do not consume credits */
-        if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
-                ++target->zero_req_lim;
-                return NULL;
+        if (iu_type != SRP_IU_RSP) {
+                if (target->req_lim <= rsv) {
+                        ++target->zero_req_lim;
+                        return NULL;
+                }
+
+                --target->req_lim;
         }
 
         iu = list_first_entry(&target->free_tx, struct srp_iu, list);
-        iu->type = iu_type;
+        list_del(&iu->list);
         return iu;
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and free_tx.
- */
-static int __srp_post_send(struct srp_target_port *target,
-                           struct srp_iu *iu, int len)
+static int srp_post_send(struct srp_target_port *target,
+                         struct srp_iu *iu, int len)
 {
         struct ib_sge list;
         struct ib_send_wr wr, *bad_wr;
-        int ret = 0;
 
         list.addr   = iu->dma;
         list.length = len;
@@ -875,15 +889,7 @@ static int __srp_post_send(struct srp_target_port *target,
         wr.opcode     = IB_WR_SEND;
         wr.send_flags = IB_SEND_SIGNALED;
 
-        ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-        if (!ret) {
-                list_del(&iu->list);
-                if (iu->type != SRP_IU_RSP)
-                        --target->req_lim;
-        }
-
-        return ret;
+        return ib_post_send(target->qp, &wr, &bad_wr);
 }
 
 static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
@@ -953,34 +959,33 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 static int srp_response_common(struct srp_target_port *target, s32 req_delta,
                                void *rsp, int len)
 {
-        struct ib_device *dev;
+        struct ib_device *dev = target->srp_host->srp_dev->dev;
         unsigned long flags;
         struct srp_iu *iu;
-        int err = 1;
-
-        dev = target->srp_host->srp_dev->dev;
+        int err;
 
         spin_lock_irqsave(target->scsi_host->host_lock, flags);
         target->req_lim += req_delta;
-
         iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+        spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+
         if (!iu) {
                 shost_printk(KERN_ERR, target->scsi_host, PFX
                              "no IU available to send response\n");
-                goto out;
+                return 1;
         }
 
         ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
         memcpy(iu->buf, rsp, len);
         ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
 
-        err = __srp_post_send(target, iu, len);
-        if (err)
+        err = srp_post_send(target, iu, len);
+        if (err) {
                 shost_printk(KERN_ERR, target->scsi_host, PFX
                              "unable to post response: %d\n", err);
+                srp_put_tx_iu(target, iu, SRP_IU_RSP);
+        }
 
-out:
-        spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
         return err;
 }
 
@@ -1107,14 +1112,14 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
         }
 }
 
-static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
-                                void (*done)(struct scsi_cmnd *))
+static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 {
-        struct srp_target_port *target = host_to_target(scmnd->device->host);
+        struct srp_target_port *target = host_to_target(shost);
         struct srp_request *req;
         struct srp_iu *iu;
         struct srp_cmd *cmd;
         struct ib_device *dev;
+        unsigned long flags;
         int len;
 
         if (target->state == SRP_TARGET_CONNECTING)
@@ -1123,11 +1128,19 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
         if (target->state == SRP_TARGET_DEAD ||
             target->state == SRP_TARGET_REMOVED) {
                 scmnd->result = DID_BAD_TARGET << 16;
-                done(scmnd);
+                scmnd->scsi_done(scmnd);
                 return 0;
         }
 
+        spin_lock_irqsave(shost->host_lock, flags);
         iu = __srp_get_tx_iu(target, SRP_IU_CMD);
+        if (iu) {
+                req = list_first_entry(&target->free_reqs, struct srp_request,
+                                       list);
+                list_del(&req->list);
+        }
+        spin_unlock_irqrestore(shost->host_lock, flags);
+
         if (!iu)
                 goto err;
 
@@ -1135,9 +1148,6 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
         ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
                                    DMA_TO_DEVICE);
 
-        req = list_first_entry(&target->free_reqs, struct srp_request, list);
-
-        scmnd->scsi_done = done;
         scmnd->result        = 0;
         scmnd->host_scribble = (void *) req;
 
@@ -1156,30 +1166,33 @@ static int srp_queuecommand_lck(struct scsi_cmnd *scmnd,
         if (len < 0) {
                 shost_printk(KERN_ERR, target->scsi_host,
                              PFX "Failed to map data\n");
-                goto err;
+                goto err_iu;
         }
 
         ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
                                       DMA_TO_DEVICE);
 
-        if (__srp_post_send(target, iu, len)) {
+        if (srp_post_send(target, iu, len)) {
                 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
                 goto err_unmap;
         }
 
-        list_del(&req->list);
-
         return 0;
 
 err_unmap:
         srp_unmap_data(scmnd, target, req);
 
+err_iu:
+        srp_put_tx_iu(target, iu, SRP_IU_CMD);
+
+        spin_lock_irqsave(shost->host_lock, flags);
+        list_add(&req->list, &target->free_reqs);
+        spin_unlock_irqrestore(shost->host_lock, flags);
+
 err:
         return SCSI_MLQUEUE_HOST_BUSY;
 }
 
-static DEF_SCSI_QCMD(srp_queuecommand)
-
 static int srp_alloc_iu_bufs(struct srp_target_port *target)
 {
         int i;
@@ -1433,17 +1446,18 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
         struct srp_iu *iu;
         struct srp_tsk_mgmt *tsk_mgmt;
 
-        spin_lock_irq(target->scsi_host->host_lock);
-
         if (target->state == SRP_TARGET_DEAD ||
             target->state == SRP_TARGET_REMOVED)
-                goto out;
+                return -1;
 
         init_completion(&target->tsk_mgmt_done);
 
+        spin_lock_irq(target->scsi_host->host_lock);
         iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
+        spin_unlock_irq(target->scsi_host->host_lock);
+
         if (!iu)
-                goto out;
+                return -1;
 
         ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
                                    DMA_TO_DEVICE);
@@ -1458,20 +1472,16 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 
         ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
                                       DMA_TO_DEVICE);
-        if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
-                goto out;
-
-        spin_unlock_irq(target->scsi_host->host_lock);
+        if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
+                srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
+                return -1;
+        }
 
         if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
                                          msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
                 return -1;
 
         return 0;
-
-out:
-        spin_unlock_irq(target->scsi_host->host_lock);
-        return -1;
 }
 
 static int srp_abort(struct scsi_cmnd *scmnd)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 924d8e9c6672..81686eee7e62 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -168,7 +168,6 @@ struct srp_iu {
         void                   *buf;
         size_t                  size;
         enum dma_data_direction direction;
-        enum srp_iu_type        type;
 };
 
 #endif /* IB_SRP_H */