author		Bart Van Assche <bvanassche@acm.org>	2010-07-30 06:59:05 -0400
committer	Roland Dreier <rolandd@cisco.com>	2010-08-04 14:47:39 -0400
commit		c996bb47bb419b7c2f75499e11750142775e5da9
tree		8eadd7bb19484cc69fff17a218171f36085978ab /drivers/infiniband/ulp
parent		7a7008110b94dfaa90db4b0cc5b0c3f964c80506
IB/srp: Make receive buffer handling more robust
The current strategy in ib_srp for posting receive buffers is:
* Post one buffer after channel establishment.
* Post one buffer before sending an SRP_CMD or SRP_TSK_MGMT to the target.
As a result, only the first non-SRP_RSP information unit from the
target will be processed. If that first information unit is an
SRP_T_LOGOUT, it will be processed. On the other hand, if the
initiator receives an SRP_CRED_REQ or SRP_AER_REQ before it receives an
SRP_T_LOGOUT, the SRP_T_LOGOUT won't be processed.
We can fix this inconsistency by changing the strategy for posting
receive buffers to:
* Post all receive buffers after channel establishment.
* After a receive buffer has been consumed and processed, post it again.
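The resulting buffer lifecycle can be sketched in plain C. This is a
minimal userspace model, not the driver code: RQ_SIZE, post_recv() and
main() are stand-ins for SRP_RQ_SIZE, srp_post_recv() and the
connect/completion paths in the patch below.

	#include <stdio.h>

	#define RQ_SIZE 64		/* stand-in for SRP_RQ_SIZE */

	static unsigned int rx_head;	/* receives posted so far */

	/* Stand-in for srp_post_recv(): hand ring slot rx_head % RQ_SIZE
	 * back to the hardware and advance the head on success. */
	static int post_recv(void)
	{
		unsigned int slot = rx_head & (RQ_SIZE - 1);

		printf("posting receive buffer %u\n", slot);
		++rx_head;
		return 0;
	}

	int main(void)
	{
		int i;

		/* Step 1: after channel establishment, fill the whole queue. */
		for (i = 0; i < RQ_SIZE; i++)
			if (post_recv())
				return 1;

		/* Step 2: each receive completion consumes one buffer; process
		 * the information unit, then immediately repost that buffer. */
		for (i = 0; i < 3; i++) {
			/* ... process the received IU here ... */
			if (post_recv())
				fprintf(stderr, "repost failed\n");
		}
		return 0;
	}

Since every receive buffer is now always either posted or being
processed, an unsolicited SRP_CRED_REQ or SRP_AER_REQ can no longer use
up the only available buffer and starve a later SRP_T_LOGOUT.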
A side effect is that the ib_post_recv() call is moved out of the SCSI
command processing path. Since __srp_post_recv() is not called
directly any more, get rid of it and move the code directly into
srp_post_recv(). Also, move srp_post_recv() up in the file to avoid a
forward declaration.
Signed-off-by: Bart Van Assche <bart.vanassche@gmail.com>
Acked-by: David Dillow <dave@thedillows.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.c | 89
1 file changed, 44 insertions(+), 45 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 4675defb374c..ffdd2d181f65 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -811,6 +811,38 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	return len;
 }
 
+static int srp_post_recv(struct srp_target_port *target)
+{
+	unsigned long flags;
+	struct srp_iu *iu;
+	struct ib_sge list;
+	struct ib_recv_wr wr, *bad_wr;
+	unsigned int next;
+	int ret;
+
+	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+
+	next = target->rx_head & (SRP_RQ_SIZE - 1);
+	wr.wr_id = next;
+	iu = target->rx_ring[next];
+
+	list.addr = iu->dma;
+	list.length = iu->size;
+	list.lkey = target->srp_host->srp_dev->mr->lkey;
+
+	wr.next = NULL;
+	wr.sg_list = &list;
+	wr.num_sge = 1;
+
+	ret = ib_post_recv(target->qp, &wr, &bad_wr);
+	if (!ret)
+		++target->rx_head;
+
+	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+
+	return ret;
+}
+
 static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 {
 	struct srp_request *req;
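A note on the slot calculation above: rx_head & (SRP_RQ_SIZE - 1) is
equivalent to rx_head % SRP_RQ_SIZE only because the ring size is a
power of two. A standalone check of that equivalence (the value 64 is
an assumption chosen for illustration, not taken from the driver):

	#include <assert.h>

	#define SRP_RQ_SIZE 64	/* assumed power-of-two ring size */

	int main(void)
	{
		unsigned int head;

		for (head = 0; head < 8 * SRP_RQ_SIZE; head++)
			assert((head & (SRP_RQ_SIZE - 1)) == head % SRP_RQ_SIZE);
		return 0;
	}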
@@ -868,6 +900,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
 	struct ib_device *dev;
 	struct srp_iu *iu;
+	int res;
 	u8 opcode;
 
 	iu = target->rx_ring[wc->wr_id];
@@ -904,6 +937,11 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 
 	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
 				      DMA_FROM_DEVICE);
+
+	res = srp_post_recv(target);
+	if (res != 0)
+		shost_printk(KERN_ERR, target->scsi_host,
+			     PFX "Recv failed with error code %d\n", res);
 }
 
 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
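Note the design choice in the hunk above: srp_handle_recv() runs in
receive-completion context, where there is no originating request to
fail, so an unsuccessful repost can only be logged. A minimal sketch of
that shape (repost_buffer() is a hypothetical stand-in for
srp_post_recv(), and fprintf() for shost_printk()):

	#include <stdio.h>

	/* Hypothetical stand-in for srp_post_recv(); 0 means success. */
	static int repost_buffer(void)
	{
		return 0;
	}

	/* A completion handler returns void: there is no caller to
	 * propagate a posting error to, so it can only be reported. */
	static void handle_recv_completion(void)
	{
		int res = repost_buffer();

		if (res != 0)
			fprintf(stderr, "Recv failed with error code %d\n", res);
	}

	int main(void)
	{
		handle_recv_completion();
		return 0;
	}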
@@ -943,45 +981,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 	}
 }
 
-static int __srp_post_recv(struct srp_target_port *target)
-{
-	struct srp_iu *iu;
-	struct ib_sge list;
-	struct ib_recv_wr wr, *bad_wr;
-	unsigned int next;
-	int ret;
-
-	next = target->rx_head & (SRP_RQ_SIZE - 1);
-	wr.wr_id = next;
-	iu = target->rx_ring[next];
-
-	list.addr = iu->dma;
-	list.length = iu->size;
-	list.lkey = target->srp_host->srp_dev->mr->lkey;
-
-	wr.next = NULL;
-	wr.sg_list = &list;
-	wr.num_sge = 1;
-
-	ret = ib_post_recv(target->qp, &wr, &bad_wr);
-	if (!ret)
-		++target->rx_head;
-
-	return ret;
-}
-
-static int srp_post_recv(struct srp_target_port *target)
-{
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(target->scsi_host->host_lock, flags);
-	ret = __srp_post_recv(target);
-	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
-
-	return ret;
-}
-
 /*
  * Must be called with target->scsi_host->host_lock held to protect
  * req_lim and tx_head. Lock cannot be dropped between call here and
@@ -1091,11 +1090,6 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 		goto err;
 	}
 
-	if (__srp_post_recv(target)) {
-		shost_printk(KERN_ERR, target->scsi_host, PFX "Recv failed\n");
-		goto err_unmap;
-	}
-
 	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
 				      DMA_TO_DEVICE);
 
@@ -1238,6 +1232,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 	int attr_mask = 0;
 	int comp = 0;
 	int opcode = 0;
+	int i;
 
 	switch (event->event) {
 	case IB_CM_REQ_ERROR:
@@ -1287,7 +1282,11 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 		if (target->status)
 			break;
 
-		target->status = srp_post_recv(target);
+		for (i = 0; i < SRP_RQ_SIZE; i++) {
+			target->status = srp_post_recv(target);
+			if (target->status)
+				break;
+		}
 		if (target->status)
 			break;
 
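The loop in this last hunk performs the connect-time fill; a single
posting failure is left in target->status, which makes the connection
attempt fail. A hedged sketch of that error-propagation pattern
(fill_rx_ring() and the callback are illustrative names, not the
driver's API):

	#include <stdio.h>

	/* Post rq_size receives, stopping at the first failure so the
	 * caller can abort the connection attempt with that status. */
	static int fill_rx_ring(int (*post_recv)(void *), void *ctx,
				unsigned int rq_size)
	{
		int status = 0;
		unsigned int i;

		for (i = 0; i < rq_size; i++) {
			status = post_recv(ctx);
			if (status)
				break;
		}
		return status;
	}

	static int ok_post(void *ctx)
	{
		(void)ctx;
		return 0;	/* 0 on success, errno-style code on failure */
	}

	int main(void)
	{
		int status = fill_rx_ring(ok_post, NULL, 64);

		printf("fill status: %d\n", status);
		return status;
	}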