author     David Dillow <dillowda@ornl.gov>    2010-10-08 14:40:47 -0400
committer  Roland Dreier <rolandd@cisco.com>   2010-10-23 01:19:10 -0400
commit     bb12588a38e6db85e01dceadff7bc161fc92e7d2 (patch)
tree       6cf9f3b90cc1118aa2fcd716537916524384d428 /drivers/infiniband/ulp
parent     dd5e6e38b2b8bd8bf71cae800e2b613e85ef1522 (diff)
IB/srp: Implement SRP_CRED_REQ and SRP_AER_REQ
This patch adds support for SRP_CRED_REQ to avoid a lockup by targets that use that mechanism to return credits to the initiator. This prevents a lockup observed in the field where we would never add the credits from the SRP_CRED_REQ to our current count, and would therefore never send another command to the target.

Minimal support for SRP_AER_REQ is also added, as these messages can also be used to convey additional credits to the initiator.

Based upon extensive debugging and code by Bart Van Assche and a bug report by Chris Worley.

Signed-off-by: David Dillow <dillowda@ornl.gov>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
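The credit accounting the patch restores can be sketched outside the kernel. The fragment below is a minimal, self-contained user-space model, not the driver code itself; the struct and function names are illustrative. The idea: the initiator spends one credit per command, stalls when its request limit reaches zero, and an incoming SRP_CRED_REQ folds req_lim_delta back into the count so sending can resume.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the initiator's per-target state. */
struct initiator {
	int32_t req_lim;	/* send credits currently granted by the target */
};

/*
 * Model of handling SRP_CRED_REQ: fold the delta into the credit count.
 * Without this, the credits carried by the message are never added and
 * req_lim never grows again once it hits zero.
 */
static void process_cred_req(struct initiator *ini, int32_t req_lim_delta)
{
	ini->req_lim += req_lim_delta;
}

/* Returns 1 if a command may be sent; each command consumes one credit. */
static int send_command(struct initiator *ini)
{
	if (ini->req_lim <= 0)
		return 0;	/* no credits left: the lockup seen in the field */
	--ini->req_lim;
	return 1;
}

int main(void)
{
	struct initiator ini = { .req_lim = 1 };

	printf("send: %d\n", send_command(&ini));	/* 1: last credit used */
	printf("send: %d\n", send_command(&ini));	/* 0: stalled */
	process_cred_req(&ini, 2);			/* SRP_CRED_REQ returns credits */
	printf("send: %d\n", send_command(&ini));	/* 1: sending resumes */
	return 0;
}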
Diffstat (limited to 'drivers/infiniband/ulp')
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c  105
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h    8
2 files changed, 103 insertions(+), 10 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index b8b09a4f012a..a54eee9324b6 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -83,6 +83,10 @@ static void srp_remove_one(struct ib_device *device);
 static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
 static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
+				      enum srp_iu_type iu_type);
+static int __srp_post_send(struct srp_target_port *target,
+			   struct srp_iu *iu, int len);
 
 static struct scsi_transport_template *ib_srp_transport_template;
 
@@ -896,6 +900,71 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 }
 
+static int srp_response_common(struct srp_target_port *target, s32 req_delta,
+			       void *rsp, int len)
+{
+	struct ib_device *dev;
+	unsigned long flags;
+	struct srp_iu *iu;
+	int err = 1;
+
+	dev = target->srp_host->srp_dev->dev;
+
+	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	target->req_lim += req_delta;
+
+	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+	if (!iu) {
+		shost_printk(KERN_ERR, target->scsi_host, PFX
+			     "no IU available to send response\n");
+		goto out;
+	}
+
+	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
+	memcpy(iu->buf, rsp, len);
+	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
+
+	err = __srp_post_send(target, iu, len);
+	if (err)
+		shost_printk(KERN_ERR, target->scsi_host, PFX
+			     "unable to post response: %d\n", err);
+
+out:
+	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+	return err;
+}
+
+static void srp_process_cred_req(struct srp_target_port *target,
+				 struct srp_cred_req *req)
+{
+	struct srp_cred_rsp rsp = {
+		.opcode = SRP_CRED_RSP,
+		.tag = req->tag,
+	};
+	s32 delta = be32_to_cpu(req->req_lim_delta);
+
+	if (srp_response_common(target, delta, &rsp, sizeof rsp))
+		shost_printk(KERN_ERR, target->scsi_host, PFX
+			     "problems processing SRP_CRED_REQ\n");
+}
+
+static void srp_process_aer_req(struct srp_target_port *target,
+				struct srp_aer_req *req)
+{
+	struct srp_aer_rsp rsp = {
+		.opcode = SRP_AER_RSP,
+		.tag = req->tag,
+	};
+	s32 delta = be32_to_cpu(req->req_lim_delta);
+
+	shost_printk(KERN_ERR, target->scsi_host, PFX
+		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
+
+	if (srp_response_common(target, delta, &rsp, sizeof rsp))
+		shost_printk(KERN_ERR, target->scsi_host, PFX
+			     "problems processing SRP_AER_REQ\n");
+}
+
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
 	struct ib_device *dev;
@@ -923,6 +992,14 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 		srp_process_rsp(target, iu->buf);
 		break;
 
+	case SRP_CRED_REQ:
+		srp_process_cred_req(target, iu->buf);
+		break;
+
+	case SRP_AER_REQ:
+		srp_process_aer_req(target, iu->buf);
+		break;
+
 	case SRP_T_LOGOUT:
 		/* XXX Handle target logout */
 		shost_printk(KERN_WARNING, target->scsi_host,
@@ -985,23 +1062,36 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
  * Must be called with target->scsi_host->host_lock held to protect
  * req_lim and tx_head.  Lock cannot be dropped between call here and
  * call to __srp_post_send().
+ *
+ * Note:
+ * An upper limit for the number of allocated information units for each
+ * request type is:
+ * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
+ *   more than Scsi_Host.can_queue requests.
+ * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
+ * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
+ *   one unanswered SRP request to an initiator.
  */
 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
-				      enum srp_request_type req_type)
+				      enum srp_iu_type iu_type)
 {
-	s32 rsv = (req_type == SRP_REQ_TASK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
+	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
+	struct srp_iu *iu;
 
 	srp_send_completion(target->send_cq, target);
 
 	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
 		return NULL;
 
-	if (target->req_lim <= rsv) {
+	/* Initiator responses to target requests do not consume credits */
+	if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
 		++target->zero_req_lim;
 		return NULL;
 	}
 
-	return target->tx_ring[target->tx_head & SRP_SQ_MASK];
+	iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
+	iu->type = iu_type;
+	return iu;
 }
 
 /*
@@ -1030,7 +1120,8 @@ static int __srp_post_send(struct srp_target_port *target,
 
 	if (!ret) {
 		++target->tx_head;
-		--target->req_lim;
+		if (iu->type != SRP_IU_RSP)
+			--target->req_lim;
 	}
 
 	return ret;
@@ -1056,7 +1147,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 		return 0;
 	}
 
-	iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL);
+	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
 	if (!iu)
 		goto err;
 
@@ -1363,7 +1454,7 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 
 	init_completion(&req->done);
 
-	iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT);
+	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
 	if (!iu)
 		goto out;
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 7a959d5f2fa6..ed0dce9e479f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -82,9 +82,10 @@ enum srp_target_state {
 	SRP_TARGET_REMOVED
 };
 
-enum srp_request_type {
-	SRP_REQ_NORMAL,
-	SRP_REQ_TASK_MGMT,
+enum srp_iu_type {
+	SRP_IU_CMD,
+	SRP_IU_TSK_MGMT,
+	SRP_IU_RSP,
 };
 
 struct srp_device {
@@ -171,6 +172,7 @@ struct srp_iu {
 	void *buf;
 	size_t size;
 	enum dma_data_direction direction;
+	enum srp_iu_type type;
 };
 
 #endif /* IB_SRP_H */