Diffstat (limited to 'drivers/infiniband/ulp/srp/ib_srp.c')
 drivers/infiniband/ulp/srp/ib_srp.c | 236 ++++++++++++++++++++++-----------
 1 file changed, 168 insertions(+), 68 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 7f8f16bad753..cfc1d65c4577 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -291,7 +291,7 @@ static void srp_free_target_ib(struct srp_target_port *target)
 
 	for (i = 0; i < SRP_RQ_SIZE; ++i)
 		srp_free_iu(target->srp_host, target->rx_ring[i]);
-	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
+	for (i = 0; i < SRP_SQ_SIZE; ++i)
 		srp_free_iu(target->srp_host, target->tx_ring[i]);
 }
 
@@ -811,6 +811,75 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
 	return len;
 }
 
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.  Lock cannot be dropped between call here and
+ * call to __srp_post_send().
+ *
+ * Note:
+ * An upper limit for the number of allocated information units for each
+ * request type is:
+ * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
+ *   more than Scsi_Host.can_queue requests.
+ * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
+ * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
+ *   one unanswered SRP request to an initiator.
+ */
+static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
+				      enum srp_iu_type iu_type)
+{
+	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
+	struct srp_iu *iu;
+
+	srp_send_completion(target->send_cq, target);
+
+	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
+		return NULL;
+
+	/* Initiator responses to target requests do not consume credits */
+	if (target->req_lim <= rsv && iu_type != SRP_IU_RSP) {
+		++target->zero_req_lim;
+		return NULL;
+	}
+
+	iu = target->tx_ring[target->tx_head & SRP_SQ_MASK];
+	iu->type = iu_type;
+	return iu;
+}
+
+/*
+ * Must be called with target->scsi_host->host_lock held to protect
+ * req_lim and tx_head.
+ */
+static int __srp_post_send(struct srp_target_port *target,
+			   struct srp_iu *iu, int len)
+{
+	struct ib_sge list;
+	struct ib_send_wr wr, *bad_wr;
+	int ret = 0;
+
+	list.addr = iu->dma;
+	list.length = len;
+	list.lkey = target->srp_host->srp_dev->mr->lkey;
+
+	wr.next = NULL;
+	wr.wr_id = target->tx_head & SRP_SQ_MASK;
+	wr.sg_list = &list;
+	wr.num_sge = 1;
+	wr.opcode = IB_WR_SEND;
+	wr.send_flags = IB_SEND_SIGNALED;
+
+	ret = ib_post_send(target->qp, &wr, &bad_wr);
+
+	if (!ret) {
+		++target->tx_head;
+		if (iu->type != SRP_IU_RSP)
+			--target->req_lim;
+	}
+
+	return ret;
+}
+
 static int srp_post_recv(struct srp_target_port *target)
 {
 	unsigned long flags;
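
Note: the IU-accounting comment in __srp_get_tx_iu() above is easier to
follow with numbers. Here is a minimal stand-alone sketch of the reserve
check (not part of the patch); may_take_credit() and its values are
hypothetical, with tsk_mgmt_rsv standing in for SRP_TSK_MGMT_SQ_SIZE.

#include <stdio.h>

enum iu_type { IU_CMD, IU_TSK_MGMT, IU_RSP };

/* mirrors the rsv test in __srp_get_tx_iu() */
static int may_take_credit(int req_lim, enum iu_type type, int tsk_mgmt_rsv)
{
	int rsv = (type == IU_TSK_MGMT) ? 0 : tsk_mgmt_rsv;

	if (type == IU_RSP)	/* responses never consume a credit */
		return 1;
	return req_lim > rsv;
}

int main(void)
{
	/* with one credit left and one reserved, a normal command is
	 * refused while a task-management request still gets through */
	printf("cmd: %d\n", may_take_credit(1, IU_CMD, 1));	/* 0 */
	printf("tsk: %d\n", may_take_credit(1, IU_TSK_MGMT, 1));	/* 1 */
	printf("rsp: %d\n", may_take_credit(0, IU_RSP, 1));	/* 1 */
	return 0;
}
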
@@ -822,7 +891,7 @@ static int srp_post_recv(struct srp_target_port *target)
 
 	spin_lock_irqsave(target->scsi_host->host_lock, flags);
 
-	next = target->rx_head & (SRP_RQ_SIZE - 1);
+	next = target->rx_head & SRP_RQ_MASK;
 	wr.wr_id = next;
 	iu = target->rx_ring[next];
 
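
Note: both rings are now indexed with a free-running counter and a
power-of-two mask (SRP_RQ_MASK here, SRP_SQ_MASK on the send side), and
ring occupancy falls out of head - tail. A small sketch under an assumed
ring size of 64; RING_MASK plays the role of the SRP_*_MASK constants.

#include <assert.h>

#define RING_SIZE 64			/* hypothetical; must be a power of 2 */
#define RING_MASK (RING_SIZE - 1)

int main(void)
{
	unsigned int head = 0, tail = 0, i;

	for (i = 0; i < 10 * RING_SIZE; ++i) {
		if (head - tail >= RING_SIZE)	/* ring full: reap one entry */
			++tail;
		/* masking equals modulo only because the size is a power of 2 */
		assert((head & RING_MASK) == head % RING_SIZE);
		++head;
		/* head - tail is the occupancy, correct across unsigned wrap */
		assert(head - tail <= RING_SIZE);
	}
	return 0;
}
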
@@ -896,6 +965,71 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
 	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
 }
 
+static int srp_response_common(struct srp_target_port *target, s32 req_delta,
+			       void *rsp, int len)
+{
+	struct ib_device *dev;
+	unsigned long flags;
+	struct srp_iu *iu;
+	int err = 1;
+
+	dev = target->srp_host->srp_dev->dev;
+
+	spin_lock_irqsave(target->scsi_host->host_lock, flags);
+	target->req_lim += req_delta;
+
+	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
+	if (!iu) {
+		shost_printk(KERN_ERR, target->scsi_host, PFX
+			     "no IU available to send response\n");
+		goto out;
+	}
+
+	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
+	memcpy(iu->buf, rsp, len);
+	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
+
+	err = __srp_post_send(target, iu, len);
+	if (err)
+		shost_printk(KERN_ERR, target->scsi_host, PFX
+			     "unable to post response: %d\n", err);
+
+out:
+	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
+	return err;
+}
+
+static void srp_process_cred_req(struct srp_target_port *target,
+				 struct srp_cred_req *req)
+{
+	struct srp_cred_rsp rsp = {
+		.opcode = SRP_CRED_RSP,
+		.tag = req->tag,
+	};
+	s32 delta = be32_to_cpu(req->req_lim_delta);
+
+	if (srp_response_common(target, delta, &rsp, sizeof rsp))
+		shost_printk(KERN_ERR, target->scsi_host, PFX
+			     "problems processing SRP_CRED_REQ\n");
+}
+
+static void srp_process_aer_req(struct srp_target_port *target,
+				struct srp_aer_req *req)
+{
+	struct srp_aer_rsp rsp = {
+		.opcode = SRP_AER_RSP,
+		.tag = req->tag,
+	};
+	s32 delta = be32_to_cpu(req->req_lim_delta);
+
+	shost_printk(KERN_ERR, target->scsi_host, PFX
+		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));
+
+	if (srp_response_common(target, delta, &rsp, sizeof rsp))
+		shost_printk(KERN_ERR, target->scsi_host, PFX
+			     "problems processing SRP_AER_REQ\n");
+}
+
 static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 {
 	struct ib_device *dev;
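
Note: srp_response_common() above brackets the memcpy() into the tx IU
with a sync-for-cpu/sync-for-device pair. A stand-alone sketch of that
pattern; the dma_sync_* stubs are placeholders for the real
ib_dma_sync_single_for_cpu()/ib_dma_sync_single_for_device() calls, which
take the ib_device and the IU's DMA address instead.

#include <string.h>

static void dma_sync_for_cpu(void *buf, int len)    { (void)buf; (void)len; }
static void dma_sync_for_device(void *buf, int len) { (void)buf; (void)len; }

static void fill_iu(void *iu_buf, const void *rsp, int len)
{
	dma_sync_for_cpu(iu_buf, len);		/* CPU may now write the buffer */
	memcpy(iu_buf, rsp, len);
	dma_sync_for_device(iu_buf, len);	/* hand it back before posting */
}

int main(void)
{
	char iu[64];
	char rsp[16] = { 0x41 };		/* stand-in response payload */

	fill_iu(iu, rsp, sizeof rsp);
	return 0;
}
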
@@ -923,6 +1057,14 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 		srp_process_rsp(target, iu->buf);
 		break;
 
+	case SRP_CRED_REQ:
+		srp_process_cred_req(target, iu->buf);
+		break;
+
+	case SRP_AER_REQ:
+		srp_process_aer_req(target, iu->buf);
+		break;
+
 	case SRP_T_LOGOUT:
 		/* XXX Handle target logout */
 		shost_printk(KERN_WARNING, target->scsi_host,
@@ -981,61 +1123,6 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 	}
 }
 
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.  Lock cannot be dropped between call here and
- * call to __srp_post_send().
- */
-static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
-				      enum srp_request_type req_type)
-{
-	s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
-
-	srp_send_completion(target->send_cq, target);
-
-	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
-		return NULL;
-
-	if (target->req_lim < min) {
-		++target->zero_req_lim;
-		return NULL;
-	}
-
-	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
-}
-
-/*
- * Must be called with target->scsi_host->host_lock held to protect
- * req_lim and tx_head.
- */
-static int __srp_post_send(struct srp_target_port *target,
-			   struct srp_iu *iu, int len)
-{
-	struct ib_sge list;
-	struct ib_send_wr wr, *bad_wr;
-	int ret = 0;
-
-	list.addr = iu->dma;
-	list.length = len;
-	list.lkey = target->srp_host->srp_dev->mr->lkey;
-
-	wr.next = NULL;
-	wr.wr_id = target->tx_head & SRP_SQ_SIZE;
-	wr.sg_list = &list;
-	wr.num_sge = 1;
-	wr.opcode = IB_WR_SEND;
-	wr.send_flags = IB_SEND_SIGNALED;
-
-	ret = ib_post_send(target->qp, &wr, &bad_wr);
-
-	if (!ret) {
-		++target->tx_head;
-		--target->req_lim;
-	}
-
-	return ret;
-}
-
 static int srp_queuecommand(struct scsi_cmnd *scmnd,
 			    void (*done)(struct scsi_cmnd *))
 {
@@ -1056,7 +1143,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 		return 0;
 	}
 
-	iu = __srp_get_tx_iu(target, SRP_REQ_NORMAL);
+	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
 	if (!iu)
 		goto err;
 
@@ -1064,7 +1151,7 @@ static int srp_queuecommand(struct scsi_cmnd *scmnd,
 	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
 				   DMA_TO_DEVICE);
 
-	req = list_entry(target->free_reqs.next, struct srp_request, list);
+	req = list_first_entry(&target->free_reqs, struct srp_request, list);
 
 	scmnd->scsi_done = done;
 	scmnd->result = 0;
@@ -1121,7 +1208,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
 			goto err;
 	}
 
-	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+	for (i = 0; i < SRP_SQ_SIZE; ++i) {
 		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
 						  srp_max_iu_len,
 						  GFP_KERNEL, DMA_TO_DEVICE);
@@ -1137,7 +1224,7 @@ err:
 		target->rx_ring[i] = NULL;
 	}
 
-	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+	for (i = 0; i < SRP_SQ_SIZE; ++i) {
 		srp_free_iu(target->srp_host, target->tx_ring[i]);
 		target->tx_ring[i] = NULL;
 	}
@@ -1252,8 +1339,13 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 		target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
 		target->req_lim = be32_to_cpu(rsp->req_lim_delta);
 
-		target->scsi_host->can_queue = min(target->req_lim,
-						   target->scsi_host->can_queue);
+		/*
+		 * Reserve credits for task management so we don't
+		 * bounce requests back to the SCSI mid-layer.
+		 */
+		target->scsi_host->can_queue
+			= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
+			      target->scsi_host->can_queue);
 	} else {
 		shost_printk(KERN_WARNING, target->scsi_host,
 			     PFX "Unhandled RSP opcode %#x\n", opcode);
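
Note: the new clamp keeps SRP_TSK_MGMT_SQ_SIZE credits out of reach of
normal I/O so a task-management request can always be sent. Worked with
hypothetical numbers (the reserve size of 1 is assumed):

#include <stdio.h>

#define SRP_TSK_MGMT_SQ_SIZE 1		/* assumed reserve size */

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	int req_lim = 32;	/* credits granted in the login response */
	int can_queue = 62;	/* what the SCSI host started with */

	/* leave the task-management reserve untouched by normal commands */
	can_queue = min_int(req_lim - SRP_TSK_MGMT_SQ_SIZE, can_queue);
	printf("can_queue = %d\n", can_queue);	/* prints 31 */
	return 0;
}
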
@@ -1350,6 +1442,7 @@ static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
 static int srp_send_tsk_mgmt(struct srp_target_port *target,
 			     struct srp_request *req, u8 func)
 {
+	struct ib_device *dev = target->srp_host->srp_dev->dev;
 	struct srp_iu *iu;
 	struct srp_tsk_mgmt *tsk_mgmt;
 
@@ -1363,10 +1456,12 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 
 	init_completion(&req->done);
 
-	iu = __srp_get_tx_iu(target, SRP_REQ_TASK_MGMT);
+	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
 	if (!iu)
 		goto out;
 
+	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
+				   DMA_TO_DEVICE);
 	tsk_mgmt = iu->buf;
 	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
 
@@ -1376,6 +1471,8 @@ static int srp_send_tsk_mgmt(struct srp_target_port *target,
 	tsk_mgmt->tsk_mgmt_func = func;
 	tsk_mgmt->task_tag = req->index;
 
+	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
+				      DMA_TO_DEVICE);
 	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
 		goto out;
 
@@ -1626,9 +1723,9 @@ static struct scsi_host_template srp_template = {
 	.eh_abort_handler	= srp_abort,
 	.eh_device_reset_handler = srp_reset_device,
 	.eh_host_reset_handler	= srp_reset_host,
-	.can_queue		= SRP_SQ_SIZE,
+	.can_queue		= SRP_CMD_SQ_SIZE,
 	.this_id		= -1,
-	.cmd_per_lun		= SRP_SQ_SIZE,
+	.cmd_per_lun		= SRP_CMD_SQ_SIZE,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= srp_host_attrs
 };
@@ -1813,7 +1910,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 			printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
 			goto out;
 		}
-		target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
+		target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
 		break;
 
 	case SRP_OPT_IO_CLASS:
@@ -1891,7 +1988,7 @@ static ssize_t srp_create_target(struct device *dev,
 
 	INIT_LIST_HEAD(&target->free_reqs);
 	INIT_LIST_HEAD(&target->req_queue);
-	for (i = 0; i < SRP_SQ_SIZE; ++i) {
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 		target->req_ring[i].index = i;
 		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
 	}
@@ -2159,6 +2256,9 @@ static int __init srp_init_module(void)
 {
 	int ret;
 
+	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
+	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+
 	if (srp_sg_tablesize > 255) {
 		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
 		srp_sg_tablesize = 255;
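
Note: the BUILD_BUG_ON_NOT_POWER_OF_2() checks are what make the
& SRP_SQ_MASK / & SRP_RQ_MASK indexing above safe. A stand-alone
equivalent using C11 _Static_assert, with hypothetical sizes:

/* (n & (n - 1)) clears the lowest set bit, so it is zero exactly for
 * powers of two; the n != 0 term rules out zero itself */
#define IS_POWER_OF_2(n)	((n) != 0 && (((n) & ((n) - 1)) == 0))

#define SQ_SIZE 64	/* stand-ins for SRP_SQ_SIZE and SRP_RQ_SIZE */
#define RQ_SIZE 64

_Static_assert(IS_POWER_OF_2(SQ_SIZE), "SQ_SIZE must be a power of 2");
_Static_assert(IS_POWER_OF_2(RQ_SIZE), "RQ_SIZE must be a power of 2");

int main(void)
{
	return 0;
}
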