author	Bart Van Assche <bvanassche@acm.org>	2010-08-30 15:27:20 -0400
committer	Roland Dreier <rolandd@cisco.com>	2010-10-23 01:19:10 -0400
commit	dd5e6e38b2b8bd8bf71cae800e2b613e85ef1522 (patch)
tree	da3ec14113d5f7b235df82de87ec9831e4991a1c
parent	fb50a83d8c7cf8c6548166fe6775ad4f41e5ab72 (diff)
IB/srp: Preparation for transmit ring response allocation
The transmit ring in ib_srp (srp_target.tx_ring) is currently only used
for allocating requests sent by the initiator to the target. This patch
prepares using that ring for allocation of both requests and responses.

Also, this patch differentiates the uses of SRP_SQ_SIZE, increases the
size of the IB send completion queue by one element and reserves one
transmit ring slot for SRP_TSK_MGMT requests.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: David Dillow <dillowda@ornl.gov>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
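For orientation, the new size constants in ib_srp.h form a simple budget over
the send queue. A worked sketch of the arithmetic (values assume the default
SRP_RQ_SHIFT of 6; this mirrors the enum added below and is not extra driver
code):

	SRP_RQ_SIZE          = 1 << 6                         = 64  /* receive ring slots    */
	SRP_RQ_MASK          = SRP_RQ_SIZE - 1                = 63  /* receive-ring index mask */
	SRP_SQ_SIZE          = SRP_RQ_SIZE                    = 64  /* transmit ring slots   */
	SRP_SQ_MASK          = SRP_SQ_SIZE - 1                = 63  /* transmit-ring index mask */
	SRP_RSP_SQ_SIZE      = 1                                    /* reserved for responses */
	SRP_REQ_SQ_SIZE      = SRP_SQ_SIZE - SRP_RSP_SQ_SIZE  = 63  /* available for requests */
	SRP_TSK_MGMT_SQ_SIZE = 1                                    /* reserved for task mgmt */
	SRP_CMD_SQ_SIZE      = SRP_REQ_SQ_SIZE
	                       - SRP_TSK_MGMT_SQ_SIZE         = 62  /* SCSI commands          */

so .can_queue drops from 63 (the old SRP_SQ_SIZE) to 62 (SRP_CMD_SQ_SIZE).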
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.c	27
-rw-r--r--	drivers/infiniband/ulp/srp/ib_srp.h	13
2 files changed, 25 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 7f8f16bad753..b8b09a4f012a 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -291,7 +291,7 @@ static void srp_free_target_ib(struct srp_target_port *target)
 
 	for (i = 0; i < SRP_RQ_SIZE; ++i)
 		srp_free_iu(target->srp_host, target->rx_ring[i]);
-	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
+	for (i = 0; i < SRP_SQ_SIZE; ++i)
 		srp_free_iu(target->srp_host, target->tx_ring[i]);
 }
 
@@ -822,7 +822,7 @@ static int srp_post_recv(struct srp_target_port *target)
 
 	spin_lock_irqsave(target->scsi_host->host_lock, flags);
 
-	next = target->rx_head & (SRP_RQ_SIZE - 1);
+	next = target->rx_head & SRP_RQ_MASK;
 	wr.wr_id = next;
 	iu = target->rx_ring[next];
 
@@ -989,19 +989,19 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
 static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
 				      enum srp_request_type req_type)
 {
-	s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
+	s32 rsv = (req_type == SRP_REQ_TASK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
 
 	srp_send_completion(target->send_cq, target);
 
 	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
 		return NULL;
 
-	if (target->req_lim < min) {
+	if (target->req_lim <= rsv) {
 		++target->zero_req_lim;
 		return NULL;
 	}
 
-	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
+	return target->tx_ring[target->tx_head & SRP_SQ_MASK];
 }
 
 /*
@@ -1020,7 +1020,7 @@ static int __srp_post_send(struct srp_target_port *target,
 	list.lkey = target->srp_host->srp_dev->mr->lkey;
 
 	wr.next = NULL;
-	wr.wr_id = target->tx_head & SRP_SQ_SIZE;
+	wr.wr_id = target->tx_head & SRP_SQ_MASK;
 	wr.sg_list = &list;
 	wr.num_sge = 1;
 	wr.opcode = IB_WR_SEND;
@@ -1121,7 +1121,7 @@ static int srp_alloc_iu_bufs(struct srp_target_port *target)
 		goto err;
 	}
 
-	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+	for (i = 0; i < SRP_SQ_SIZE; ++i) {
 		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
 						  srp_max_iu_len,
 						  GFP_KERNEL, DMA_TO_DEVICE);
@@ -1137,7 +1137,7 @@ err:
 		target->rx_ring[i] = NULL;
 	}
 
-	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
+	for (i = 0; i < SRP_SQ_SIZE; ++i) {
 		srp_free_iu(target->srp_host, target->tx_ring[i]);
 		target->tx_ring[i] = NULL;
 	}
@@ -1626,9 +1626,9 @@ static struct scsi_host_template srp_template = {
 	.eh_abort_handler		= srp_abort,
 	.eh_device_reset_handler	= srp_reset_device,
 	.eh_host_reset_handler		= srp_reset_host,
-	.can_queue			= SRP_SQ_SIZE,
+	.can_queue			= SRP_CMD_SQ_SIZE,
 	.this_id			= -1,
-	.cmd_per_lun			= SRP_SQ_SIZE,
+	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
 	.use_clustering			= ENABLE_CLUSTERING,
 	.shost_attrs			= srp_host_attrs
 };
@@ -1813,7 +1813,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 			printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
 			goto out;
 		}
-		target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
+		target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
 		break;
 
 	case SRP_OPT_IO_CLASS:
@@ -1891,7 +1891,7 @@ static ssize_t srp_create_target(struct device *dev,
 
 	INIT_LIST_HEAD(&target->free_reqs);
 	INIT_LIST_HEAD(&target->req_queue);
-	for (i = 0; i < SRP_SQ_SIZE; ++i) {
+	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
 		target->req_ring[i].index = i;
 		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
 	}
@@ -2159,6 +2159,9 @@ static int __init srp_init_module(void)
 {
 	int ret;
 
+	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_SQ_SIZE);
+	BUILD_BUG_ON_NOT_POWER_OF_2(SRP_RQ_SIZE);
+
 	if (srp_sg_tablesize > 255) {
 		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
 		srp_sg_tablesize = 255;
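The two BUILD_BUG_ON_NOT_POWER_OF_2() assertions added above guard the
mask-based ring indexing this patch introduces: head & (size - 1) only equals
head % size when size is a power of two. A minimal standalone sketch of that
invariant (ordinary user-space C for illustration, not driver code):

	#include <assert.h>

	int main(void)
	{
		unsigned size = 64;		/* SRP_SQ_SIZE with SRP_RQ_SHIFT = 6 */
		unsigned mask = size - 1;	/* SRP_SQ_MASK */
		unsigned head;

		/* The free-running head counter wraps modulo the ring size. */
		for (head = 0; head < 4 * size; ++head)
			assert((head & mask) == head % size);
		return 0;
	}

The old code's tx_head & SRP_SQ_SIZE worked only because the old SRP_SQ_SIZE
of 63 happened to be all-ones in binary, masking over a ring of
SRP_SQ_SIZE + 1 entries; the new named masks make the intent explicit.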
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index 5a80eac6fdaa..7a959d5f2fa6 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -59,7 +59,14 @@ enum {
 
 	SRP_RQ_SHIFT		= 6,
 	SRP_RQ_SIZE		= 1 << SRP_RQ_SHIFT,
-	SRP_SQ_SIZE		= SRP_RQ_SIZE - 1,
+	SRP_RQ_MASK		= SRP_RQ_SIZE - 1,
+
+	SRP_SQ_SIZE		= SRP_RQ_SIZE,
+	SRP_SQ_MASK		= SRP_SQ_SIZE - 1,
+	SRP_RSP_SQ_SIZE		= 1,
+	SRP_REQ_SQ_SIZE		= SRP_SQ_SIZE - SRP_RSP_SQ_SIZE,
+	SRP_TSK_MGMT_SQ_SIZE	= 1,
+	SRP_CMD_SQ_SIZE		= SRP_REQ_SQ_SIZE - SRP_TSK_MGMT_SQ_SIZE,
 
 	SRP_TAG_TSK_MGMT	= 1 << (SRP_RQ_SHIFT + 1),
 
@@ -144,11 +151,11 @@ struct srp_target_port {
 
 	unsigned		tx_head;
 	unsigned		tx_tail;
-	struct srp_iu	       *tx_ring[SRP_SQ_SIZE + 1];
+	struct srp_iu	       *tx_ring[SRP_SQ_SIZE];
 
 	struct list_head	free_reqs;
 	struct list_head	req_queue;
-	struct srp_request	req_ring[SRP_SQ_SIZE];
+	struct srp_request	req_ring[SRP_CMD_SQ_SIZE];
 
 	struct work_struct	work;
 
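To see the effect of the reserved transmit slot, consider the rewritten credit
check in __srp_get_tx_iu() above. A minimal sketch of just that logic (a
hypothetical standalone helper, not a function in the driver):

	enum { SRP_TSK_MGMT_SQ_SIZE = 1 };	/* as in ib_srp.h above */

	/*
	 * Returns nonzero when a send may consume a request-limit credit.
	 * Ordinary SCSI commands must leave SRP_TSK_MGMT_SQ_SIZE credits
	 * untouched; a task-management request may use the last one.
	 */
	static int srp_credit_available(int req_lim, int is_tsk_mgmt)
	{
		int rsv = is_tsk_mgmt ? 0 : SRP_TSK_MGMT_SQ_SIZE;

		return req_lim > rsv;
	}

With req_lim == 1, srp_credit_available(1, 0) is false, so a normal command is
refused and zero_req_lim is bumped, while srp_credit_available(1, 1) is true,
so an abort or reset can still reach the target.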