author     Bart Van Assche <bart.vanassche@gmail.com>    2010-02-02 14:23:54 -0500
committer  Roland Dreier <rolandd@cisco.com>             2010-03-02 02:51:38 -0500
commit     9c03dc9f19351edf25c1107e3cfd3cc538c7ab9e (patch)
tree       b96f2a43eeeed19d60dc0e6337d9fc91f1f6d804 /drivers/infiniband
parent     676ad585531e965416fd958747894541dabcec96 (diff)
IB/srp: Split send and receive CQs to reduce number of interrupts
We can reduce the number of IB interrupts per srp_queuecommand() call from
two to one by using separate CQs for send and receive completions and by
polling for send completions every time a TX IU is allocated.
Receive completion events still trigger an interrupt.
Signed-off-by: Bart Van Assche <bart.vanassche@gmail.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
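
The pattern this patch introduces can be summarized in a short sketch: create one CQ per direction, arm only the receive CQ for completion interrupts, and reap send completions by polling whenever a transmit slot is about to be consumed. The sketch below is illustrative only; the example_* names, the EXAMPLE_* constants, and the stripped-down target structure are invented for this note and are not part of the driver. Only the verbs calls (ib_create_cq(), ib_poll_cq(), ib_req_notify_cq()) correspond to the kernel API of this era, matching their use in the diff below.

/*
 * Illustrative sketch, not driver code: example_* and EXAMPLE_* are
 * hypothetical names; only the ib_* verbs calls are the real API.
 */
#include <linux/err.h>
#include <rdma/ib_verbs.h>

#define EXAMPLE_RQ_SIZE	64
#define EXAMPLE_SQ_SIZE	(EXAMPLE_RQ_SIZE - 1)

struct example_target {
	struct ib_cq	*recv_cq;	/* armed: completions raise interrupts */
	struct ib_cq	*send_cq;	/* never armed: drained by polling */
	unsigned int	 tx_head;	/* incremented when a send is posted */
	unsigned int	 tx_tail;	/* incremented when a send completes */
};

static void example_recv_completion(struct ib_cq *cq, void *ctx);
static void example_send_completion(struct ib_cq *cq, void *ctx);

/* Create one CQ per direction and request interrupts only for receives. */
static int example_create_cqs(struct ib_device *dev, struct example_target *t)
{
	t->recv_cq = ib_create_cq(dev, example_recv_completion, NULL, t,
				  EXAMPLE_RQ_SIZE, 0);
	if (IS_ERR(t->recv_cq))
		return PTR_ERR(t->recv_cq);

	t->send_cq = ib_create_cq(dev, example_send_completion, NULL, t,
				  EXAMPLE_SQ_SIZE, 0);
	if (IS_ERR(t->send_cq)) {
		ib_destroy_cq(t->recv_cq);
		return PTR_ERR(t->send_cq);
	}

	/* Only the receive CQ is armed, so only receives interrupt the CPU. */
	ib_req_notify_cq(t->recv_cq, IB_CQ_NEXT_COMP);
	return 0;
}

/* Interrupt path: re-arm for the next receive, then drain what is queued. */
static void example_recv_completion(struct ib_cq *cq, void *ctx)
{
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0)
		;	/* a real driver would hand the IU to the SCSI layer here */
}

/* Polled path: reap finished sends; each one frees a transmit slot. */
static void example_send_completion(struct ib_cq *cq, void *ctx)
{
	struct example_target *t = ctx;
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0)
		++t->tx_tail;
}

/* Called before posting a send: poll first, then check for a free slot. */
static bool example_tx_slot_available(struct example_target *t)
{
	example_send_completion(t->send_cq, t);
	return t->tx_head - t->tx_tail < EXAMPLE_SQ_SIZE;
}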
Diffstat (limited to 'drivers/infiniband')
 -rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 72
 -rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h |  6
 2 files changed, 53 insertions(+), 25 deletions(-)
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 54c8fe25c423..441ea7c2e7c4 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -80,7 +80,8 @@ MODULE_PARM_DESC(mellanox_workarounds,
 
 static void srp_add_one(struct ib_device *device);
 static void srp_remove_one(struct ib_device *device);
-static void srp_completion(struct ib_cq *cq, void *target_ptr);
+static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
+static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
 
 static struct scsi_transport_template *ib_srp_transport_template;
@@ -227,14 +228,22 @@ static int srp_create_target_ib(struct srp_target_port *target)
 	if (!init_attr)
 		return -ENOMEM;
 
-	target->cq = ib_create_cq(target->srp_host->srp_dev->dev,
-				  srp_completion, NULL, target, SRP_CQ_SIZE, 0);
-	if (IS_ERR(target->cq)) {
-		ret = PTR_ERR(target->cq);
+	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+				       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
+	if (IS_ERR(target->recv_cq)) {
+		ret = PTR_ERR(target->recv_cq);
 		goto out;
 	}
 
-	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);
+	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+				       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
+	if (IS_ERR(target->send_cq)) {
+		ret = PTR_ERR(target->send_cq);
+		ib_destroy_cq(target->recv_cq);
+		goto out;
+	}
+
+	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
 
 	init_attr->event_handler       = srp_qp_event;
 	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
@@ -243,20 +252,22 @@ static int srp_create_target_ib(struct srp_target_port *target)
 	init_attr->cap.max_send_sge    = 1;
 	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
 	init_attr->qp_type             = IB_QPT_RC;
-	init_attr->send_cq             = target->cq;
-	init_attr->recv_cq             = target->cq;
+	init_attr->send_cq             = target->send_cq;
+	init_attr->recv_cq             = target->recv_cq;
 
 	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
 	if (IS_ERR(target->qp)) {
 		ret = PTR_ERR(target->qp);
-		ib_destroy_cq(target->cq);
+		ib_destroy_cq(target->send_cq);
+		ib_destroy_cq(target->recv_cq);
 		goto out;
 	}
 
 	ret = srp_init_qp(target, target->qp);
 	if (ret) {
 		ib_destroy_qp(target->qp);
-		ib_destroy_cq(target->cq);
+		ib_destroy_cq(target->send_cq);
+		ib_destroy_cq(target->recv_cq);
 		goto out;
 	}
 
@@ -270,7 +281,8 @@ static void srp_free_target_ib(struct srp_target_port *target)
 	int i;
 
 	ib_destroy_qp(target->qp);
-	ib_destroy_cq(target->cq);
+	ib_destroy_cq(target->send_cq);
+	ib_destroy_cq(target->recv_cq);
 
 	for (i = 0; i < SRP_RQ_SIZE; ++i)
 		srp_free_iu(target->srp_host, target->rx_ring[i]);
@@ -568,7 +580,9 @@ static int srp_reconnect_target(struct srp_target_port *target)
 	if (ret)
 		goto err;
 
-	while (ib_poll_cq(target->cq, 1, &wc) > 0)
+	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
+		; /* nothing */
+	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
 		; /* nothing */
 
 	spin_lock_irq(target->scsi_host->host_lock);
@@ -851,7 +865,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 	struct srp_iu *iu;
 	u8 opcode;
 
-	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
+	iu = target->rx_ring[wc->wr_id];
 
 	dev = target->srp_host->srp_dev->dev;
 	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
@@ -898,7 +912,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
 				      DMA_FROM_DEVICE);
 }
 
-static void srp_completion(struct ib_cq *cq, void *target_ptr)
+static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
 {
 	struct srp_target_port *target = target_ptr;
 	struct ib_wc wc;
@@ -907,17 +921,31 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
 	while (ib_poll_cq(cq, 1, &wc) > 0) {
 		if (wc.status) {
 			shost_printk(KERN_ERR, target->scsi_host,
-				     PFX "failed %s status %d\n",
-				     wc.wr_id & SRP_OP_RECV ? "receive" : "send",
+				     PFX "failed receive status %d\n",
 				     wc.status);
 			target->qp_in_error = 1;
 			break;
 		}
 
-		if (wc.wr_id & SRP_OP_RECV)
-			srp_handle_recv(target, &wc);
-		else
-			++target->tx_tail;
+		srp_handle_recv(target, &wc);
+	}
+}
+
+static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
+{
+	struct srp_target_port *target = target_ptr;
+	struct ib_wc wc;
+
+	while (ib_poll_cq(cq, 1, &wc) > 0) {
+		if (wc.status) {
+			shost_printk(KERN_ERR, target->scsi_host,
+				     PFX "failed send status %d\n",
+				     wc.status);
+			target->qp_in_error = 1;
+			break;
+		}
+
+		++target->tx_tail;
 	}
 }
 
@@ -930,7 +958,7 @@ static int __srp_post_recv(struct srp_target_port *target)
 	int ret;
 
 	next	 = target->rx_head & (SRP_RQ_SIZE - 1);
-	wr.wr_id = next | SRP_OP_RECV;
+	wr.wr_id = next;
 	iu	 = target->rx_ring[next];
 
 	list.addr   = iu->dma;
@@ -970,6 +998,8 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
 {
 	s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
 
+	srp_send_completion(target->send_cq, target);
+
 	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
 		return NULL;
 
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index e185b907fc12..5a80eac6fdaa 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -60,7 +60,6 @@ enum {
 	SRP_RQ_SHIFT	= 6,
 	SRP_RQ_SIZE	= 1 << SRP_RQ_SHIFT,
 	SRP_SQ_SIZE	= SRP_RQ_SIZE - 1,
-	SRP_CQ_SIZE	= SRP_SQ_SIZE + SRP_RQ_SIZE,
 
 	SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
 
@@ -69,8 +68,6 @@ enum {
 	SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4
 };
 
-#define SRP_OP_RECV		(1 << 31)
-
 enum srp_target_state {
 	SRP_TARGET_LIVE,
 	SRP_TARGET_CONNECTING,
@@ -133,7 +130,8 @@ struct srp_target_port {
 	int			path_query_id;
 
 	struct ib_cm_id	       *cm_id;
-	struct ib_cq	       *cq;
+	struct ib_cq	       *recv_cq;
+	struct ib_cq	       *send_cq;
 	struct ib_qp	       *qp;
 
 	int			max_ti_iu_len;