Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 91
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.h |  6
2 files changed, 65 insertions, 32 deletions
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 54c8fe25c423..ed3f9ebae882 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -80,7 +80,8 @@ MODULE_PARM_DESC(mellanox_workarounds,
 
 static void srp_add_one(struct ib_device *device);
 static void srp_remove_one(struct ib_device *device);
-static void srp_completion(struct ib_cq *cq, void *target_ptr);
+static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
+static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
 
 static struct scsi_transport_template *ib_srp_transport_template;
@@ -227,14 +228,21 @@ static int srp_create_target_ib(struct srp_target_port *target)
         if (!init_attr)
                 return -ENOMEM;
 
-        target->cq = ib_create_cq(target->srp_host->srp_dev->dev,
-                        srp_completion, NULL, target, SRP_CQ_SIZE, 0);
-        if (IS_ERR(target->cq)) {
-                ret = PTR_ERR(target->cq);
-                goto out;
+        target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+                        srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
+        if (IS_ERR(target->recv_cq)) {
+                ret = PTR_ERR(target->recv_cq);
+                goto err;
         }
 
-        ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);
+        target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+                        srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
+        if (IS_ERR(target->send_cq)) {
+                ret = PTR_ERR(target->send_cq);
+                goto err_recv_cq;
+        }
+
+        ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
 
         init_attr->event_handler = srp_qp_event;
         init_attr->cap.max_send_wr = SRP_SQ_SIZE;
@@ -243,24 +251,32 @@ static int srp_create_target_ib(struct srp_target_port *target)
         init_attr->cap.max_send_sge = 1;
         init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
         init_attr->qp_type = IB_QPT_RC;
-        init_attr->send_cq = target->cq;
-        init_attr->recv_cq = target->cq;
+        init_attr->send_cq = target->send_cq;
+        init_attr->recv_cq = target->recv_cq;
 
         target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
         if (IS_ERR(target->qp)) {
                 ret = PTR_ERR(target->qp);
-                ib_destroy_cq(target->cq);
-                goto out;
+                goto err_send_cq;
         }
 
         ret = srp_init_qp(target, target->qp);
-        if (ret) {
-                ib_destroy_qp(target->qp);
-                ib_destroy_cq(target->cq);
-                goto out;
-        }
+        if (ret)
+                goto err_qp;
 
-out:
+        kfree(init_attr);
+        return 0;
+
+err_qp:
+        ib_destroy_qp(target->qp);
+
+err_send_cq:
+        ib_destroy_cq(target->send_cq);
+
+err_recv_cq:
+        ib_destroy_cq(target->recv_cq);
+
+err:
         kfree(init_attr);
         return ret;
 }
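The reworked error path above replaces per-branch cleanup with a single unwind ladder at the end of srp_create_target_ib(). For readers unfamiliar with that kernel idiom, here is a minimal, self-contained sketch of the same goto-based unwind shape; the acquire()/release() helpers are hypothetical stand-ins for ib_create_cq()/ib_create_qp() and their destroy counterparts, not driver code.

/* Illustrative sketch only: goto-based unwind, one label per resource,
 * released in reverse order of acquisition.  acquire()/release() are
 * hypothetical stand-ins, not verbs API calls. */
#include <errno.h>
#include <stdlib.h>

struct res { int id; };

static struct res *acquire(int id)
{
        struct res *r = malloc(sizeof(*r));

        if (r)
                r->id = id;
        return r;
}

static void release(struct res *r)
{
        free(r);
}

static int setup(struct res **recv_cq, struct res **send_cq, struct res **qp)
{
        int ret = -ENOMEM;

        *recv_cq = acquire(0);
        if (!*recv_cq)
                goto err;

        *send_cq = acquire(1);
        if (!*send_cq)
                goto err_recv_cq;

        *qp = acquire(2);
        if (!*qp)
                goto err_send_cq;

        return 0;               /* success: caller now owns all three */

err_send_cq:                    /* unwind in reverse order of acquisition */
        release(*send_cq);
err_recv_cq:
        release(*recv_cq);
err:
        return ret;
}

int main(void)
{
        struct res *recv_cq, *send_cq, *qp;

        if (setup(&recv_cq, &send_cq, &qp))
                return EXIT_FAILURE;
        release(qp);
        release(send_cq);
        release(recv_cq);
        return EXIT_SUCCESS;
}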
@@ -270,7 +286,8 @@ static void srp_free_target_ib(struct srp_target_port *target)
         int i;
 
         ib_destroy_qp(target->qp);
-        ib_destroy_cq(target->cq);
+        ib_destroy_cq(target->send_cq);
+        ib_destroy_cq(target->recv_cq);
 
         for (i = 0; i < SRP_RQ_SIZE; ++i)
                 srp_free_iu(target->srp_host, target->rx_ring[i]);
@@ -568,7 +585,9 @@ static int srp_reconnect_target(struct srp_target_port *target)
         if (ret)
                 goto err;
 
-        while (ib_poll_cq(target->cq, 1, &wc) > 0)
+        while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
+                ; /* nothing */
+        while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
                 ; /* nothing */
 
         spin_lock_irq(target->scsi_host->host_lock);
@@ -851,7 +870,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
         struct srp_iu *iu;
         u8 opcode;
 
-        iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
+        iu = target->rx_ring[wc->wr_id];
 
         dev = target->srp_host->srp_dev->dev;
         ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
@@ -898,7 +917,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
                                 DMA_FROM_DEVICE);
 }
 
-static void srp_completion(struct ib_cq *cq, void *target_ptr)
+static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
 {
         struct srp_target_port *target = target_ptr;
         struct ib_wc wc;
@@ -907,17 +926,31 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
         while (ib_poll_cq(cq, 1, &wc) > 0) {
                 if (wc.status) {
                         shost_printk(KERN_ERR, target->scsi_host,
-                                     PFX "failed %s status %d\n",
-                                     wc.wr_id & SRP_OP_RECV ? "receive" : "send",
+                                     PFX "failed receive status %d\n",
                                      wc.status);
                         target->qp_in_error = 1;
                         break;
                 }
 
-                if (wc.wr_id & SRP_OP_RECV)
-                        srp_handle_recv(target, &wc);
-                else
-                        ++target->tx_tail;
+                srp_handle_recv(target, &wc);
+        }
+}
+
+static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
+{
+        struct srp_target_port *target = target_ptr;
+        struct ib_wc wc;
+
+        while (ib_poll_cq(cq, 1, &wc) > 0) {
+                if (wc.status) {
+                        shost_printk(KERN_ERR, target->scsi_host,
+                                     PFX "failed send status %d\n",
+                                     wc.status);
+                        target->qp_in_error = 1;
+                        break;
+                }
+
+                ++target->tx_tail;
         }
 }
 
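The hunk above is the core of the change: the old srp_completion() demultiplexed on a wr_id tag bit (SRP_OP_RECV), while the new code gives each completion queue its own handler. A toy userspace model of that dispatch change, in plain C with hypothetical names and no verbs API:

#include <stdint.h>
#include <stdio.h>

#define OP_RECV (1u << 31)      /* tag bit, in the spirit of SRP_OP_RECV */

/* Old shape: one handler tells receives from sends via a wr_id tag bit. */
static void combined_completion(uint32_t wr_id)
{
        if (wr_id & OP_RECV)
                printf("recv completion, ring index %u\n", wr_id & ~OP_RECV);
        else
                printf("send completion, ring index %u\n", wr_id);
}

/* New shape: one handler per queue, wr_id is a plain ring index. */
static void recv_completion(uint32_t wr_id)
{
        printf("recv completion, ring index %u\n", wr_id);
}

static void send_completion(uint32_t wr_id)
{
        printf("send completion, ring index %u\n", wr_id);
}

int main(void)
{
        combined_completion(5 | OP_RECV);
        combined_completion(3);

        recv_completion(5);
        send_completion(3);
        return 0;
}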
@@ -930,7 +963,7 @@ static int __srp_post_recv(struct srp_target_port *target)
         int ret;
 
         next = target->rx_head & (SRP_RQ_SIZE - 1);
-        wr.wr_id = next | SRP_OP_RECV;
+        wr.wr_id = next;
         iu = target->rx_ring[next];
 
         list.addr = iu->dma;
@@ -970,6 +1003,8 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
 {
         s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
 
+        srp_send_completion(target->send_cq, target);
+
         if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
                 return NULL;
 
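With a dedicated send CQ, __srp_get_tx_iu() now reaps send completions itself before deciding whether the send queue is full. A rough standalone model of that head/tail accounting over a power-of-two ring (made-up names and sizes, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8U                    /* power of two, like SRP_RQ_SIZE */

static uint32_t tx_head, tx_tail;       /* free-running counters */
static uint32_t completed;              /* completions the "hardware" reported */

/* Stand-in for srp_send_completion(): fold completions into tx_tail. */
static void reap_send_completions(void)
{
        tx_tail = completed;
}

static bool get_tx_slot(uint32_t *index)
{
        reap_send_completions();        /* free slots before the full check */

        if (tx_head - tx_tail >= RING_SIZE)
                return false;           /* send queue genuinely full */

        *index = tx_head & (RING_SIZE - 1);     /* counter -> ring index */
        ++tx_head;
        return true;
}

int main(void)
{
        uint32_t idx;

        while (get_tx_slot(&idx))
                printf("posted send, slot %u\n", idx);  /* fills all slots */

        completed = 2;                  /* two sends complete */
        while (get_tx_slot(&idx))
                printf("posted after reap, slot %u\n", idx);
        return 0;
}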
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index e185b907fc12..5a80eac6fdaa 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -60,7 +60,6 @@ enum {
         SRP_RQ_SHIFT = 6,
         SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
         SRP_SQ_SIZE = SRP_RQ_SIZE - 1,
-        SRP_CQ_SIZE = SRP_SQ_SIZE + SRP_RQ_SIZE,
 
         SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
 
@@ -69,8 +68,6 @@ enum {
         SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4
 };
 
-#define SRP_OP_RECV (1 << 31)
-
 enum srp_target_state {
         SRP_TARGET_LIVE,
         SRP_TARGET_CONNECTING,
@@ -133,7 +130,8 @@ struct srp_target_port {
         int path_query_id;
 
         struct ib_cm_id *cm_id;
-        struct ib_cq *cq;
+        struct ib_cq *recv_cq;
+        struct ib_cq *send_cq;
         struct ib_qp *qp;
 
         int max_ti_iu_len;