 drivers/infiniband/hw/mlx5/gsi.c | 156 ++++++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 150 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/gsi.c b/drivers/infiniband/hw/mlx5/gsi.c
index 1648f539c836..8d040626abb2 100644
--- a/drivers/infiniband/hw/mlx5/gsi.c
+++ b/drivers/infiniband/hw/mlx5/gsi.c
@@ -32,6 +32,13 @@
 
 #include "mlx5_ib.h"
 
+struct mlx5_ib_gsi_wr {
+	struct ib_cqe cqe;
+	struct ib_wc wc;
+	int send_flags;
+	bool completed:1;
+};
+
 struct mlx5_ib_gsi_qp {
 	struct ib_qp ibqp;
 	struct ib_qp *rx_qp;
@@ -40,9 +47,13 @@ struct mlx5_ib_gsi_qp {
 	enum ib_sig_type sq_sig_type;
 	/* Serialize qp state modifications */
 	struct mutex mutex;
+	struct ib_cq *cq;
+	struct mlx5_ib_gsi_wr *outstanding_wrs;
+	u32 outstanding_pi, outstanding_ci;
 	int num_qps;
 	/* Protects access to the tx_qps. Post send operations synchronize
-	 * with tx_qp creation in setup_qp().
+	 * with tx_qp creation in setup_qp(). Also protects the
+	 * outstanding_wrs array and indices.
 	 */
 	spinlock_t lock;
 	struct ib_qp **tx_qps;
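
Note that each mlx5_ib_gsi_wr added in the first hunk embeds a struct ib_cqe. With the kernel's ib_alloc_cq()/struct ib_cqe completion API, the CQ poll loop invokes wc->wr_cqe->done() for every completion, and the handler recovers its tracking structure with container_of(). A standalone sketch of that dispatch pattern (userspace C; names here are illustrative, not from the patch):

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct cqe {
	void (*done)(struct cqe *cqe);
};

struct tracked_wr {
	struct cqe cqe;		/* embedded, like ib_cqe in mlx5_ib_gsi_wr */
	int wr_id;
};

static void on_done(struct cqe *cqe)
{
	struct tracked_wr *wr = container_of(cqe, struct tracked_wr, cqe);

	printf("completed wr %d\n", wr->wr_id);
}

int main(void)
{
	struct tracked_wr wr = { .cqe = { .done = on_done }, .wr_id = 42 };

	wr.cqe.done(&wr.cqe);	/* what the CQ poller effectively does */
	return 0;
}
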
@@ -58,6 +69,57 @@ static bool mlx5_ib_deth_sqpn_cap(struct mlx5_ib_dev *dev)
 	return MLX5_CAP_GEN(dev->mdev, set_deth_sqpn);
 }
 
+static u32 next_outstanding(struct mlx5_ib_gsi_qp *gsi, u32 index)
+{
+	return ++index % gsi->cap.max_send_wr;
+}
+
+#define for_each_outstanding_wr(gsi, index) \
+	for (index = gsi->outstanding_ci; index != gsi->outstanding_pi; \
+	     index = next_outstanding(gsi, index))
+
+/* Call with gsi->lock locked */
+static void generate_completions(struct mlx5_ib_gsi_qp *gsi)
+{
+	struct ib_cq *gsi_cq = gsi->ibqp.send_cq;
+	struct mlx5_ib_gsi_wr *wr;
+	u32 index;
+
+	for_each_outstanding_wr(gsi, index) {
+		wr = &gsi->outstanding_wrs[index];
+
+		if (!wr->completed)
+			break;
+
+		if (gsi->sq_sig_type == IB_SIGNAL_ALL_WR ||
+		    wr->send_flags & IB_SEND_SIGNALED)
+			WARN_ON_ONCE(mlx5_ib_generate_wc(gsi_cq, &wr->wc));
+
+		wr->completed = false;
+	}
+
+	gsi->outstanding_ci = index;
+}
+
+static void handle_single_completion(struct ib_cq *cq, struct ib_wc *wc)
+{
+	struct mlx5_ib_gsi_qp *gsi = cq->cq_context;
+	struct mlx5_ib_gsi_wr *wr =
+		container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);
+	u64 wr_id;
+	unsigned long flags;
+
+	spin_lock_irqsave(&gsi->lock, flags);
+	wr->completed = true;
+	wr_id = wr->wc.wr_id;
+	wr->wc = *wc;
+	wr->wc.wr_id = wr_id;
+	wr->wc.qp = &gsi->ibqp;
+
+	generate_completions(gsi);
+	spin_unlock_irqrestore(&gsi->lock, flags);
+}
+
 struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
 				    struct ib_qp_init_attr *init_attr)
 {
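
Taken together, next_outstanding(), for_each_outstanding_wr and generate_completions() treat outstanding_wrs as a fixed-size ring: outstanding_pi is the producer index and outstanding_ci the consumer index, both wrapped at cap.max_send_wr under gsi->lock. Sends spread across multiple tx QPs can complete out of order, but the drain stops at the first entry still in flight, so the consumer's CQ sees completions in submission order. A minimal standalone model of that drain discipline (userspace C, illustrative names, no locking):

#include <stdbool.h>
#include <stdio.h>

#define MAX_SEND_WR 4

struct sketch_wr {
	bool completed;
	int wr_id;
};

static struct sketch_wr ring[MAX_SEND_WR];
static unsigned int pi, ci;	/* producer/consumer, kept in [0, MAX_SEND_WR) */

static unsigned int next_index(unsigned int index)
{
	return ++index % MAX_SEND_WR;	/* same wrap as next_outstanding() */
}

/* Claim the slot at pi, as mlx5_ib_add_outstanding_wr() does. */
static void post(int wr_id)
{
	ring[pi] = (struct sketch_wr){ .completed = false, .wr_id = wr_id };
	pi = next_index(pi);
}

/* Drain in order, stopping at the first entry still in flight;
 * this mirrors generate_completions(). */
static void drain(void)
{
	unsigned int index;

	for (index = ci; index != pi; index = next_index(index)) {
		if (!ring[index].completed)
			break;
		printf("completion for wr %d\n", ring[index].wr_id);
		ring[index].completed = false;
	}
	ci = index;
}

int main(void)
{
	post(1);
	post(2);
	ring[1].completed = true;	/* wr 2 finishes first... */
	drain();			/* ...but reports nothing yet */
	ring[0].completed = true;
	drain();			/* now wr 1 and wr 2, in order */
	return 0;
}
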
@@ -88,6 +150,14 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
 		goto err_free;
 	}
 
+	gsi->outstanding_wrs = kcalloc(init_attr->cap.max_send_wr,
+				       sizeof(*gsi->outstanding_wrs),
+				       GFP_KERNEL);
+	if (!gsi->outstanding_wrs) {
+		ret = -ENOMEM;
+		goto err_free_tx;
+	}
+
 	mutex_init(&gsi->mutex);
 
 	mutex_lock(&dev->devr.mutex);
@@ -96,7 +166,7 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
 		mlx5_ib_warn(dev, "GSI QP already exists on port %d\n",
 			     port_num);
 		ret = -EBUSY;
-		goto err_free_tx;
+		goto err_free_wrs;
 	}
 	gsi->num_qps = num_qps;
 	spin_lock_init(&gsi->lock);
@@ -106,13 +176,23 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
 	gsi->ibqp.qp_num = 1;
 	gsi->port_num = port_num;
 
+	gsi->cq = ib_alloc_cq(pd->device, gsi, init_attr->cap.max_send_wr, 0,
+			      IB_POLL_SOFTIRQ);
+	if (IS_ERR(gsi->cq)) {
+		mlx5_ib_warn(dev, "unable to create send CQ for GSI QP. error %ld\n",
+			     PTR_ERR(gsi->cq));
+		ret = PTR_ERR(gsi->cq);
+		goto err_free_wrs;
+	}
+
 	hw_init_attr.qp_type = MLX5_IB_QPT_HW_GSI;
+	hw_init_attr.send_cq = gsi->cq;
 	gsi->rx_qp = ib_create_qp(pd, &hw_init_attr);
 	if (IS_ERR(gsi->rx_qp)) {
 		mlx5_ib_warn(dev, "unable to create hardware GSI QP. error %ld\n",
 			     PTR_ERR(gsi->rx_qp));
 		ret = PTR_ERR(gsi->rx_qp);
-		goto err_free_tx;
+		goto err_destroy_cq;
 	}
 
 	dev->devr.ports[init_attr->port_num - 1].gsi = gsi;
@@ -121,8 +201,12 @@ struct ib_qp *mlx5_ib_gsi_create_qp(struct ib_pd *pd,
 
 	return &gsi->ibqp;
 
-err_free_tx:
+err_destroy_cq:
+	ib_free_cq(gsi->cq);
+err_free_wrs:
 	mutex_unlock(&dev->devr.mutex);
+	kfree(gsi->outstanding_wrs);
+err_free_tx:
 	kfree(gsi->tx_qps);
 err_free:
 	kfree(gsi);
@@ -158,6 +242,9 @@ int mlx5_ib_gsi_destroy_qp(struct ib_qp *qp)
 		gsi->tx_qps[qp_index] = NULL;
 	}
 
+	ib_free_cq(gsi->cq);
+
+	kfree(gsi->outstanding_wrs);
 	kfree(gsi->tx_qps);
 	kfree(gsi);
 
@@ -170,7 +257,7 @@ static struct ib_qp *create_gsi_ud_qp(struct mlx5_ib_gsi_qp *gsi)
 	struct ib_qp_init_attr init_attr = {
 		.event_handler = gsi->rx_qp->event_handler,
 		.qp_context = gsi->rx_qp->qp_context,
-		.send_cq = gsi->rx_qp->send_cq,
+		.send_cq = gsi->cq,
 		.recv_cq = gsi->rx_qp->recv_cq,
 		.cap = {
 			.max_send_wr = gsi->cap.max_send_wr,
@@ -326,12 +413,69 @@ int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
 	return ret;
 }
 
+/* Call with gsi->lock locked */
+static int mlx5_ib_add_outstanding_wr(struct mlx5_ib_gsi_qp *gsi,
+				      struct ib_ud_wr *wr, struct ib_wc *wc)
+{
+	struct mlx5_ib_dev *dev = to_mdev(gsi->rx_qp->device);
+	struct mlx5_ib_gsi_wr *gsi_wr;
+
+	if (gsi->outstanding_pi == gsi->outstanding_ci + gsi->cap.max_send_wr) {
+		mlx5_ib_warn(dev, "no available GSI work request.\n");
+		return -ENOMEM;
+	}
+
+	gsi_wr = &gsi->outstanding_wrs[gsi->outstanding_pi];
+	gsi->outstanding_pi = next_outstanding(gsi, gsi->outstanding_pi);
+
+	if (!wc) {
+		memset(&gsi_wr->wc, 0, sizeof(gsi_wr->wc));
+		gsi_wr->wc.pkey_index = wr->pkey_index;
+		gsi_wr->wc.wr_id = wr->wr.wr_id;
+	} else {
+		gsi_wr->wc = *wc;
+		gsi_wr->completed = true;
+	}
+
+	gsi_wr->cqe.done = &handle_single_completion;
+	wr->wr.wr_cqe = &gsi_wr->cqe;
+
+	return 0;
+}
+
 int mlx5_ib_gsi_post_send(struct ib_qp *qp, struct ib_send_wr *wr,
 			  struct ib_send_wr **bad_wr)
 {
 	struct mlx5_ib_gsi_qp *gsi = gsi_qp(qp);
+	unsigned long flags;
+	int ret;
+
+	for (; wr; wr = wr->next) {
+		struct ib_ud_wr cur_wr = *ud_wr(wr);
+
+		cur_wr.wr.next = NULL;
 
-	return ib_post_send(gsi->rx_qp, wr, bad_wr);
+		spin_lock_irqsave(&gsi->lock, flags);
+		ret = mlx5_ib_add_outstanding_wr(gsi, &cur_wr, NULL);
+		if (ret)
+			goto err;
+
+		ret = ib_post_send(gsi->rx_qp, &cur_wr.wr, bad_wr);
+		if (ret) {
+			/* Undo the effect of adding the outstanding wr */
+			gsi->outstanding_pi = (gsi->outstanding_pi - 1) %
+					      gsi->cap.max_send_wr;
+			goto err;
+		}
+		spin_unlock_irqrestore(&gsi->lock, flags);
+	}
+
+	return 0;
+
+err:
+	spin_unlock_irqrestore(&gsi->lock, flags);
+	*bad_wr = wr;
+	return ret;
 }
 
 int mlx5_ib_gsi_post_recv(struct ib_qp *qp, struct ib_recv_wr *wr,
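
Two subtleties in the new send path are worth noting. First, because outstanding_pi wraps at cap.max_send_wr, the error rollback (gsi->outstanding_pi - 1) % gsi->cap.max_send_wr underflows when the u32 index is 0, and then only lands back on max_send_wr - 1 when max_send_wr is a power of two. The usual wrap-safe decrement adds the modulus before subtracting, along these lines (a sketch, not code from the patch):

/* Wrap-safe decrement for an index kept in [0, max): adding the
 * modulus before subtracting avoids unsigned underflow at index 0. */
static unsigned int prev_outstanding(unsigned int index, unsigned int max)
{
	return (index + max - 1) % max;
}

Second, with both indices kept below cap.max_send_wr, the fullness test in mlx5_ib_add_outstanding_wr() (outstanding_pi == outstanding_ci + max_send_wr) can never be true, so a full ring goes undetected; catching it would need unwrapped indices or an explicit entry count.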