path: root/drivers/infiniband/hw
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_ae.c      |  2
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c           |  2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c           | 16
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c             | 70
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c             | 80
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c            |  4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c      |  2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c    |  6
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c  |  2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c |  6
10 files changed, 87 insertions, 103 deletions
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index d5d1929753e4..cedda25232be 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -141,7 +141,7 @@ static const char *to_qp_state_str(int state)
141 return "C2_QP_STATE_ERROR"; 141 return "C2_QP_STATE_ERROR";
142 default: 142 default:
143 return "<invalid QP state>"; 143 return "<invalid QP state>";
144 }; 144 }
145} 145}
146 146
147void c2_ae_event(struct c2_dev *c2dev, u32 mq_index) 147void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index d6c5a73becf4..f0612645de99 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1691,9 +1691,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		ibdev->ib_dev.create_flow	= mlx4_ib_create_flow;
 		ibdev->ib_dev.destroy_flow	= mlx4_ib_destroy_flow;
 
+#ifdef CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING
 		ibdev->ib_dev.uverbs_cmd_mask |=
 			(1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
 			(1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
+#endif /* CONFIG_INFINIBAND_EXPERIMENTAL_UVERBS_FLOW_STEERING */
 	}
 
 	mlx4_ib_alloc_eqs(dev, ibdev);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 3f831de9a4d8..b1a6cb3a2809 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -164,6 +164,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+	char name[MLX5_MAX_EQ_NAME];
 	struct mlx5_eq *eq, *n;
 	int ncomp_vec;
 	int nent;
@@ -180,11 +181,10 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 			goto clean;
 		}
 
-		snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+		snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
 		err = mlx5_create_map_eq(&dev->mdev, eq,
 					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-					 eq->name,
-					 &dev->mdev.priv.uuari.uars[0]);
+					 name, &dev->mdev.priv.uuari.uars[0]);
 		if (err) {
 			kfree(eq);
 			goto clean;
@@ -301,9 +301,8 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	props->max_srq_sge	   = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
 	props->local_ca_ack_delay  = dev->mdev.caps.local_ca_ack_delay;
-	props->atomic_cap	   = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
-		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
-	props->masked_atomic_cap   = IB_ATOMIC_HCA;
+	props->atomic_cap	   = IB_ATOMIC_NONE;
+	props->masked_atomic_cap   = IB_ATOMIC_NONE;
 	props->max_pkeys	   = be16_to_cpup((__be16 *)(out_mad->data + 28));
 	props->max_mcast_grp	   = 1 << dev->mdev.caps.log_max_mcg;
 	props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
@@ -1006,6 +1005,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 	ibev.device	      = &ibdev->ib_dev;
 	ibev.element.port_num = port;
 
+	if (port < 1 || port > ibdev->num_ports) {
+		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
+		return;
+	}
+
 	if (ibdev->ib_active)
 		ib_dispatch_event(&ibev);
 }
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bd41df95b6f0..3453580b1eb2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -42,6 +42,10 @@ enum {
 	DEF_CACHE_SIZE	= 10,
 };
 
+enum {
+	MLX5_UMR_ALIGN	= 2048
+};
+
 static __be64 *mr_align(__be64 *ptr, int align)
 {
 	unsigned long mask = align - 1;
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
 
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_ib_mr *mr;
 	int npages = 1 << ent->order;
-	int size = sizeof(u64) * npages;
 	int err = 0;
 	int i;
 
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 		}
 		mr->order = ent->order;
 		mr->umred = 1;
-		mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
-		if (!mr->pas) {
-			kfree(mr);
-			err = -ENOMEM;
-			goto out;
-		}
-		mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
-					 DMA_TO_DEVICE);
-		if (dma_mapping_error(ddev, mr->dma)) {
-			kfree(mr->pas);
-			kfree(mr);
-			err = -ENOMEM;
-			goto out;
-		}
-
 		in->seg.status = 1 << 6;
 		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
 		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 				       sizeof(*in));
 		if (err) {
 			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
 			kfree(mr);
 			goto out;
 		}
@@ -129,11 +114,9 @@ out:
 
 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_ib_mr *mr;
-	int size;
 	int err;
 	int i;
 
@@ -149,14 +132,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 		ent->size--;
 		spin_unlock(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-		if (err) {
+		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		} else {
-			size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
+		else
 			kfree(mr);
-		}
 	}
 }
 
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_ib_mr *mr;
-	int size;
 	int err;
 
+	cancel_delayed_work(&ent->dwork);
 	while (1) {
 		spin_lock(&ent->lock);
 		if (list_empty(&ent->head)) {
@@ -427,14 +405,10 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 		ent->size--;
 		spin_unlock(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-		if (err) {
+		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		} else {
-			size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
+		else
 			kfree(mr);
-		}
 	}
 }
 
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 	int i;
 
 	dev->cache.stopped = 1;
-	destroy_workqueue(dev->cache.wq);
+	flush_workqueue(dev->cache.wq);
 
 	mlx5_mr_cache_debugfs_cleanup(dev);
 
 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
 		clean_keys(dev, i);
 
+	destroy_workqueue(dev->cache.wq);
+
 	return 0;
 }
 
@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 				  int page_shift, int order, int access_flags)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	struct device *ddev = dev->ib_dev.dma_device;
 	struct umr_common *umrc = &dev->umrc;
 	struct ib_send_wr wr, *bad;
 	struct mlx5_ib_mr *mr;
 	struct ib_sge sg;
+	int size = sizeof(u64) * npages;
 	int err;
 	int i;
 
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	if (!mr)
 		return ERR_PTR(-EAGAIN);
 
-	mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
+	mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+	if (!mr->pas) {
+		err = -ENOMEM;
+		goto error;
+	}
+
+	mlx5_ib_populate_pas(dev, umem, page_shift,
+			     mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+
+	mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
+				 DMA_TO_DEVICE);
+	if (dma_mapping_error(ddev, mr->dma)) {
+		kfree(mr->pas);
+		err = -ENOMEM;
+		goto error;
+	}
 
 	memset(&wr, 0, sizeof(wr));
 	wr.wr_id = (u64)(unsigned long)mr;
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	wait_for_completion(&mr->done);
 	up(&umrc->sem);
 
+	dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+	kfree(mr->pas);
+
 	if (mr->status != IB_WC_SUCCESS) {
 		mlx5_ib_warn(dev, "reg umr failed\n");
 		err = -EFAULT;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 045f8cdbd303..5659ea880741 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -203,7 +203,7 @@ static int sq_overhead(enum ib_qp_type qp_type)
 
 	switch (qp_type) {
 	case IB_QPT_XRC_INI:
-		size = sizeof(struct mlx5_wqe_xrc_seg);
+		size += sizeof(struct mlx5_wqe_xrc_seg);
 		/* fall through */
 	case IB_QPT_RC:
 		size += sizeof(struct mlx5_wqe_ctrl_seg) +
@@ -211,20 +211,23 @@ static int sq_overhead(enum ib_qp_type qp_type)
 			sizeof(struct mlx5_wqe_raddr_seg);
 		break;
 
+	case IB_QPT_XRC_TGT:
+		return 0;
+
 	case IB_QPT_UC:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_raddr_seg);
 		break;
 
 	case IB_QPT_UD:
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_datagram_seg);
 		break;
 
 	case MLX5_IB_QPT_REG_UMR:
-		size = sizeof(struct mlx5_wqe_ctrl_seg) +
+		size += sizeof(struct mlx5_wqe_ctrl_seg) +
 			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
 			sizeof(struct mlx5_mkey_seg);
 		break;
@@ -270,7 +273,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 		return wqe_size;
 
 	if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
-		mlx5_ib_dbg(dev, "\n");
+		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
+			    wqe_size, dev->mdev.caps.max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -280,9 +284,15 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
+	if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
+			    qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+		return -ENOMEM;
+	}
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
 	qp->sq.max_gs = attr->cap.max_send_sge;
-	qp->sq.max_post = 1 << ilog2(wq_size / wqe_size);
+	qp->sq.max_post = wq_size / wqe_size;
+	attr->cap.max_send_wr = qp->sq.max_post;
 
 	return wq_size;
 }
@@ -1280,6 +1290,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
 					  MLX5_QP_OPTPAR_Q_KEY,
 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
 					   MLX5_QP_OPTPAR_Q_KEY,
+			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
+					   MLX5_QP_OPTPAR_RRE		|
+					   MLX5_QP_OPTPAR_RAE		|
+					   MLX5_QP_OPTPAR_RWE		|
+					   MLX5_QP_OPTPAR_PKEY_INDEX,
 		},
 	},
 	[MLX5_QP_STATE_RTR] = {
@@ -1314,6 +1329,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
 		[MLX5_QP_STATE_RTS] = {
 			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
 			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
+			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
+			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
+					   MLX5_QP_OPTPAR_RWE		|
+					   MLX5_QP_OPTPAR_RAE		|
+					   MLX5_QP_OPTPAR_RRE,
 		},
 	},
 };
@@ -1651,29 +1671,6 @@ static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
 	rseg->reserved = 0;
 }
 
-static void set_atomic_seg(struct mlx5_wqe_atomic_seg *aseg, struct ib_send_wr *wr)
-{
-	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	} else if (wr->opcode == IB_WR_MASKED_ATOMIC_FETCH_AND_ADD) {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-	} else {
-		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
-		aseg->compare = 0;
-	}
-}
-
-static void set_masked_atomic_seg(struct mlx5_wqe_masked_atomic_seg *aseg,
-				  struct ib_send_wr *wr)
-{
-	aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
-	aseg->swap_add_mask = cpu_to_be64(wr->wr.atomic.swap_mask);
-	aseg->compare = cpu_to_be64(wr->wr.atomic.compare_add);
-	aseg->compare_mask = cpu_to_be64(wr->wr.atomic.compare_add_mask);
-}
-
 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
 			     struct ib_send_wr *wr)
 {
@@ -2063,28 +2060,11 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 		case IB_WR_ATOMIC_CMP_AND_SWP:
 		case IB_WR_ATOMIC_FETCH_AND_ADD:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_atomic_seg)) / 16;
-			break;
-
 		case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
-			set_raddr_seg(seg, wr->wr.atomic.remote_addr,
-				      wr->wr.atomic.rkey);
-			seg += sizeof(struct mlx5_wqe_raddr_seg);
-
-			set_masked_atomic_seg(seg, wr);
-			seg += sizeof(struct mlx5_wqe_masked_atomic_seg);
-
-			size += (sizeof(struct mlx5_wqe_raddr_seg) +
-				 sizeof(struct mlx5_wqe_masked_atomic_seg)) / 16;
-			break;
+			mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
+			err = -ENOSYS;
+			*bad_wr = wr;
+			goto out;
 
 		case IB_WR_LOCAL_INV:
 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 84d297afd6a9..0aa478bc291a 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -295,7 +295,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	mlx5_vfree(in);
 	if (err) {
 		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
-		goto err_srq;
+		goto err_usr_kern_srq;
 	}
 
 	mlx5_ib_dbg(dev, "create SRQ with srqn 0x%x\n", srq->msrq.srqn);
@@ -316,6 +316,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
 err_core:
 	mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
+
+err_usr_kern_srq:
 	if (pd->uobject)
 		destroy_srq_user(pd, srq);
 	else
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 7c9d35f39d75..690201738993 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -357,7 +357,7 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 			mthca_warn(dev, "Unhandled event %02x(%02x) on EQ %d\n",
 				   eqe->type, eqe->subtype, eq->eqn);
 			break;
-		};
+		}
 
 		set_eqe_hw(eqe);
 		++eq->cons_index;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 4ed8235d2d36..50219ab2279d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -150,7 +150,7 @@ enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
 		return IB_QPS_SQE;
 	case OCRDMA_QPS_ERR:
 		return IB_QPS_ERR;
-	};
+	}
 	return IB_QPS_ERR;
 }
 
@@ -171,7 +171,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
 		return OCRDMA_QPS_SQE;
 	case IB_QPS_ERR:
 		return OCRDMA_QPS_ERR;
-	};
+	}
 	return OCRDMA_QPS_ERR;
 }
 
@@ -1982,7 +1982,7 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
 		break;
 	default:
 		return -EINVAL;
-	};
+	}
 
 	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
 	if (!cmd)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 56e004940f18..0ce7674621ea 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -531,7 +531,7 @@ static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
 	case BE_DEV_DOWN:
 		ocrdma_close(dev);
 		break;
-	};
+	}
 }
 
 static struct ocrdma_driver ocrdma_drv = {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 6e982bb43c31..69f1d1221a6b 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -141,7 +141,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
 		/* Unsupported */
 		*ib_speed = IB_SPEED_SDR;
 		*ib_width = IB_WIDTH_1X;
-	};
+	}
 }
 
 
@@ -2331,7 +2331,7 @@ static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
 	default:
 		ibwc_status = IB_WC_GENERAL_ERR;
 		break;
-	};
+	}
 	return ibwc_status;
 }
 
@@ -2370,7 +2370,7 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
 		pr_err("%s() invalid opcode received = 0x%x\n",
 		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
 		break;
-	};
+	}
 }
 
 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,