Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c       | 13
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h  | 13
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c       | 76
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c       | 42
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c      | 14
-rw-r--r--  drivers/infiniband/hw/mlx5/user.h     |  2
6 files changed, 106 insertions(+), 54 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 62bb6b49dc1d..8ae4f896cb41 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -32,6 +32,7 @@
 
 #include <linux/kref.h>
 #include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
 #include "mlx5_ib.h"
 #include "user.h"
 
@@ -602,14 +603,24 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
                          int *cqe_size, int *index, int *inlen)
 {
         struct mlx5_ib_create_cq ucmd;
+        size_t ucmdlen;
         int page_shift;
         int npages;
         int ncont;
         int err;
 
-        if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)))
+        ucmdlen =
+                (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
+                 sizeof(ucmd)) ? (sizeof(ucmd) -
+                                  sizeof(ucmd.reserved)) : sizeof(ucmd);
+
+        if (ib_copy_from_udata(&ucmd, udata, ucmdlen))
                 return -EFAULT;
 
+        if (ucmdlen == sizeof(ucmd) &&
+            ucmd.reserved != 0)
+                return -EINVAL;
+
         if (ucmd.cqe_size != 64 && ucmd.cqe_size != 128)
                 return -EINVAL;
 
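Note on the hunk above: copying ucmdlen bytes instead of sizeof(ucmd) keeps the uverbs ABI backward compatible. Old userspace, built before the reserved field was added to struct mlx5_ib_create_cq, passes a command four bytes shorter, so only that much is copied; new userspace must zero reserved or get -EINVAL. The same pattern is applied to create_srq_user() further down. A minimal userspace-style sketch of the size-selection logic follows; it assumes inlen is just the payload size (the kernel additionally subtracts sizeof(struct ib_uverbs_cmd_hdr) from udata->inlen), and create_cq_cmd, parse_cmd() and main() are illustrative names, not part of the patch:

/*
 * Illustrative sketch only: models the ucmdlen selection above in userspace.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct create_cq_cmd {                    /* mirrors struct mlx5_ib_create_cq */
        uint64_t buf_addr;
        uint64_t db_addr;
        uint32_t cqe_size;
        uint32_t reserved;                /* new field added by this series */
};

/* Copy either the old (shorter) or the new command; reject garbage in reserved. */
static int parse_cmd(const void *udata, size_t inlen, struct create_cq_cmd *cmd)
{
        size_t ucmdlen = (inlen < sizeof(*cmd)) ?
                         sizeof(*cmd) - sizeof(cmd->reserved) : sizeof(*cmd);

        memset(cmd, 0, sizeof(*cmd));
        memcpy(cmd, udata, ucmdlen);      /* stands in for ib_copy_from_udata() */

        if (ucmdlen == sizeof(*cmd) && cmd->reserved != 0)
                return -1;                /* -EINVAL in the kernel */
        return 0;
}

int main(void)
{
        struct create_cq_cmd new_cmd = { .cqe_size = 64 };
        unsigned char old_cmd[20] = { 0 };        /* old ABI had no reserved field */
        struct create_cq_cmd out;

        printf("new ABI accepted: %d\n", parse_cmd(&new_cmd, sizeof(new_cmd), &out));
        printf("old ABI accepted: %d\n", parse_cmd(old_cmd, sizeof(old_cmd), &out));
        return 0;
}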
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 50541586e0a6..f2ccf1a5a291 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -264,8 +264,6 @@ struct mlx5_ib_mr {
         __be64                  *pas;
         dma_addr_t              dma;
         int                     npages;
-        struct completion       done;
-        enum ib_wc_status       status;
         struct mlx5_ib_dev     *dev;
         struct mlx5_create_mkey_mbox_out out;
         struct mlx5_core_sig_ctx    *sig;
@@ -277,6 +275,17 @@ struct mlx5_ib_fast_reg_page_list {
         dma_addr_t              map;
 };
 
+struct mlx5_ib_umr_context {
+        enum ib_wc_status       status;
+        struct completion       done;
+};
+
+static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
+{
+        context->status = -1;
+        init_completion(&context->done);
+}
+
 struct umr_common {
         struct ib_pd    *pd;
         struct ib_cq    *cq;
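The new struct mlx5_ib_umr_context carries the completion state that used to live in struct mlx5_ib_mr, so a UMR work request completes against a context on the caller's stack (carried in wr_id) instead of against the MR itself; the mr.c hunks below switch reg_umr(), unreg_umr() and mlx5_umr_cq_handler() over to it. A userspace model of that pattern, with pthreads standing in for the kernel's struct completion; every name here is illustrative and only the flow mirrors the patch:

#include <pthread.h>
#include <stdio.h>

struct umr_context {                      /* models struct mlx5_ib_umr_context */
        int status;                       /* models enum ib_wc_status */
        int done;
        pthread_mutex_t lock;
        pthread_cond_t cond;
};

static void init_umr_context(struct umr_context *ctx)
{
        ctx->status = -1;
        ctx->done = 0;
        pthread_mutex_init(&ctx->lock, NULL);
        pthread_cond_init(&ctx->cond, NULL);
}

static void complete_ctx(struct umr_context *ctx, int status)
{
        pthread_mutex_lock(&ctx->lock);
        ctx->status = status;
        ctx->done = 1;
        pthread_cond_signal(&ctx->cond);
        pthread_mutex_unlock(&ctx->lock);
}

static void wait_for_ctx(struct umr_context *ctx)
{
        pthread_mutex_lock(&ctx->lock);
        while (!ctx->done)
                pthread_cond_wait(&ctx->cond, &ctx->lock);
        pthread_mutex_unlock(&ctx->lock);
}

/* Models mlx5_umr_cq_handler(): the completion carries the context in wr_id. */
static void *cq_handler(void *wr_id)
{
        complete_ctx((struct umr_context *)wr_id, 0 /* IB_WC_SUCCESS */);
        return NULL;
}

int main(void)
{
        struct umr_context ctx;           /* on the stack, as in reg_umr() */
        pthread_t cq;

        init_umr_context(&ctx);
        pthread_create(&cq, NULL, cq_handler, &ctx);  /* models ib_post_send() */
        wait_for_ctx(&ctx);
        pthread_join(cq, NULL);
        printf("umr status: %d\n", ctx.status);
        return 0;
}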
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 81392b26d078..afa873bd028e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -73,6 +73,8 @@ static void reg_mr_callback(int status, void *context)
         struct mlx5_cache_ent *ent = &cache->ent[c];
         u8 key;
         unsigned long flags;
+        struct mlx5_mr_table *table = &dev->mdev.priv.mr_table;
+        int err;
 
         spin_lock_irqsave(&ent->lock, flags);
         ent->pending--;
@@ -107,6 +109,13 @@ static void reg_mr_callback(int status, void *context)
         ent->cur++;
         ent->size++;
         spin_unlock_irqrestore(&ent->lock, flags);
+
+        write_lock_irqsave(&table->lock, flags);
+        err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmr.key),
+                                &mr->mmr);
+        if (err)
+                pr_err("Error inserting to mr tree. 0x%x\n", -err);
+        write_unlock_irqrestore(&table->lock, flags);
 }
 
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
@@ -699,7 +708,7 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
 
 void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
 {
-        struct mlx5_ib_mr *mr;
+        struct mlx5_ib_umr_context *context;
         struct ib_wc wc;
         int err;
 
@@ -712,9 +721,9 @@ void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
                 if (err == 0)
                         break;
 
-                mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id;
-                mr->status = wc.status;
-                complete(&mr->done);
+                context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
+                context->status = wc.status;
+                complete(&context->done);
         }
         ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 }
@@ -726,11 +735,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
         struct mlx5_ib_dev *dev = to_mdev(pd->device);
         struct device *ddev = dev->ib_dev.dma_device;
         struct umr_common *umrc = &dev->umrc;
+        struct mlx5_ib_umr_context umr_context;
         struct ib_send_wr wr, *bad;
         struct mlx5_ib_mr *mr;
         struct ib_sge sg;
         int size = sizeof(u64) * npages;
-        int err;
+        int err = 0;
         int i;
 
         for (i = 0; i < 1; i++) {
@@ -751,7 +761,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
         mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
         if (!mr->pas) {
                 err = -ENOMEM;
-                goto error;
+                goto free_mr;
         }
 
         mlx5_ib_populate_pas(dev, umem, page_shift,
@@ -760,44 +770,46 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
         mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
                                  DMA_TO_DEVICE);
         if (dma_mapping_error(ddev, mr->dma)) {
-                kfree(mr->pas);
                 err = -ENOMEM;
-                goto error;
+                goto free_pas;
         }
 
         memset(&wr, 0, sizeof(wr));
-        wr.wr_id = (u64)(unsigned long)mr;
+        wr.wr_id = (u64)(unsigned long)&umr_context;
         prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
 
-        /* We serialize polls so one process does not kidnap another's
-         * completion. This is not a problem since wr is completed in
-         * around 1 usec
-         */
+        mlx5_ib_init_umr_context(&umr_context);
         down(&umrc->sem);
-        init_completion(&mr->done);
         err = ib_post_send(umrc->qp, &wr, &bad);
         if (err) {
                 mlx5_ib_warn(dev, "post send failed, err %d\n", err);
-                up(&umrc->sem);
-                goto error;
+                goto unmap_dma;
+        } else {
+                wait_for_completion(&umr_context.done);
+                if (umr_context.status != IB_WC_SUCCESS) {
+                        mlx5_ib_warn(dev, "reg umr failed\n");
+                        err = -EFAULT;
+                }
         }
-        wait_for_completion(&mr->done);
-        up(&umrc->sem);
 
+        mr->mmr.iova = virt_addr;
+        mr->mmr.size = len;
+        mr->mmr.pd = to_mpd(pd)->pdn;
+
+unmap_dma:
+        up(&umrc->sem);
         dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+
+free_pas:
         kfree(mr->pas);
 
-        if (mr->status != IB_WC_SUCCESS) {
-                mlx5_ib_warn(dev, "reg umr failed\n");
-                err = -EFAULT;
-                goto error;
+free_mr:
+        if (err) {
+                free_cached_mr(dev, mr);
+                return ERR_PTR(err);
         }
 
         return mr;
-
-error:
-        free_cached_mr(dev, mr);
-        return ERR_PTR(err);
 }
 
 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
@@ -926,24 +938,26 @@ error:
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
         struct umr_common *umrc = &dev->umrc;
+        struct mlx5_ib_umr_context umr_context;
         struct ib_send_wr wr, *bad;
         int err;
 
         memset(&wr, 0, sizeof(wr));
-        wr.wr_id = (u64)(unsigned long)mr;
+        wr.wr_id = (u64)(unsigned long)&umr_context;
         prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
 
+        mlx5_ib_init_umr_context(&umr_context);
         down(&umrc->sem);
-        init_completion(&mr->done);
         err = ib_post_send(umrc->qp, &wr, &bad);
         if (err) {
                 up(&umrc->sem);
                 mlx5_ib_dbg(dev, "err %d\n", err);
                 goto error;
+        } else {
+                wait_for_completion(&umr_context.done);
+                up(&umrc->sem);
         }
-        wait_for_completion(&mr->done);
-        up(&umrc->sem);
-        if (mr->status != IB_WC_SUCCESS) {
+        if (umr_context.status != IB_WC_SUCCESS) {
                 mlx5_ib_warn(dev, "unreg umr failed\n");
                 err = -EFAULT;
                 goto error;
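The reg_umr() rework above also replaces the single error: label with an unwind ladder (unmap_dma, free_pas, free_mr), so the ib_post_send() failure path now also unmaps the DMA buffer and frees mr->pas instead of jumping straight to the old cleanup, and the shared tail decides success vs. failure from err. A generic standalone sketch of that staged-unwind idiom; do_work() and the malloc() calls are illustrative stand-ins, not the driver code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int do_work(void)
{
        int err = 0;
        char *pas, *dma;

        pas = malloc(64);                 /* like kmalloc() of mr->pas */
        if (!pas) {
                err = -ENOMEM;
                goto free_mr;             /* nothing else to undo yet */
        }

        dma = malloc(64);                 /* like dma_map_single() */
        if (!dma) {
                err = -ENOMEM;
                goto free_pas;
        }

        /* ... post the work request; a failure here would set err and
         * fall through the same labels (the "unmap_dma" stage) ... */

        free(dma);                        /* unmap_dma stage */
free_pas:
        free(pas);
free_mr:
        if (err) {
                fprintf(stderr, "failed: %d\n", err);
                return err;
        }
        return 0;
}

int main(void)
{
        return do_work();
}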
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index dc930ed21eca..d13ddf1c0033 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -574,6 +574,10 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
         uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
         mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
 
+        qp->rq.offset = 0;
+        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
+        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
+
         err = set_user_buf_size(dev, qp, &ucmd);
         if (err)
                 goto err_uuar;
@@ -2078,6 +2082,7 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
         struct ib_sig_domain *wire = &sig_attrs->wire;
         int ret, selector;
 
+        memset(bsf, 0, sizeof(*bsf));
         switch (sig_attrs->mem.sig_type) {
         case IB_SIG_TYPE_T10_DIF:
                 if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF)
@@ -2090,9 +2095,11 @@ static int mlx5_set_bsf(struct ib_mr *sig_mr,
                         /* Same block structure */
                         basic->bsf_size_sbs = 1 << 4;
                         if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
-                                basic->wire.copy_byte_mask = 0xff;
-                        else
-                                basic->wire.copy_byte_mask = 0x3f;
+                                basic->wire.copy_byte_mask |= 0xc0;
+                        if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
+                                basic->wire.copy_byte_mask |= 0x30;
+                        if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
+                                basic->wire.copy_byte_mask |= 0x0f;
                 } else
                         basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);
 
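The copy_byte_mask change above builds the mask per field instead of choosing between 0xff and 0x3f: assuming the usual 8-byte T10-DIF tuple layout (2-byte guard, 2-byte application tag, 4-byte reference tag), 0xc0 selects the guard bytes, 0x30 the application tag and 0x0f the reference tag, so the mask reaches 0xff only when all three fields match between the memory and wire domains. A small standalone check of that arithmetic; the macro names are illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define DIF_GUARD_MASK   0xc0   /* bytes 0-1: CRC guard       */
#define DIF_APP_TAG_MASK 0x30   /* bytes 2-3: application tag */
#define DIF_REF_TAG_MASK 0x0f   /* bytes 4-7: reference tag   */

int main(void)
{
        uint8_t mask = 0;
        int guard_matches = 1, app_matches = 1, ref_matches = 0;

        /* mirrors the three new "if (... == ...) copy_byte_mask |= ..." lines */
        if (guard_matches)
                mask |= DIF_GUARD_MASK;
        if (app_matches)
                mask |= DIF_APP_TAG_MASK;
        if (ref_matches)
                mask |= DIF_REF_TAG_MASK;

        printf("copy_byte_mask = 0x%02x\n", mask);   /* 0xf0 in this example */

        /* when every field matches, the old full-copy value 0xff is recovered */
        assert((DIF_GUARD_MASK | DIF_APP_TAG_MASK | DIF_REF_TAG_MASK) == 0xff);
        return 0;
}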
@@ -2131,9 +2138,13 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
         int ret;
         int wqe_size;
 
-        if (!wr->wr.sig_handover.prot) {
+        if (!wr->wr.sig_handover.prot ||
+            (data_key == wr->wr.sig_handover.prot->lkey &&
+             data_va == wr->wr.sig_handover.prot->addr &&
+             data_len == wr->wr.sig_handover.prot->length)) {
                 /**
                  * Source domain doesn't contain signature information
+                 * or data and protection are interleaved in memory.
                  * So need construct:
                  * ------------------
                  * | data_klm       |
@@ -2187,23 +2198,13 @@ static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
                 data_sentry->bcount = cpu_to_be16(block_size);
                 data_sentry->key = cpu_to_be32(data_key);
                 data_sentry->va = cpu_to_be64(data_va);
+                data_sentry->stride = cpu_to_be16(block_size);
+
                 prot_sentry->bcount = cpu_to_be16(prot_size);
                 prot_sentry->key = cpu_to_be32(prot_key);
+                prot_sentry->va = cpu_to_be64(prot_va);
+                prot_sentry->stride = cpu_to_be16(prot_size);
 
-                if (prot_key == data_key && prot_va == data_va) {
-                        /**
-                         * The data and protection are interleaved
-                         * in a single memory region
-                         **/
-                        prot_sentry->va = cpu_to_be64(data_va + block_size);
-                        prot_sentry->stride = cpu_to_be16(block_size + prot_size);
-                        data_sentry->stride = prot_sentry->stride;
-                } else {
-                        /* The data and protection are two different buffers */
-                        prot_sentry->va = cpu_to_be64(prot_va);
-                        data_sentry->stride = cpu_to_be16(block_size);
-                        prot_sentry->stride = cpu_to_be16(prot_size);
-                }
                 wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
                                  sizeof(*prot_sentry), 64);
         }
@@ -2275,7 +2276,10 @@ static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
 
         /* length of the protected region, data + protection */
         region_len = wr->sg_list->length;
-        if (wr->wr.sig_handover.prot)
+        if (wr->wr.sig_handover.prot &&
+            (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey ||
+             wr->wr.sig_handover.prot->addr != wr->sg_list->addr ||
+             wr->wr.sig_handover.prot->length != wr->sg_list->length))
                 region_len += wr->wr.sig_handover.prot->length;
 
         /**
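The region_len hunk above stops double-counting the protection buffer when it is actually the same SGE as the data (the interleaved layout now detected in set_sig_data_segment() above). A standalone sketch of the length computation, with struct sge and region_len() as illustrative stand-ins for the ib_sge fields used by set_sig_umr_wr():

#include <stdint.h>
#include <stdio.h>

struct sge {
        uint64_t addr;
        uint32_t length;
        uint32_t lkey;
};

static uint32_t region_len(const struct sge *data, const struct sge *prot)
{
        uint32_t len = data->length;

        /* mirrors the new three-field comparison in the hunk above */
        if (prot &&
            (prot->lkey != data->lkey ||
             prot->addr != data->addr ||
             prot->length != data->length))
                len += prot->length;
        return len;
}

int main(void)
{
        struct sge data = { .addr = 0x1000, .length = 512 * 8, .lkey = 1 };
        struct sge prot = { .addr = 0x9000, .length = 8 * 8,   .lkey = 2 };

        printf("separate buffers: %u\n", (unsigned)region_len(&data, &prot));
        printf("interleaved:      %u\n", (unsigned)region_len(&data, &data));
        printf("no protection:    %u\n", (unsigned)region_len(&data, NULL));
        return 0;
}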
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 210b3eaf188a..384af6dec5eb 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -35,6 +35,7 @@
 #include <linux/mlx5/srq.h>
 #include <linux/slab.h>
 #include <rdma/ib_umem.h>
+#include <rdma/ib_user_verbs.h>
 
 #include "mlx5_ib.h"
 #include "user.h"
@@ -78,16 +79,27 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 {
         struct mlx5_ib_dev *dev = to_mdev(pd->device);
         struct mlx5_ib_create_srq ucmd;
+        size_t ucmdlen;
         int err;
         int npages;
         int page_shift;
         int ncont;
         u32 offset;
 
-        if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
+        ucmdlen =
+                (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) <
+                 sizeof(ucmd)) ? (sizeof(ucmd) -
+                                  sizeof(ucmd.reserved)) : sizeof(ucmd);
+
+        if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
                 mlx5_ib_dbg(dev, "failed copy udata\n");
                 return -EFAULT;
         }
+
+        if (ucmdlen == sizeof(ucmd) &&
+            ucmd.reserved != 0)
+                return -EINVAL;
+
         srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
 
         srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, buf_size,
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index 0f4f8e42a17f..d0ba264ac1ed 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -91,6 +91,7 @@ struct mlx5_ib_create_cq {
         __u64   buf_addr;
         __u64   db_addr;
         __u32   cqe_size;
+        __u32   reserved; /* explicit padding (optional on i386) */
 };
 
 struct mlx5_ib_create_cq_resp {
@@ -109,6 +110,7 @@ struct mlx5_ib_create_srq {
         __u64   buf_addr;
         __u64   db_addr;
         __u32   flags;
+        __u32   reserved; /* explicit padding (optional on i386) */
 };
 
 struct mlx5_ib_create_srq_resp {
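The reserved fields exist because both commands previously ended with a lone __u32 after two __u64s: 64-bit userspace padded the struct to 24 bytes while i386, where __u64 is only 4-byte aligned, left it at 20, so the two ABIs disagreed about the command size. Making the padding explicit fixes the layout and gives the kernel a field it can require to be zero (see the ucmdlen checks in cq.c and srq.c above). A standalone check, using local copies of the struct rather than the uapi header; it prints 20 vs. 24 when built with -m32 and 24 vs. 24 on x86_64:

#include <stdint.h>
#include <stdio.h>

struct create_cq_old {            /* layout before this patch */
        uint64_t buf_addr;
        uint64_t db_addr;
        uint32_t cqe_size;
};

struct create_cq_new {            /* layout after this patch */
        uint64_t buf_addr;
        uint64_t db_addr;
        uint32_t cqe_size;
        uint32_t reserved;        /* explicit padding, must be zero */
};

int main(void)
{
        printf("old: %zu bytes, new: %zu bytes\n",
               sizeof(struct create_cq_old), sizeof(struct create_cq_new));
        return 0;
}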