author    Yonatan Cohen <yonatanc@mellanox.com>    2018-01-02 09:08:06 -0500
committer Saeed Mahameed <saeedm@mellanox.com>    2018-02-15 03:30:03 -0500
commit    388ca8be00370db132464e27f745b8a0add19fcb (patch)
tree      c947d371b76ffea3a91b0b042295654f7751f3a5
parent    3ec5693b17314b58977ba3c8d720d1f9cfef39f8 (diff)
IB/mlx5: Implement fragmented completion queue (CQ)
The current implementation of create CQ requires contiguous memory. This
requirement is problematic once memory is fragmented or the system is low
on memory, and it leads to failures in dma_zalloc_coherent().

This patch implements a new scheme of fragmented CQs to overcome the
issue. It introduces a new type, 'struct mlx5_frag_buf_ctrl', used to
allocate fragmented buffers rather than contiguous ones, and bases the
Completion Queues (CQs) on this new fragmented buffer.

It fixes the following crashes:

kworker/29:0: page allocation failure: order:6, mode:0x80d0
CPU: 29 PID: 8374 Comm: kworker/29:0 Tainted: G OE 3.10.0
Workqueue: ib_cm cm_work_handler [ib_cm]
Call Trace:
[<>] dump_stack+0x19/0x1b
[<>] warn_alloc_failed+0x110/0x180
[<>] __alloc_pages_slowpath+0x6b7/0x725
[<>] __alloc_pages_nodemask+0x405/0x420
[<>] dma_generic_alloc_coherent+0x8f/0x140
[<>] x86_swiotlb_alloc_coherent+0x21/0x50
[<>] mlx5_dma_zalloc_coherent_node+0xad/0x110 [mlx5_core]
[<>] ? mlx5_db_alloc_node+0x69/0x1b0 [mlx5_core]
[<>] mlx5_buf_alloc_node+0x3e/0xa0 [mlx5_core]
[<>] mlx5_buf_alloc+0x14/0x20 [mlx5_core]
[<>] create_cq_kernel+0x90/0x1f0 [mlx5_ib]
[<>] mlx5_ib_create_cq+0x3b0/0x4e0 [mlx5_ib]

Signed-off-by: Yonatan Cohen <yonatanc@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
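[Editor's note: a minimal userspace sketch, not part of the patch, of the
indexing arithmetic the patch adds in mlx5_frag_buf_get_wqe(): a linear CQE
index is split into a page-sized fragment number and a byte offset within
that fragment. The struct and helper below are hypothetical stand-ins that
mirror struct mlx5_frag_buf_ctrl and mlx5_core_init_cq_frag_buf(), assuming
4 KB pages and 64-byte CQEs.]

#include <stdio.h>

#define PAGE_SHIFT 12 /* 4 KB pages, as on x86 */

struct fbc {
	unsigned int log_stride;       /* log2 of the CQE size */
	unsigned int log_frag_strides; /* log2 of CQEs per fragment */
	unsigned int frag_sz_m1;       /* CQEs per fragment, minus 1 */
};

/* Mirrors what mlx5_core_init_cq_frag_buf() derives from the CQ context */
static void init_fbc(struct fbc *c, unsigned int log_cqe_sz)
{
	c->log_stride = log_cqe_sz;
	c->log_frag_strides = PAGE_SHIFT - c->log_stride;
	c->frag_sz_m1 = (1u << c->log_frag_strides) - 1;
}

int main(void)
{
	struct fbc c;
	unsigned int ix = 130; /* arbitrary CQE index */

	init_fbc(&c, 6); /* 64-byte CQEs */

	/* Same split as mlx5_frag_buf_get_wqe(): high bits select the
	 * fragment, low bits select the CQE within that fragment. */
	unsigned int frag = ix >> c.log_frag_strides;
	unsigned int off  = (c.frag_sz_m1 & ix) << c.log_stride;

	/* 64 CQEs fit in one 4 KB fragment, so index 130 is CQE 2 of
	 * fragment 2, i.e. byte offset 128 within it. */
	printf("frag %u, offset %u\n", frag, off);
	return 0;
}

Because each fragment is at most one page, the allocation in
mlx5_frag_buf_alloc_node() never needs a high-order contiguous region,
which is what made the order:6 allocation above fail.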
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c                 | 64
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h            |  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c    | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h    | 22
-rw-r--r--  include/linux/mlx5/driver.h                     | 51

7 files changed, 124 insertions(+), 85 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 5b974fb97611..c4c7b82f4ac1 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -64,14 +64,9 @@ static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
         }
 }
 
-static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
-{
-        return mlx5_buf_offset(&buf->buf, n * size);
-}
-
 static void *get_cqe(struct mlx5_ib_cq *cq, int n)
 {
-        return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
+        return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
 }
 
 static u8 sw_ownership_bit(int n, int nent)
@@ -403,7 +398,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
-        mlx5_buf_free(dev->mdev, &buf->buf);
+        mlx5_frag_buf_free(dev->mdev, &buf->fbc.frag_buf);
 }
 
 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -724,12 +719,25 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
         return ret;
 }
 
-static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
-                        int nent, int cqe_size)
+static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
+                             struct mlx5_ib_cq_buf *buf,
+                             int nent,
+                             int cqe_size)
 {
+        struct mlx5_frag_buf_ctrl *c = &buf->fbc;
+        struct mlx5_frag_buf *frag_buf = &c->frag_buf;
+        u32 cqc_buff[MLX5_ST_SZ_DW(cqc)] = {0};
         int err;
 
-        err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
+        MLX5_SET(cqc, cqc_buff, log_cq_size, ilog2(cqe_size));
+        MLX5_SET(cqc, cqc_buff, cqe_sz, (cqe_size == 128) ? 1 : 0);
+
+        mlx5_core_init_cq_frag_buf(&buf->fbc, cqc_buff);
+
+        err = mlx5_frag_buf_alloc_node(dev->mdev,
+                                       nent * cqe_size,
+                                       frag_buf,
+                                       dev->mdev->priv.numa_node);
         if (err)
                 return err;
 
@@ -862,14 +870,15 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
         ib_umem_release(cq->buf.umem);
 }
 
-static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
+static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
+                             struct mlx5_ib_cq_buf *buf)
 {
         int i;
         void *cqe;
         struct mlx5_cqe64 *cqe64;
 
         for (i = 0; i < buf->nent; i++) {
-                cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
+                cqe = get_cqe(cq, i);
                 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
                 cqe64->op_own = MLX5_CQE_INVALID << 4;
         }
@@ -891,14 +900,15 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
         cq->mcq.arm_db = cq->db.db + 1;
         cq->mcq.cqe_sz = cqe_size;
 
-        err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
+        err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
         if (err)
                 goto err_db;
 
-        init_cq_buf(cq, &cq->buf);
+        init_cq_frag_buf(cq, &cq->buf);
 
         *inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
-                 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
+                 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
+                 cq->buf.fbc.frag_buf.npages;
         *cqb = kvzalloc(*inlen, GFP_KERNEL);
         if (!*cqb) {
                 err = -ENOMEM;
@@ -906,11 +916,12 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
         }
 
         pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
-        mlx5_fill_page_array(&cq->buf.buf, pas);
+        mlx5_fill_page_frag_array(&cq->buf.fbc.frag_buf, pas);
 
         cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
         MLX5_SET(cqc, cqc, log_page_size,
-                 cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+                 cq->buf.fbc.frag_buf.page_shift -
+                 MLX5_ADAPTER_PAGE_SHIFT);
 
         *index = dev->mdev->priv.uar->index;
 
@@ -1207,11 +1218,11 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
         if (!cq->resize_buf)
                 return -ENOMEM;
 
-        err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
+        err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
         if (err)
                 goto ex;
 
-        init_cq_buf(cq, cq->resize_buf);
+        init_cq_frag_buf(cq, cq->resize_buf);
 
         return 0;
 
@@ -1256,9 +1267,8 @@ static int copy_resize_cqes(struct mlx5_ib_cq *cq)
         }
 
         while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
-                dcqe = get_cqe_from_buf(cq->resize_buf,
-                                        (i + 1) & (cq->resize_buf->nent),
-                                        dsize);
+                dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
+                                             (i + 1) & cq->resize_buf->nent);
                 dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
                 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
                 memcpy(dcqe, scqe, dsize);
@@ -1324,8 +1334,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                 cqe_size = 64;
                 err = resize_kernel(dev, cq, entries, cqe_size);
                 if (!err) {
-                        npas = cq->resize_buf->buf.npages;
-                        page_shift = cq->resize_buf->buf.page_shift;
+                        struct mlx5_frag_buf_ctrl *c;
+
+                        c = &cq->resize_buf->fbc;
+                        npas = c->frag_buf.npages;
+                        page_shift = c->frag_buf.page_shift;
                 }
         }
 
@@ -1346,7 +1359,8 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                 mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
                                      pas, 0);
         else
-                mlx5_fill_page_array(&cq->resize_buf->buf, pas);
+                mlx5_fill_page_frag_array(&cq->resize_buf->fbc.frag_buf,
+                                          pas);
 
         MLX5_SET(modify_cq_in, in,
                  modify_field_select_resize_field_select.resize_field_select.resize_field_select,
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 139385129973..eafb9751daf6 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -371,7 +371,7 @@ struct mlx5_ib_qp {
                 struct mlx5_ib_rss_qp rss_qp;
                 struct mlx5_ib_dct dct;
         };
-        struct mlx5_buf buf;
+        struct mlx5_frag_buf buf;
 
         struct mlx5_db db;
         struct mlx5_ib_wq rq;
@@ -413,7 +413,7 @@ struct mlx5_ib_qp {
 };
 
 struct mlx5_ib_cq_buf {
-        struct mlx5_buf buf;
+        struct mlx5_frag_buf_ctrl fbc;
         struct ib_umem *umem;
         int cqe_size;
         int nent;
@@ -495,7 +495,7 @@ struct mlx5_ib_wc {
 struct mlx5_ib_srq {
         struct ib_srq ibsrq;
         struct mlx5_core_srq msrq;
-        struct mlx5_buf buf;
+        struct mlx5_frag_buf buf;
         struct mlx5_db db;
         u64 *wrid;
         /* protect SRQ hanlding
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 47239bf7bf43..323ffe8bf7e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -71,19 +71,24 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
 }
 
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
-                        struct mlx5_buf *buf, int node)
+                        struct mlx5_frag_buf *buf, int node)
 {
         dma_addr_t t;
 
         buf->size = size;
         buf->npages = 1;
         buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
-        buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
-                                                        &t, node);
-        if (!buf->direct.buf)
+
+        buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
+        if (!buf->frags)
                 return -ENOMEM;
 
-        buf->direct.map = t;
+        buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size,
+                                                        &t, node);
+        if (!buf->frags->buf)
+                goto err_out;
+
+        buf->frags->map = t;
 
         while (t & ((1 << buf->page_shift) - 1)) {
                 --buf->page_shift;
@@ -91,18 +96,24 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
         }
 
         return 0;
+err_out:
+        kfree(buf->frags);
+        return -ENOMEM;
 }
 
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev,
+                   int size, struct mlx5_frag_buf *buf)
 {
         return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
 }
-EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
+EXPORT_SYMBOL(mlx5_buf_alloc);
 
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
 {
-        dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
-                          buf->direct.map);
+        dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf,
+                          buf->frags->map);
+
+        kfree(buf->frags);
 }
 EXPORT_SYMBOL_GPL(mlx5_buf_free);
 
@@ -147,6 +158,7 @@ err_free_buf:
 err_out:
         return -ENOMEM;
 }
+EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node);
 
 void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
 {
@@ -162,6 +174,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
         }
         kfree(buf->frags);
 }
+EXPORT_SYMBOL_GPL(mlx5_frag_buf_free);
 
 static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
                                                  int node)
@@ -275,13 +288,13 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
 }
 EXPORT_SYMBOL_GPL(mlx5_db_free);
 
-void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
+void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
 {
         u64 addr;
         int i;
 
         for (i = 0; i < buf->npages; i++) {
-                addr = buf->direct.map + (i << buf->page_shift);
+                addr = buf->frags->map + (i << buf->page_shift);
 
                 pas[i] = cpu_to_be64(addr);
         }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 0d4bb0688faa..80b84f6af2a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -52,7 +52,7 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
 static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
                                        void *data)
 {
-        u32 ci = cqcc & cq->wq.sz_m1;
+        u32 ci = cqcc & cq->wq.fbc.sz_m1;
 
         memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
 }
@@ -74,9 +74,10 @@ static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
 
 static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
 {
-        u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
-        u32 wq_sz = 1 << cq->wq.log_sz;
-        u32 ci = cqcc & cq->wq.sz_m1;
+        struct mlx5_frag_buf_ctrl *fbc = &cq->wq.fbc;
+        u8 op_own = (cqcc >> fbc->log_sz) & 1;
+        u32 wq_sz = 1 << fbc->log_sz;
+        u32 ci = cqcc & fbc->sz_m1;
         u32 ci_top = min_t(u32, wq_sz, ci + n);
 
         for (; ci < ci_top; ci++, n--) {
@@ -101,7 +102,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
         cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
         cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
         cq->title.op_own &= 0xf0;
-        cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz);
+        cq->title.op_own |= 0x01 & (cqcc >> cq->wq.fbc.log_sz);
         cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
 
         if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 6bcfc25350f5..ea66448ba365 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -41,7 +41,7 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
 
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
 {
-        return wq->sz_m1 + 1;
+        return wq->fbc.sz_m1 + 1;
 }
 
 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
@@ -62,7 +62,7 @@ static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
 
 static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
 {
-        return mlx5_cqwq_get_size(wq) << wq->log_stride;
+        return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride;
 }
 
 static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
@@ -92,7 +92,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                 goto err_db_free;
         }
 
-        wq->buf = wq_ctrl->buf.direct.buf;
+        wq->buf = wq_ctrl->buf.frags->buf;
         wq->db = wq_ctrl->db.db;
 
         wq_ctrl->mdev = mdev;
@@ -130,7 +130,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                 goto err_db_free;
         }
 
-        wq->rq.buf = wq_ctrl->buf.direct.buf;
+        wq->rq.buf = wq_ctrl->buf.frags->buf;
         wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq);
         wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
         wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
@@ -151,11 +151,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 {
         int err;
 
-        wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
-        wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
-        wq->sz_m1 = (1 << wq->log_sz) - 1;
-        wq->log_frag_strides = PAGE_SHIFT - wq->log_stride;
-        wq->frag_sz_m1 = (1 << wq->log_frag_strides) - 1;
+        mlx5_core_init_cq_frag_buf(&wq->fbc, cqc);
 
         err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
         if (err) {
@@ -172,7 +168,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                 goto err_db_free;
         }
 
-        wq->frag_buf = wq_ctrl->frag_buf;
+        wq->fbc.frag_buf = wq_ctrl->frag_buf;
         wq->db = wq_ctrl->db.db;
 
         wq_ctrl->mdev = mdev;
@@ -209,7 +205,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
                 goto err_db_free;
         }
 
-        wq->buf = wq_ctrl->buf.direct.buf;
+        wq->buf = wq_ctrl->buf.frags->buf;
         wq->db = wq_ctrl->db.db;
 
         for (i = 0; i < wq->sz_m1; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 718589d0cec2..fca90b94596d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -45,7 +45,7 @@ struct mlx5_wq_param {
 
 struct mlx5_wq_ctrl {
         struct mlx5_core_dev *mdev;
-        struct mlx5_buf buf;
+        struct mlx5_frag_buf buf;
         struct mlx5_db db;
 };
 
@@ -68,14 +68,9 @@ struct mlx5_wq_qp {
 };
 
 struct mlx5_cqwq {
-        struct mlx5_frag_buf frag_buf;
+        struct mlx5_frag_buf_ctrl fbc;
         __be32 *db;
-        u32 sz_m1;
-        u32 frag_sz_m1;
-        u32 cc; /* consumer counter */
-        u8 log_sz;
-        u8 log_stride;
-        u8 log_frag_strides;
+        u32 cc; /* consumer counter */
 };
 
 struct mlx5_wq_ll {
@@ -131,20 +126,17 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
 
 static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
 {
-        return wq->cc & wq->sz_m1;
+        return wq->cc & wq->fbc.sz_m1;
 }
 
 static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
 {
-        unsigned int frag = (ix >> wq->log_frag_strides);
-
-        return wq->frag_buf.frags[frag].buf +
-                ((wq->frag_sz_m1 & ix) << wq->log_stride);
+        return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
 }
 
 static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
 {
-        return wq->cc >> wq->log_sz;
+        return wq->cc >> wq->fbc.log_sz;
 }
 
 static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2860a253275b..bfea26af6de5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -345,13 +345,6 @@ struct mlx5_buf_list {
         dma_addr_t map;
 };
 
-struct mlx5_buf {
-        struct mlx5_buf_list direct;
-        int npages;
-        int size;
-        u8 page_shift;
-};
-
 struct mlx5_frag_buf {
         struct mlx5_buf_list *frags;
         int npages;
@@ -359,6 +352,15 @@ struct mlx5_frag_buf {
         u8 page_shift;
 };
 
+struct mlx5_frag_buf_ctrl {
+        struct mlx5_frag_buf frag_buf;
+        u32 sz_m1;
+        u32 frag_sz_m1;
+        u8 log_sz;
+        u8 log_stride;
+        u8 log_frag_strides;
+};
+
 struct mlx5_eq_tasklet {
         struct list_head list;
         struct list_head process_list;
@@ -386,7 +388,7 @@ struct mlx5_eq {
         struct mlx5_cq_table cq_table;
         __be32 __iomem *doorbell;
         u32 cons_index;
-        struct mlx5_buf buf;
+        struct mlx5_frag_buf buf;
         int size;
         unsigned int irqn;
         u8 eqn;
@@ -932,9 +934,9 @@ struct mlx5_hca_vport_context {
         bool grh_required;
 };
 
-static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
+static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
 {
-        return buf->direct.buf + offset;
+        return buf->frags->buf + offset;
 }
 
 #define STRUCT_FIELD(header, field) \
@@ -973,6 +975,25 @@ static inline u32 mlx5_base_mkey(const u32 key)
         return key & 0xffffff00u;
 }
 
+static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
+                                              void *cqc)
+{
+        fbc->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+        fbc->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+        fbc->sz_m1 = (1 << fbc->log_sz) - 1;
+        fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
+        fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+}
+
+static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
+                                          u32 ix)
+{
+        unsigned int frag = (ix >> fbc->log_frag_strides);
+
+        return fbc->frag_buf.frags[frag].buf +
+                ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
+}
+
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
@@ -998,9 +1019,10 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
 int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
-                        struct mlx5_buf *buf, int node);
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf);
+                        struct mlx5_frag_buf *buf, int node);
+int mlx5_buf_alloc(struct mlx5_core_dev *dev,
+                   int size, struct mlx5_frag_buf *buf);
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
 int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
                              struct mlx5_frag_buf *buf, int node);
 void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
@@ -1045,7 +1067,8 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
 int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
 void mlx5_register_debugfs(void);
 void mlx5_unregister_debugfs(void);
-void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas);
+
+void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
 void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
 void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type);
 void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type);