summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMajd Dibbiny <majd@mellanox.com>2016-10-27 09:36:47 -0400
committerDoug Ledford <dledford@redhat.com>2016-11-16 20:04:48 -0500
commit762f899ae7875554284af92b821be8c083227092 (patch)
treea83f654fb5918a0301ac51b4664e26de72ef47b7
parent288c01b746aab484651391ca6d64b585d3eb5ec6 (diff)
IB/mlx5: Limit mkey page size to 2GB
The maximum page size in the mkey context is 2GB. Until today, we didn't enforce this requirement in the code, and therefore, if we got a page size larger than 2GB, we have passed zeros in the log_page_shift instead of the actual value and the registration failed. This patch limits the driver to use compound pages of 2GB for mkeys.

Fixes: e126ba97dba9 ('mlx5: Add driver for Mellanox Connect-IB adapters')
Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
Signed-off-by: Majd Dibbiny <majd@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c7
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h6
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c3
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c4
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c2
6 files changed, 18 insertions, 8 deletions
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 79d017baf6f4..9e0598b5615f 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -770,7 +770,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
770 if (err) 770 if (err)
771 goto err_umem; 771 goto err_umem;
772 772
773 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, &npages, &page_shift, 773 mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,
774 &ncont, NULL); 774 &ncont, NULL);
775 mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n", 775 mlx5_ib_dbg(dev, "addr 0x%llx, size %u, npages %d, page_shift %d, ncont %d\n",
776 ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont); 776 ucmd.buf_addr, entries * ucmd.cqe_size, npages, page_shift, ncont);
@@ -1125,7 +1125,7 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
1125 return err; 1125 return err;
1126 } 1126 }
1127 1127
1128 mlx5_ib_cont_pages(umem, ucmd.buf_addr, &npages, page_shift, 1128 mlx5_ib_cont_pages(umem, ucmd.buf_addr, 0, &npages, page_shift,
1129 npas, NULL); 1129 npas, NULL);
1130 1130
1131 cq->resize_umem = umem; 1131 cq->resize_umem = umem;
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 996b54e366b0..6851357c16f4 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -37,12 +37,15 @@
37 37
38/* @umem: umem object to scan 38/* @umem: umem object to scan
39 * @addr: ib virtual address requested by the user 39 * @addr: ib virtual address requested by the user
40 * @max_page_shift: high limit for page_shift - 0 means no limit
40 * @count: number of PAGE_SIZE pages covered by umem 41 * @count: number of PAGE_SIZE pages covered by umem
41 * @shift: page shift for the compound pages found in the region 42 * @shift: page shift for the compound pages found in the region
42 * @ncont: number of compund pages 43 * @ncont: number of compund pages
43 * @order: log2 of the number of compound pages 44 * @order: log2 of the number of compound pages
44 */ 45 */
45void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, 46void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
47 unsigned long max_page_shift,
48 int *count, int *shift,
46 int *ncont, int *order) 49 int *ncont, int *order)
47{ 50{
48 unsigned long tmp; 51 unsigned long tmp;
@@ -72,6 +75,8 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
72 addr = addr >> page_shift; 75 addr = addr >> page_shift;
73 tmp = (unsigned long)addr; 76 tmp = (unsigned long)addr;
74 m = find_first_bit(&tmp, BITS_PER_LONG); 77 m = find_first_bit(&tmp, BITS_PER_LONG);
78 if (max_page_shift)
79 m = min_t(unsigned long, max_page_shift - page_shift, m);
75 skip = 1 << m; 80 skip = 1 << m;
76 mask = skip - 1; 81 mask = skip - 1;
77 i = 0; 82 i = 0;
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d5d007740159..95937e7d2584 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -63,6 +63,8 @@ pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
63#define MLX5_IB_DEFAULT_UIDX 0xffffff 63#define MLX5_IB_DEFAULT_UIDX 0xffffff
64#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index) 64#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
65 65
66#define MLX5_MKEY_PAGE_SHIFT_MASK __mlx5_mask(mkc, log_page_size)
67
66enum { 68enum {
67 MLX5_IB_MMAP_CMD_SHIFT = 8, 69 MLX5_IB_MMAP_CMD_SHIFT = 8,
68 MLX5_IB_MMAP_CMD_MASK = 0xff, 70 MLX5_IB_MMAP_CMD_MASK = 0xff,
@@ -823,7 +825,9 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
823 struct ib_port_attr *props); 825 struct ib_port_attr *props);
824int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev); 826int mlx5_ib_init_fmr(struct mlx5_ib_dev *dev);
825void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev); 827void mlx5_ib_cleanup_fmr(struct mlx5_ib_dev *dev);
826void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift, 828void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
829 unsigned long max_page_shift,
830 int *count, int *shift,
827 int *ncont, int *order); 831 int *ncont, int *order);
828void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem, 832void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
829 int page_shift, size_t offset, size_t num_pages, 833 int page_shift, size_t offset, size_t num_pages,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 881166a9d868..6cbda901f259 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -855,7 +855,8 @@ static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
855 return (void *)umem; 855 return (void *)umem;
856 } 856 }
857 857
858 mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order); 858 mlx5_ib_cont_pages(umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
859 page_shift, ncont, order);
859 if (!*npages) { 860 if (!*npages) {
860 mlx5_ib_warn(dev, "avoid zero region\n"); 861 mlx5_ib_warn(dev, "avoid zero region\n");
861 ib_umem_release(umem); 862 ib_umem_release(umem);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index c84b07f9f111..aa27688f5ae9 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -675,7 +675,7 @@ static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
675 return PTR_ERR(*umem); 675 return PTR_ERR(*umem);
676 } 676 }
677 677
678 mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL); 678 mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
679 679
680 err = mlx5_ib_get_buf_offset(addr, *page_shift, offset); 680 err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
681 if (err) { 681 if (err) {
@@ -728,7 +728,7 @@ static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
728 return err; 728 return err;
729 } 729 }
730 730
731 mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, &npages, &page_shift, 731 mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
732 &ncont, NULL); 732 &ncont, NULL);
733 err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift, 733 err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
734 &rwq->rq_page_offset); 734 &rwq->rq_page_offset);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 3857dbd9c956..f384db5367fb 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -118,7 +118,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
118 return err; 118 return err;
119 } 119 }
120 120
121 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, &npages, 121 mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,
122 &page_shift, &ncont, NULL); 122 &page_shift, &ncont, NULL);
123 err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, 123 err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift,
124 &offset); 124 &offset);