author	Yishai Hadas <yishaih@mellanox.com>	2014-09-14 09:47:55 -0400
committer	Roland Dreier <roland@purestorage.com>	2014-10-09 03:08:40 -0400
commit	f39f86971c0cded8c2563e7dfd82c650ca9c0044 (patch)
tree	136b25982e9c344c38b8e8b56bb267e22d22230f /drivers/infiniband
parent	f83b42636a91e63f330ea90996646b4a885aca74 (diff)
IB/mlx5: Modify to work with arbitrary page size
When dealing with umem objects, the driver assumed host page sizes
defined by PAGE_SHIFT. Modify the code to use arbitrary page shift
provided by umem->page_shift to support different page sizes.

Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
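To illustrate the pattern the patch adopts, here is a minimal, self-contained
userspace sketch (not driver code: ilog2_sketch() is an illustrative stand-in
for the kernel's ilog2(), and the sizes are made up). The page shift is derived
from the umem's own page size, assumed to be a power of two, and DMA lengths
and addresses are then shifted by that value instead of the host PAGE_SHIFT:

	/*
	 * Sketch only: derive the page shift from the umem page size
	 * (assumed to be a power of two) instead of hard-coding PAGE_SHIFT.
	 * ilog2_sketch() stands in for the kernel's ilog2().
	 */
	#include <stdio.h>

	static unsigned long ilog2_sketch(unsigned long v)
	{
		unsigned long shift = 0;

		while (v > 1) {
			v >>= 1;
			shift++;
		}
		return shift;
	}

	int main(void)
	{
		unsigned long umem_page_size = 64 * 1024;	/* e.g. a 64 KB umem page */
		unsigned long page_shift = ilog2_sketch(umem_page_size);	/* 16 */
		unsigned long long sg_len = 1ULL << 20;		/* a 1 MB DMA chunk */

		/* With 64 KB pages the chunk spans 16 pages, not 256. */
		printf("page_shift=%lu pages=%llu\n", page_shift, sg_len >> page_shift);
		return 0;
	}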
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/mlx5/mem.c	18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index a3e81444c825..dae07eae9507 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -55,16 +55,17 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 	u64 pfn;
 	struct scatterlist *sg;
 	int entry;
+	unsigned long page_shift = ilog2(umem->page_size);
 
-	addr = addr >> PAGE_SHIFT;
+	addr = addr >> page_shift;
 	tmp = (unsigned long)addr;
 	m = find_first_bit(&tmp, sizeof(tmp));
 	skip = 1 << m;
 	mask = skip - 1;
 	i = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		len = sg_dma_len(sg) >> PAGE_SHIFT;
-		pfn = sg_dma_address(sg) >> PAGE_SHIFT;
+		len = sg_dma_len(sg) >> page_shift;
+		pfn = sg_dma_address(sg) >> page_shift;
 		for (k = 0; k < len; k++) {
 			if (!(i & mask)) {
 				tmp = (unsigned long)pfn;
@@ -103,14 +104,15 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 
 		*ncont = 0;
 	}
-	*shift = PAGE_SHIFT + m;
+	*shift = page_shift + m;
 	*count = i;
 }
 
 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 			  int page_shift, __be64 *pas, int umr)
 {
-	int shift = page_shift - PAGE_SHIFT;
+	unsigned long umem_page_shift = ilog2(umem->page_size);
+	int shift = page_shift - umem_page_shift;
 	int mask = (1 << shift) - 1;
 	int i, k;
 	u64 cur = 0;
@@ -121,11 +123,11 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 
 	i = 0;
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		len = sg_dma_len(sg) >> umem_page_shift;
 		base = sg_dma_address(sg);
 		for (k = 0; k < len; k++) {
 			if (!(i & mask)) {
-				cur = base + (k << PAGE_SHIFT);
+				cur = base + (k << umem_page_shift);
 				if (umr)
 					cur |= 3;
 
@@ -134,7 +136,7 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 				    i >> shift, be64_to_cpu(pas[i >> shift]));
 		} else
 			mlx5_ib_dbg(dev, "=====> 0x%llx\n",
-				    base + (k << PAGE_SHIFT));
+				    base + (k << umem_page_shift));
 		i++;
 	}
 }
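As a follow-up illustration of the contiguity logic these hunks generalize:
mlx5_ib_cont_pages() takes the lowest set bit m of the starting pfn, now
counted in umem-page units rather than host pages, and reports the resulting
block shift as page_shift + m. A hedged userspace sketch of just that
alignment bound follows (cont_block_shift() and the test addresses are made
up for illustration; the real function also clamps m against the actual
contiguity found while walking the scatterlist):

	/*
	 * Sketch only: the lowest set bit of the starting pfn (in umem-page
	 * units) bounds the largest naturally aligned contiguous block,
	 * mirroring *shift = page_shift + m in mlx5_ib_cont_pages().
	 */
	#include <stdio.h>

	static int lowest_set_bit(unsigned long v)
	{
		int m = 0;

		while (v && !(v & 1)) {
			v >>= 1;
			m++;
		}
		return m;
	}

	static int cont_block_shift(unsigned long long addr, int page_shift)
	{
		unsigned long pfn = addr >> page_shift;

		return page_shift + lowest_set_bit(pfn);
	}

	int main(void)
	{
		/* 4 KB umem pages, buffer starting 2 MB-aligned. */
		printf("block shift = %d\n", cont_block_shift(0x200000ULL, 12)); /* 21 */
		/* 64 KB umem pages, same start address. */
		printf("block shift = %d\n", cont_block_shift(0x200000ULL, 16)); /* 21 */
		return 0;
	}

Both calls report a 2 MB-aligned block (shift 21): the umem page size changes
only the units in which the pfn is counted, not the final alignment, which is
why the patch can swap PAGE_SHIFT for the umem-derived shift throughout.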