author    Eli Cohen <eli@dev.mellanox.co.il>  2013-10-23 02:53:19 -0400
committer Roland Dreier <roland@purestorage.com>  2013-11-08 17:43:00 -0500
commit    bf0bf77f6519e5dcd57a77b47e1d151c1e81b7ec
tree      18f449ec92393cb3f828c9f9c95c25234f807d3a /drivers/net/ethernet
parent    952f5f6e807ba82e1b82fcfcf7f73db022342aa7
mlx5: Support communicating arbitrary host page size to firmware
Connect-IB firmware requires the pages handed to it to be 4K in size. This
patch splits larger host pages into 4K units, enabling support for
architectures with a larger native page size, such as PowerPC. It also
fixes several places that used PAGE_SHIFT instead of an explicit 12, which
is the page shift inherent to Connect-IB.

Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
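To make the bookkeeping concrete before reading the diff, here is a minimal,
self-contained userspace sketch of the scheme the patch introduces. The names
(host_page, take_4k, put_4k, HOST_PAGE_SIZE) are hypothetical stand-ins, not
the driver's API: each host page is carved into PAGE_SIZE / 4096 firmware-sized
units, a bitmask records which 4K units are free, and a counter decides when
the whole host page can be handed back, mirroring the bitmask/free_count
fields the patch adds to struct fw_page.

/* Illustrative sketch only -- hypothetical names, not the driver's API. */
#include <stdio.h>

#define HOST_PAGE_SIZE  65536                     /* e.g. 64K PowerPC page */
#define NUM_4K          (HOST_PAGE_SIZE / 4096)   /* 16 units per host page */

struct host_page {
        unsigned long long addr;       /* DMA address of the host page */
        unsigned long      bitmask;    /* bit n set => 4K unit n is free */
        unsigned           free_count; /* number of free 4K units */
};

/* Hand out the lowest free 4K unit; returns 0 on success. */
static int take_4k(struct host_page *p, unsigned long long *addr)
{
        unsigned n;

        for (n = 0; n < NUM_4K; n++)
                if (p->bitmask & (1UL << n))
                        break;
        if (n == NUM_4K)
                return -1;                     /* host page exhausted */
        p->bitmask &= ~(1UL << n);
        p->free_count--;
        *addr = p->addr + n * 4096ULL;
        return 0;
}

/* Return a 4K unit to its host page. */
static void put_4k(struct host_page *p, unsigned long long addr)
{
        unsigned n = (unsigned)((addr - p->addr) / 4096);

        p->bitmask |= 1UL << n;
        if (++p->free_count == NUM_4K)
                printf("host page at 0x%llx can be freed\n", p->addr);
}

int main(void)
{
        struct host_page p = {
                .addr       = 0x10000,
                .bitmask    = (1UL << NUM_4K) - 1,
                .free_count = NUM_4K,
        };
        unsigned long long a;

        if (!take_4k(&p, &a))
                printf("got 4K unit at 0x%llx, %u units left\n", a, p.free_count);
        put_4k(&p, a);
        return 0;
}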
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c          2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c   174
2 files changed, 121 insertions, 55 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 2231d93cc7ad..6b4b436840bd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -354,7 +354,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
         in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_EQ);
         in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
         in->ctx.intr = vecidx;
-        in->ctx.log_page_size = PAGE_SHIFT - 12;
+        in->ctx.log_page_size = eq->buf.page_shift - 12;
         in->events_mask = cpu_to_be64(mask);
 
         err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
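On the eq.c change above: the device interprets log_page_size as the log2 of
the buffer's page size relative to its native 4K page, i.e. page_shift - 12.
On 4K-page hosts PAGE_SHIFT - 12 happens to give the same answer, which is why
the old code only broke on larger-page architectures. A tiny sketch of the
encoding (eq_log_page_size is a made-up helper, not a driver function):

#include <stdio.h>

/* log_page_size the device expects: log2(page_size / 4096) */
static unsigned int eq_log_page_size(unsigned int buf_page_shift)
{
        return buf_page_shift - 12;
}

int main(void)
{
        printf("%u\n", eq_log_page_size(12));  /* 4K host page  -> 0 */
        printf("%u\n", eq_log_page_size(16));  /* 64K host page -> 4 */
        return 0;
}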
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index a0d0da35578c..013aa422adee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -57,10 +57,13 @@ struct mlx5_pages_req {
 };
 
 struct fw_page {
         struct rb_node   rb_node;
         u64              addr;
         struct page     *page;
         u16              func_id;
+        unsigned long    bitmask;
+        struct list_head list;
+        unsigned         free_count;
 };
 
 struct mlx5_query_pages_inbox {
@@ -94,6 +97,11 @@ enum {
         MAX_RECLAIM_TIME_MSECS = 5000,
 };
 
+enum {
+        MLX5_MAX_RECLAIM_TIME_MILI = 5000,
+        MLX5_NUM_4K_IN_PAGE        = PAGE_SIZE / 4096,
+};
+
 static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
 {
         struct rb_root *root = &dev->priv.page_root;
@@ -101,6 +109,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
         struct rb_node *parent = NULL;
         struct fw_page *nfp;
         struct fw_page *tfp;
+        int i;
 
         while (*new) {
                 parent = *new;
@@ -113,25 +122,29 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u
                 return -EEXIST;
         }
 
-        nfp = kmalloc(sizeof(*nfp), GFP_KERNEL);
+        nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
         if (!nfp)
                 return -ENOMEM;
 
         nfp->addr = addr;
         nfp->page = page;
         nfp->func_id = func_id;
+        nfp->free_count = MLX5_NUM_4K_IN_PAGE;
+        for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
+                set_bit(i, &nfp->bitmask);
 
         rb_link_node(&nfp->rb_node, parent, new);
         rb_insert_color(&nfp->rb_node, root);
+        list_add(&nfp->list, &dev->priv.free_list);
 
         return 0;
 }
 
-static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
+static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
 {
         struct rb_root *root = &dev->priv.page_root;
         struct rb_node *tmp = root->rb_node;
-        struct page *result = NULL;
+        struct fw_page *result = NULL;
         struct fw_page *tfp;
 
         while (tmp) {
@@ -141,9 +154,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
         } else if (tfp->addr > addr) {
                 tmp = tmp->rb_right;
         } else {
-                rb_erase(&tfp->rb_node, root);
-                result = tfp->page;
-                kfree(tfp);
+                result = tfp;
                 break;
         }
 }
@@ -176,13 +187,97 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
         return err;
 }
 
+static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
+{
+        struct fw_page *fp;
+        unsigned n;
+
+        if (list_empty(&dev->priv.free_list)) {
+                mlx5_core_warn(dev, "\n");
+                return -ENOMEM;
+        }
+
+        fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
+        n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
+        if (n >= MLX5_NUM_4K_IN_PAGE) {
+                mlx5_core_warn(dev, "alloc 4k bug\n");
+                return -ENOENT;
+        }
+        clear_bit(n, &fp->bitmask);
+        fp->free_count--;
+        if (!fp->free_count)
+                list_del(&fp->list);
+
+        *addr = fp->addr + n * 4096;
+
+        return 0;
+}
+
+static void free_4k(struct mlx5_core_dev *dev, u64 addr)
+{
+        struct fw_page *fwp;
+        int n;
+
+        fwp = find_fw_page(dev, addr & PAGE_MASK);
+        if (!fwp) {
+                mlx5_core_warn(dev, "page not found\n");
+                return;
+        }
+
+        n = (addr & ~PAGE_MASK) >> 12;
+        fwp->free_count++;
+        set_bit(n, &fwp->bitmask);
+        if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
+                rb_erase(&fwp->rb_node, &dev->priv.page_root);
+                list_del(&fwp->list);
+                dma_unmap_page(&dev->pdev->dev, addr & PAGE_MASK, PAGE_SIZE, DMA_BIDIRECTIONAL);
+                __free_page(fwp->page);
+                kfree(fwp);
+        } else if (fwp->free_count == 1) {
+                list_add(&fwp->list, &dev->priv.free_list);
+        }
+}
+
+static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
+{
+        struct page *page;
+        u64 addr;
+        int err;
+
+        page = alloc_page(GFP_HIGHUSER);
+        if (!page) {
+                mlx5_core_warn(dev, "failed to allocate page\n");
+                return -ENOMEM;
+        }
+        addr = dma_map_page(&dev->pdev->dev, page, 0,
+                            PAGE_SIZE, DMA_BIDIRECTIONAL);
+        if (dma_mapping_error(&dev->pdev->dev, addr)) {
+                mlx5_core_warn(dev, "failed dma mapping page\n");
+                err = -ENOMEM;
+                goto out_alloc;
+        }
+        err = insert_page(dev, addr, page, func_id);
+        if (err) {
+                mlx5_core_err(dev, "failed to track allocated page\n");
+                goto out_mapping;
+        }
+
+        return 0;
+
+out_mapping:
+        dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+
+out_alloc:
+        __free_page(page);
+
+        return err;
+}
 static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
                       int notify_fail)
 {
         struct mlx5_manage_pages_inbox *in;
         struct mlx5_manage_pages_outbox out;
         struct mlx5_manage_pages_inbox *nin;
-        struct page *page;
         int inlen;
         u64 addr;
         int err;
@@ -197,27 +292,15 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
         memset(&out, 0, sizeof(out));
 
         for (i = 0; i < npages; i++) {
-                page = alloc_page(GFP_HIGHUSER);
-                if (!page) {
-                        err = -ENOMEM;
-                        mlx5_core_warn(dev, "failed to allocate page\n");
-                        goto out_alloc;
-                }
-                addr = dma_map_page(&dev->pdev->dev, page, 0,
-                                    PAGE_SIZE, DMA_BIDIRECTIONAL);
-                if (dma_mapping_error(&dev->pdev->dev, addr)) {
-                        mlx5_core_warn(dev, "failed dma mapping page\n");
-                        __free_page(page);
-                        err = -ENOMEM;
-                        goto out_alloc;
-                }
-                err = insert_page(dev, addr, page, func_id);
+retry:
+                err = alloc_4k(dev, &addr);
                 if (err) {
-                        mlx5_core_err(dev, "failed to track allocated page\n");
-                        dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
-                        __free_page(page);
-                        err = -ENOMEM;
-                        goto out_alloc;
+                        if (err == -ENOMEM)
+                                err = alloc_system_page(dev, func_id);
+                        if (err)
+                                goto out_4k;
+
+                        goto retry;
                 }
                 in->pas[i] = cpu_to_be64(addr);
         }
@@ -227,7 +310,6 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
         in->func_id = cpu_to_be16(func_id);
         in->num_entries = cpu_to_be32(npages);
         err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
-        mlx5_core_dbg(dev, "err %d\n", err);
         if (err) {
                 mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
                 goto out_alloc;
@@ -251,7 +333,7 @@ out_alloc:
         nin = kzalloc(sizeof(*nin), GFP_KERNEL);
         if (!nin) {
                 mlx5_core_warn(dev, "allocation failed\n");
-                goto unmap;
+                goto out_4k;
         }
         memset(&out, 0, sizeof(out));
         nin->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
@@ -261,19 +343,9 @@ out_alloc:
                 kfree(nin);
         }
 
-unmap:
-        for (i--; i >= 0; i--) {
-                addr = be64_to_cpu(in->pas[i]);
-                page = remove_page(dev, addr);
-                if (!page) {
-                        mlx5_core_err(dev, "BUG: can't remove page at addr 0x%llx\n",
-                                      addr);
-                        continue;
-                }
-                dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
-                __free_page(page);
-        }
-
+out_4k:
+        for (i--; i >= 0; i--)
+                free_4k(dev, be64_to_cpu(in->pas[i]));
 out_free:
         mlx5_vfree(in);
         return err;
@@ -284,7 +356,6 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 {
         struct mlx5_manage_pages_inbox   in;
         struct mlx5_manage_pages_outbox *out;
-        struct page *page;
         int num_claimed;
         int outlen;
         u64 addr;
@@ -323,13 +394,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
 
         for (i = 0; i < num_claimed; i++) {
                 addr = be64_to_cpu(out->pas[i]);
-                page = remove_page(dev, addr);
-                if (!page) {
-                        mlx5_core_warn(dev, "FW reported unknown DMA address 0x%llx\n", addr);
-                } else {
-                        dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
-                        __free_page(page);
-                }
+                free_4k(dev, addr);
         }
 
 out_free:
@@ -435,6 +500,7 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
 void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
 {
         dev->priv.page_root = RB_ROOT;
+        INIT_LIST_HEAD(&dev->priv.free_list);
 }
 
 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)