about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/mellanox/mlx4/mr.c
diff options
context:
space:
mode:
author: Jack Morgenstein <jackm@dev.mellanox.co.il> 2011-12-12 23:13:48 -0500
committer: David S. Miller <davem@davemloft.net> 2011-12-13 13:56:06 -0500
commit ea51b377abcdf0f3fab0119879be373bda69afb1 (patch)
tree 6b2b991009896c58305dde3469641c96fbafb006 /drivers/net/ethernet/mellanox/mlx4/mr.c
parent d7233386b21775a8b099d7d5dcc36d1e4642b896 (diff)
mlx4_core: mtt modifications for SRIOV
MTTs are resources which are allocated and tracked by the PF driver. In multifunction mode, the allocation and icm mapping is done in the resource tracker (later patch in this sequence). To accomplish this, we have "work" functions whose names start with "__", and "request" functions (same name, no __). If we are operating in multifunction mode, the request function actually results in comm-channel commands being sent (ALLOC_RES or FREE_RES). The PF-driver comm-channel handler will ultimately invoke the "work" (__) function and return the result. If we are not in multifunction mode, the "work" handler is invoked immediately. Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx4/mr.c')
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mr.c | 401
1 file changed, 360 insertions, 41 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 057b22d64a05..916eba4572b7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -32,9 +32,11 @@
32 * SOFTWARE. 32 * SOFTWARE.
33 */ 33 */
34 34
35#include <linux/init.h>
35#include <linux/errno.h> 36#include <linux/errno.h>
36#include <linux/export.h> 37#include <linux/export.h>
37#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/kernel.h>
38 40
39#include <linux/mlx4/cmd.h> 41#include <linux/mlx4/cmd.h>
40 42
@@ -180,7 +182,7 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
180 kfree(buddy->num_free); 182 kfree(buddy->num_free);
181} 183}
182 184
183static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order) 185static u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
184{ 186{
185 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 187 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
186 u32 seg; 188 u32 seg;
@@ -198,6 +200,26 @@ static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
198 return seg; 200 return seg;
199} 201}
200 202
203static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
204{
205 u64 in_param;
206 u64 out_param;
207 int err;
208
209 if (mlx4_is_mfunc(dev)) {
210 set_param_l(&in_param, order);
211 err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
212 RES_OP_RESERVE_AND_MAP,
213 MLX4_CMD_ALLOC_RES,
214 MLX4_CMD_TIME_CLASS_A,
215 MLX4_CMD_WRAPPED);
216 if (err)
217 return -1;
218 return get_param_l(&out_param);
219 }
220 return __mlx4_alloc_mtt_range(dev, order);
221}
222
201int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift, 223int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
202 struct mlx4_mtt *mtt) 224 struct mlx4_mtt *mtt)
203{ 225{
@@ -221,16 +243,42 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
221} 243}
222EXPORT_SYMBOL_GPL(mlx4_mtt_init); 244EXPORT_SYMBOL_GPL(mlx4_mtt_init);
223 245
224void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) 246static void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg,
247 int order)
225{ 248{
226 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 249 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
227 250
251 mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, order);
252 mlx4_table_put_range(dev, &mr_table->mtt_table, first_seg,
253 first_seg + (1 << order) - 1);
254}
255
256static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order)
257{
258 u64 in_param;
259 int err;
260
261 if (mlx4_is_mfunc(dev)) {
262 set_param_l(&in_param, first_seg);
263 set_param_h(&in_param, order);
264 err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
265 MLX4_CMD_FREE_RES,
266 MLX4_CMD_TIME_CLASS_A,
267 MLX4_CMD_WRAPPED);
268 if (err)
269 mlx4_warn(dev, "Failed to free mtt range at:%d"
270 " order:%d\n", first_seg, order);
271 return;
272 }
273 __mlx4_free_mtt_range(dev, first_seg, order);
274}
275
276void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
277{
228 if (mtt->order < 0) 278 if (mtt->order < 0)
229 return; 279 return;
230 280
231 mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order); 281 mlx4_free_mtt_range(dev, mtt->first_seg, mtt->order);
232 mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
233 mtt->first_seg + (1 << mtt->order) - 1);
234} 282}
235EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup); 283EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
236 284
@@ -253,8 +301,9 @@ static u32 key_to_hw_index(u32 key)
253static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 301static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
254 int mpt_index) 302 int mpt_index)
255{ 303{
256 return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT, 304 return mlx4_cmd(dev, mailbox->dma | dev->caps.function , mpt_index,
257 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 305 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
306 MLX4_CMD_WRAPPED);
258} 307}
259 308
260static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, 309static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -265,58 +314,192 @@ static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
265 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED); 314 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
266} 315}
267 316
268int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, 317static int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
269 int npages, int page_shift, struct mlx4_mr *mr) 318 u32 *base_mridx)
270{ 319{
271 struct mlx4_priv *priv = mlx4_priv(dev); 320 struct mlx4_priv *priv = mlx4_priv(dev);
272 u32 index; 321 u32 mridx;
273 int err;
274 322
275 index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap); 323 mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
276 if (index == -1) 324 if (mridx == -1)
277 return -ENOMEM; 325 return -ENOMEM;
278 326
327 *base_mridx = mridx;
328 return 0;
329
330}
331EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
332
333static void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
334{
335 struct mlx4_priv *priv = mlx4_priv(dev);
336 mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
337}
338EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
339
340static int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
341 u64 iova, u64 size, u32 access, int npages,
342 int page_shift, struct mlx4_mr *mr)
343{
279 mr->iova = iova; 344 mr->iova = iova;
280 mr->size = size; 345 mr->size = size;
281 mr->pd = pd; 346 mr->pd = pd;
282 mr->access = access; 347 mr->access = access;
283 mr->enabled = 0; 348 mr->enabled = MLX4_MR_DISABLED;
284 mr->key = hw_index_to_key(index); 349 mr->key = hw_index_to_key(mridx);
350
351 return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
352}
353EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
354
355static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
356 struct mlx4_cmd_mailbox *mailbox,
357 int num_entries)
358{
359 return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
360 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
361}
362
363static int __mlx4_mr_reserve(struct mlx4_dev *dev)
364{
365 struct mlx4_priv *priv = mlx4_priv(dev);
366
367 return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
368}
285 369
286 err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); 370static int mlx4_mr_reserve(struct mlx4_dev *dev)
371{
372 u64 out_param;
373
374 if (mlx4_is_mfunc(dev)) {
375 if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
376 MLX4_CMD_ALLOC_RES,
377 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
378 return -1;
379 return get_param_l(&out_param);
380 }
381 return __mlx4_mr_reserve(dev);
382}
383
384static void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
385{
386 struct mlx4_priv *priv = mlx4_priv(dev);
387
388 mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
389}
390
391static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
392{
393 u64 in_param;
394
395 if (mlx4_is_mfunc(dev)) {
396 set_param_l(&in_param, index);
397 if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
398 MLX4_CMD_FREE_RES,
399 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
400 mlx4_warn(dev, "Failed to release mr index:%d\n",
401 index);
402 return;
403 }
404 __mlx4_mr_release(dev, index);
405}
406
407static int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
408{
409 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
410
411 return mlx4_table_get(dev, &mr_table->dmpt_table, index);
412}
413
414static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
415{
416 u64 param;
417
418 if (mlx4_is_mfunc(dev)) {
419 set_param_l(&param, index);
420 return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
421 MLX4_CMD_ALLOC_RES,
422 MLX4_CMD_TIME_CLASS_A,
423 MLX4_CMD_WRAPPED);
424 }
425 return __mlx4_mr_alloc_icm(dev, index);
426}
427
428static void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
429{
430 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
431
432 mlx4_table_put(dev, &mr_table->dmpt_table, index);
433}
434
435static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
436{
437 u64 in_param;
438
439 if (mlx4_is_mfunc(dev)) {
440 set_param_l(&in_param, index);
441 if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
442 MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
443 MLX4_CMD_WRAPPED))
444 mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
445 index);
446 return;
447 }
448 return __mlx4_mr_free_icm(dev, index);
449}
450
451int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
452 int npages, int page_shift, struct mlx4_mr *mr)
453{
454 u32 index;
455 int err;
456
457 index = mlx4_mr_reserve(dev);
458 if (index == -1)
459 return -ENOMEM;
460
461 err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
462 access, npages, page_shift, mr);
287 if (err) 463 if (err)
288 mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index); 464 mlx4_mr_release(dev, index);
289 465
290 return err; 466 return err;
291} 467}
292EXPORT_SYMBOL_GPL(mlx4_mr_alloc); 468EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
293 469
294void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr) 470static void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
295{ 471{
296 struct mlx4_priv *priv = mlx4_priv(dev);
297 int err; 472 int err;
298 473
299 if (mr->enabled) { 474 if (mr->enabled == MLX4_MR_EN_HW) {
300 err = mlx4_HW2SW_MPT(dev, NULL, 475 err = mlx4_HW2SW_MPT(dev, NULL,
301 key_to_hw_index(mr->key) & 476 key_to_hw_index(mr->key) &
302 (dev->caps.num_mpts - 1)); 477 (dev->caps.num_mpts - 1));
303 if (err) 478 if (err)
304 mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err); 479 mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
305 }
306 480
481 mr->enabled = MLX4_MR_EN_SW;
482 }
307 mlx4_mtt_cleanup(dev, &mr->mtt); 483 mlx4_mtt_cleanup(dev, &mr->mtt);
308 mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key)); 484}
485EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
486
487void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
488{
489 mlx4_mr_free_reserved(dev, mr);
490 if (mr->enabled)
491 mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
492 mlx4_mr_release(dev, key_to_hw_index(mr->key));
309} 493}
310EXPORT_SYMBOL_GPL(mlx4_mr_free); 494EXPORT_SYMBOL_GPL(mlx4_mr_free);
311 495
312int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) 496int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
313{ 497{
314 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
315 struct mlx4_cmd_mailbox *mailbox; 498 struct mlx4_cmd_mailbox *mailbox;
316 struct mlx4_mpt_entry *mpt_entry; 499 struct mlx4_mpt_entry *mpt_entry;
317 int err; 500 int err;
318 501
319 err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key)); 502 err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
320 if (err) 503 if (err)
321 return err; 504 return err;
322 505
@@ -363,8 +546,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
363 mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err); 546 mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
364 goto err_cmd; 547 goto err_cmd;
365 } 548 }
366 549 mr->enabled = MLX4_MR_EN_HW;
367 mr->enabled = 1;
368 550
369 mlx4_free_cmd_mailbox(dev, mailbox); 551 mlx4_free_cmd_mailbox(dev, mailbox);
370 552
@@ -374,7 +556,7 @@ err_cmd:
374 mlx4_free_cmd_mailbox(dev, mailbox); 556 mlx4_free_cmd_mailbox(dev, mailbox);
375 557
376err_table: 558err_table:
377 mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key)); 559 mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
378 return err; 560 return err;
379} 561}
380EXPORT_SYMBOL_GPL(mlx4_mr_enable); 562EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -413,27 +595,74 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
413 return 0; 595 return 0;
414} 596}
415 597
416int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, 598static int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
417 int start_index, int npages, u64 *page_list) 599 int start_index, int npages, u64 *page_list)
418{ 600{
601 int err = 0;
419 int chunk; 602 int chunk;
420 int err;
421
422 if (mtt->order < 0)
423 return -EINVAL;
424 603
425 while (npages > 0) { 604 while (npages > 0) {
426 chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages); 605 chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
427 err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list); 606 err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
428 if (err) 607 if (err)
429 return err; 608 return err;
430
431 npages -= chunk; 609 npages -= chunk;
432 start_index += chunk; 610 start_index += chunk;
433 page_list += chunk; 611 page_list += chunk;
434 } 612 }
613 return err;
614}
435 615
436 return 0; 616int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
617 int start_index, int npages, u64 *page_list)
618{
619 struct mlx4_cmd_mailbox *mailbox = NULL;
620 __be64 *inbox = NULL;
621 int chunk;
622 int err = 0;
623 int i;
624
625 if (mtt->order < 0)
626 return -EINVAL;
627
628 if (mlx4_is_mfunc(dev)) {
629 mailbox = mlx4_alloc_cmd_mailbox(dev);
630 if (IS_ERR(mailbox))
631 return PTR_ERR(mailbox);
632 inbox = mailbox->buf;
633
634 while (npages > 0) {
635 int s = mtt->first_seg * dev->caps.mtts_per_seg +
636 start_index;
637 chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) -
638 dev->caps.mtts_per_seg, npages);
639 if (s / (PAGE_SIZE / sizeof(u64)) !=
640 (s + chunk - 1) / (PAGE_SIZE / sizeof(u64)))
641 chunk = PAGE_SIZE / sizeof(u64) -
642 (s % (PAGE_SIZE / sizeof(u64)));
643
644 inbox[0] = cpu_to_be64(mtt->first_seg *
645 dev->caps.mtts_per_seg +
646 start_index);
647 inbox[1] = 0;
648 for (i = 0; i < chunk; ++i)
649 inbox[i + 2] = cpu_to_be64(page_list[i] |
650 MLX4_MTT_FLAG_PRESENT);
651 err = mlx4_WRITE_MTT(dev, mailbox, chunk);
652 if (err) {
653 mlx4_free_cmd_mailbox(dev, mailbox);
654 return err;
655 }
656
657 npages -= chunk;
658 start_index += chunk;
659 page_list += chunk;
660 }
661 mlx4_free_cmd_mailbox(dev, mailbox);
662 return err;
663 }
664
665 return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
437} 666}
438EXPORT_SYMBOL_GPL(mlx4_write_mtt); 667EXPORT_SYMBOL_GPL(mlx4_write_mtt);
439 668
@@ -463,9 +692,18 @@ EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
463 692
464int mlx4_init_mr_table(struct mlx4_dev *dev) 693int mlx4_init_mr_table(struct mlx4_dev *dev)
465{ 694{
466 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 695 struct mlx4_priv *priv = mlx4_priv(dev);
696 struct mlx4_mr_table *mr_table = &priv->mr_table;
467 int err; 697 int err;
468 698
699 if (!is_power_of_2(dev->caps.num_mpts))
700 return -EINVAL;
701
702 /* Nothing to do for slaves - all MR handling is forwarded
703 * to the master */
704 if (mlx4_is_slave(dev))
705 return 0;
706
469 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts, 707 err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
470 ~0, dev->caps.reserved_mrws, 0); 708 ~0, dev->caps.reserved_mrws, 0);
471 if (err) 709 if (err)
@@ -477,7 +715,10 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
477 goto err_buddy; 715 goto err_buddy;
478 716
479 if (dev->caps.reserved_mtts) { 717 if (dev->caps.reserved_mtts) {
480 if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) { 718 priv->reserved_mtts =
719 mlx4_alloc_mtt_range(dev,
720 fls(dev->caps.reserved_mtts - 1));
721 if (priv->reserved_mtts < 0) {
481 mlx4_warn(dev, "MTT table of order %d is too small.\n", 722 mlx4_warn(dev, "MTT table of order %d is too small.\n",
482 mr_table->mtt_buddy.max_order); 723 mr_table->mtt_buddy.max_order);
483 err = -ENOMEM; 724 err = -ENOMEM;
@@ -498,8 +739,14 @@ err_buddy:
498 739
499void mlx4_cleanup_mr_table(struct mlx4_dev *dev) 740void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
500{ 741{
501 struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; 742 struct mlx4_priv *priv = mlx4_priv(dev);
743 struct mlx4_mr_table *mr_table = &priv->mr_table;
502 744
745 if (mlx4_is_slave(dev))
746 return;
747 if (priv->reserved_mtts >= 0)
748 mlx4_free_mtt_range(dev, priv->reserved_mtts,
749 fls(dev->caps.reserved_mtts - 1));
503 mlx4_buddy_cleanup(&mr_table->mtt_buddy); 750 mlx4_buddy_cleanup(&mr_table->mtt_buddy);
504 mlx4_bitmap_cleanup(&mr_table->mpt_bitmap); 751 mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
505} 752}
@@ -620,6 +867,46 @@ err_free:
620} 867}
621EXPORT_SYMBOL_GPL(mlx4_fmr_alloc); 868EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
622 869
870static int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
871 u32 pd, u32 access, int max_pages,
872 int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
873{
874 struct mlx4_priv *priv = mlx4_priv(dev);
875 int err = -ENOMEM;
876
877 if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
878 return -EINVAL;
879
880 /* All MTTs must fit in the same page */
881 if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
882 return -EINVAL;
883
884 fmr->page_shift = page_shift;
885 fmr->max_pages = max_pages;
886 fmr->max_maps = max_maps;
887 fmr->maps = 0;
888
889 err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
890 page_shift, &fmr->mr);
891 if (err)
892 return err;
893
894 fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
895 fmr->mr.mtt.first_seg,
896 &fmr->dma_handle);
897 if (!fmr->mtts) {
898 err = -ENOMEM;
899 goto err_free;
900 }
901
902 return 0;
903
904err_free:
905 mlx4_mr_free_reserved(dev, &fmr->mr);
906 return err;
907}
908EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
909
623int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr) 910int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
624{ 911{
625 struct mlx4_priv *priv = mlx4_priv(dev); 912 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -641,12 +928,32 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
641void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, 928void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
642 u32 *lkey, u32 *rkey) 929 u32 *lkey, u32 *rkey)
643{ 930{
931 struct mlx4_cmd_mailbox *mailbox;
932 int err;
933
644 if (!fmr->maps) 934 if (!fmr->maps)
645 return; 935 return;
646 936
647 fmr->maps = 0; 937 fmr->maps = 0;
648 938
649 *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW; 939 mailbox = mlx4_alloc_cmd_mailbox(dev);
940 if (IS_ERR(mailbox)) {
941 err = PTR_ERR(mailbox);
942 printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
943 " failed (%d)\n", err);
944 return;
945 }
946
947 err = mlx4_HW2SW_MPT(dev, NULL,
948 key_to_hw_index(fmr->mr.key) &
949 (dev->caps.num_mpts - 1));
950 mlx4_free_cmd_mailbox(dev, mailbox);
951 if (err) {
952 printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
953 err);
954 return;
955 }
956 fmr->mr.enabled = MLX4_MR_EN_SW;
650} 957}
651EXPORT_SYMBOL_GPL(mlx4_fmr_unmap); 958EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
652 959
@@ -655,13 +962,25 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
655 if (fmr->maps) 962 if (fmr->maps)
656 return -EBUSY; 963 return -EBUSY;
657 964
658 fmr->mr.enabled = 0;
659 mlx4_mr_free(dev, &fmr->mr); 965 mlx4_mr_free(dev, &fmr->mr);
966 fmr->mr.enabled = MLX4_MR_DISABLED;
660 967
661 return 0; 968 return 0;
662} 969}
663EXPORT_SYMBOL_GPL(mlx4_fmr_free); 970EXPORT_SYMBOL_GPL(mlx4_fmr_free);
664 971
972static int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
973{
974 if (fmr->maps)
975 return -EBUSY;
976
977 mlx4_mr_free_reserved(dev, &fmr->mr);
978 fmr->mr.enabled = MLX4_MR_DISABLED;
979
980 return 0;
981}
982EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
983
665int mlx4_SYNC_TPT(struct mlx4_dev *dev) 984int mlx4_SYNC_TPT(struct mlx4_dev *dev)
666{ 985{
667 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000, 986 return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,