Diffstat (limited to 'drivers/infiniband/hw/amso1100/c2_provider.c')
-rw-r--r--	drivers/infiniband/hw/amso1100/c2_provider.c	42
1 file changed, 27 insertions(+), 15 deletions(-)
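
This patch converts the amso1100 driver's c2_reg_user_mr() verb to the ib_umem_get()-based interface: instead of being handed a pre-pinned struct ib_umem ("region") by the uverbs core, the driver now receives the raw (start, length, virt) triple, pins and DMA-maps the user memory itself with ib_umem_get(), and releases it with ib_umem_release() on the error path and in c2_dereg_mr(). c2_reg_phys_mr() sets mr->umem = NULL so the dereg path can tell kernel/physical registrations apart from userspace ones. The sketch below shows the general shape such a conversion takes; it is not the amso1100 code, and everything prefixed "my_" is a placeholder name.

/*
 * Minimal sketch of a driver reg_user_mr method after this API change,
 * assuming the 2.6.22-era <rdma/ib_umem.h> interface. Only ib_umem_get()
 * and ib_umem_release() are real API calls; my_mr and my_hw_register()
 * are hypothetical.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>

struct my_mr {
	struct ib_mr	ibmr;
	struct ib_umem	*umem;
};

static struct ib_mr *my_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				    u64 virt, int acc, struct ib_udata *udata)
{
	struct my_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* The driver, not the uverbs core, now pins and DMA-maps the region. */
	mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
	if (IS_ERR(mr->umem)) {
		err = PTR_ERR(mr->umem);
		kfree(mr);
		return ERR_PTR(err);
	}

	/* Device-specific registration of the pinned pages (placeholder). */
	err = my_hw_register(pd, mr, virt, length, acc);
	if (err) {
		ib_umem_release(mr->umem);	/* must unpin on every error path */
		kfree(mr);
		return ERR_PTR(err);
	}

	return &mr->ibmr;
}
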
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 109166223c09..997cf1530762 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -56,6 +56,7 @@
 #include <asm/byteorder.h>
 
 #include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
 #include <rdma/ib_user_verbs.h>
 #include "c2.h"
 #include "c2_provider.h"
@@ -396,6 +397,7 @@ static struct ib_mr *c2_reg_phys_mr(struct ib_pd *ib_pd,
 	}
 
 	mr->pd = to_c2pd(ib_pd);
+	mr->umem = NULL;
 	pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
 		"*iova_start %llx, first pa %llx, last pa %llx\n",
 		__FUNCTION__, page_shift, pbl_depth, total_len,
@@ -428,8 +430,8 @@ static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
 	return c2_reg_phys_mr(pd, &bl, 1, acc, &kva);
 }
 
-static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
-				    int acc, struct ib_udata *udata)
+static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+				    u64 virt, int acc, struct ib_udata *udata)
 {
 	u64 *pages;
 	u64 kva = 0;
@@ -441,15 +443,23 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 	struct c2_mr *c2mr;
 
 	pr_debug("%s:%u\n", __FUNCTION__, __LINE__);
-	shift = ffs(region->page_size) - 1;
 
 	c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
 	if (!c2mr)
 		return ERR_PTR(-ENOMEM);
 	c2mr->pd = c2pd;
 
+	c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+	if (IS_ERR(c2mr->umem)) {
+		err = PTR_ERR(c2mr->umem);
+		kfree(c2mr);
+		return ERR_PTR(err);
+	}
+
+	shift = ffs(c2mr->umem->page_size) - 1;
+
 	n = 0;
-	list_for_each_entry(chunk, &region->chunk_list, list)
+	list_for_each_entry(chunk, &c2mr->umem->chunk_list, list)
 		n += chunk->nents;
 
 	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
@@ -459,35 +469,34 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
 	}
 
 	i = 0;
-	list_for_each_entry(chunk, &region->chunk_list, list) {
+	list_for_each_entry(chunk, &c2mr->umem->chunk_list, list) {
 		for (j = 0; j < chunk->nmap; ++j) {
 			len = sg_dma_len(&chunk->page_list[j]) >> shift;
 			for (k = 0; k < len; ++k) {
 				pages[i++] =
 					sg_dma_address(&chunk->page_list[j]) +
-					(region->page_size * k);
+					(c2mr->umem->page_size * k);
 			}
 		}
 	}
 
-	kva = (u64)region->virt_base;
+	kva = virt;
 	err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
 					 pages,
-					 region->page_size,
+					 c2mr->umem->page_size,
 					 i,
-					 region->length,
-					 region->offset,
+					 length,
+					 c2mr->umem->offset,
 					 &kva,
 					 c2_convert_access(acc),
 					 c2mr);
 	kfree(pages);
-	if (err) {
-		kfree(c2mr);
-		return ERR_PTR(err);
-	}
+	if (err)
+		goto err;
 	return &c2mr->ibmr;
 
 err:
+	ib_umem_release(c2mr->umem);
 	kfree(c2mr);
 	return ERR_PTR(err);
 }
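
The loop in the hunk above is how drivers of this era flattened an ib_umem into a per-page DMA address array for the hardware; c2_reg_user_mr() then hands that array, umem->page_size, umem->offset, and the caller-supplied virt/length to c2_nsmr_register_phys_kern(). Below is a stand-alone sketch of the same walk, assuming the 2.6.22-era struct ib_umem / ib_umem_chunk layout (a chunk_list of scatterlist entries, one pinned page per entry); build_page_array() is a hypothetical helper, not part of this patch.

/*
 * Hypothetical helper (not in this patch): flatten an ib_umem's chunk list
 * into an array of DMA addresses, one entry per hardware page. Mirrors the
 * loop in c2_reg_user_mr() above.
 */
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <rdma/ib_umem.h>

static u64 *build_page_array(struct ib_umem *umem, int *npages)
{
	struct ib_umem_chunk *chunk;
	int shift = ffs(umem->page_size) - 1;	/* page_size is a power of two */
	u64 *pages;
	int n = 0, i = 0, j, k;

	/* Size the array: one scatterlist entry per pinned page. */
	list_for_each_entry(chunk, &umem->chunk_list, list)
		n += chunk->nents;

	pages = kmalloc(n * sizeof(u64), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* Expand each DMA-mapped segment into page_size-sized steps. */
	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; ++j) {
			int len = sg_dma_len(&chunk->page_list[j]) >> shift;

			for (k = 0; k < len; ++k)
				pages[i++] = sg_dma_address(&chunk->page_list[j]) +
					     umem->page_size * k;
		}

	*npages = i;
	return pages;
}
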
@@ -502,8 +511,11 @@ static int c2_dereg_mr(struct ib_mr *ib_mr)
 	err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
 	if (err)
 		pr_debug("c2_stag_dealloc failed: %d\n", err);
-	else
+	else {
+		if (mr->umem)
+			ib_umem_release(mr->umem);
 		kfree(mr);
+	}
 
 	return err;
 }
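
On deregistration the umem is only released when one was attached: physical/DMA registrations made through c2_reg_phys_mr() now carry mr->umem == NULL, set earlier in this patch. A brief sketch of that pattern, reusing the hypothetical my_mr type from the first sketch; my_hw_deregister() is a placeholder for the device-specific teardown (c2_stag_dealloc() in this driver).

/* Sketch of the matching dereg path, assuming the hypothetical my_mr above. */
static int my_dereg_mr(struct ib_mr *ibmr)
{
	struct my_mr *mr = container_of(ibmr, struct my_mr, ibmr);
	int err;

	/* Device-specific teardown first (placeholder). */
	err = my_hw_deregister(ibmr);
	if (err)
		return err;

	if (mr->umem)			/* NULL for phys/DMA registrations */
		ib_umem_release(mr->umem);
	kfree(mr);

	return 0;
}
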