-rw-r--r--  drivers/staging/rdma/hfi1/keys.c  | 55 -------------------------------------
-rw-r--r--  drivers/staging/rdma/hfi1/mr.c    | 33 ++-------------------------------
-rw-r--r--  drivers/staging/rdma/hfi1/verbs.c |  9 +--------
-rw-r--r--  drivers/staging/rdma/hfi1/verbs.h |  8 --------
4 files changed, 3 insertions(+), 102 deletions(-)
diff --git a/drivers/staging/rdma/hfi1/keys.c b/drivers/staging/rdma/hfi1/keys.c
index 82c21b1c0263..cb4e6087dfdb 100644
--- a/drivers/staging/rdma/hfi1/keys.c
+++ b/drivers/staging/rdma/hfi1/keys.c
@@ -354,58 +354,3 @@ bail:
         rcu_read_unlock();
         return 0;
 }
-
-/*
- * Initialize the memory region specified by the work request.
- */
-int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_fast_reg_wr *wr)
-{
-        struct hfi1_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
-        struct hfi1_pd *pd = to_ipd(qp->ibqp.pd);
-        struct hfi1_mregion *mr;
-        u32 rkey = wr->rkey;
-        unsigned i, n, m;
-        int ret = -EINVAL;
-        unsigned long flags;
-        u64 *page_list;
-        size_t ps;
-
-        spin_lock_irqsave(&rkt->lock, flags);
-        if (pd->user || rkey == 0)
-                goto bail;
-
-        mr = rcu_dereference_protected(
-                rkt->table[(rkey >> (32 - hfi1_lkey_table_size))],
-                lockdep_is_held(&rkt->lock));
-        if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd))
-                goto bail;
-
-        if (wr->page_list_len > mr->max_segs)
-                goto bail;
-
-        ps = 1UL << wr->page_shift;
-        if (wr->length > ps * wr->page_list_len)
-                goto bail;
-
-        mr->user_base = wr->iova_start;
-        mr->iova = wr->iova_start;
-        mr->lkey = rkey;
-        mr->length = wr->length;
-        mr->access_flags = wr->access_flags;
-        page_list = wr->page_list->page_list;
-        m = 0;
-        n = 0;
-        for (i = 0; i < wr->page_list_len; i++) {
-                mr->map[m]->segs[n].vaddr = (void *) page_list[i];
-                mr->map[m]->segs[n].length = ps;
-                if (++n == HFI1_SEGSZ) {
-                        m++;
-                        n = 0;
-                }
-        }
-
-        ret = 0;
-bail:
-        spin_unlock_irqrestore(&rkt->lock, flags);
-        return ret;
-}
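
The deleted hfi1_fast_reg_mr() was the driver half of the old fast-register flow: a consumer allocated an ib_fast_reg_page_list, filled in its page addresses, and posted an IB_WR_FAST_REG_MR work request, which this routine then copied into the MR's segment table under the lkey-table lock. A minimal sketch of that consumer side, reconstructed from the fields the deleted code reads (pl, npages, dma_addr, qp and the access flags are illustrative placeholders, not taken from this patch):

        struct ib_send_wr *bad_wr;
        struct ib_fast_reg_wr fr_wr = {
                .wr.opcode      = IB_WR_FAST_REG_MR,
                .rkey           = mr->rkey,
                .page_list      = pl,           /* from ib_alloc_fast_reg_page_list() */
                .page_list_len  = npages,       /* checked against mr->max_segs above */
                .page_shift     = PAGE_SHIFT,   /* becomes the per-segment length ps */
                .length         = npages * PAGE_SIZE,
                .iova_start     = dma_addr,     /* copied to mr->iova and mr->user_base */
                .access_flags   = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE,
        };
        int ret = ib_post_send(qp, &fr_wr.wr, &bad_wr);

Note how the deleted loop spreads the page list across HFI1_SEGSZ-entry maps: entry i lands in mr->map[i / HFI1_SEGSZ]->segs[i % HFI1_SEGSZ].
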
diff --git a/drivers/staging/rdma/hfi1/mr.c b/drivers/staging/rdma/hfi1/mr.c
index bd64e4f986f9..402bd6414176 100644
--- a/drivers/staging/rdma/hfi1/mr.c
+++ b/drivers/staging/rdma/hfi1/mr.c
@@ -344,9 +344,10 @@ out:
 
 /*
  * Allocate a memory region usable with the
- * IB_WR_FAST_REG_MR send work request.
+ * IB_WR_REG_MR send work request.
  *
  * Return the memory region on success, otherwise return an errno.
+ * FIXME: IB_WR_REG_MR is not supported
  */
 struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd,
                             enum ib_mr_type mr_type,
@@ -364,36 +365,6 @@ struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd,
         return &mr->ibmr;
 }
 
-struct ib_fast_reg_page_list *
-hfi1_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
-{
-        unsigned size = page_list_len * sizeof(u64);
-        struct ib_fast_reg_page_list *pl;
-
-        if (size > PAGE_SIZE)
-                return ERR_PTR(-EINVAL);
-
-        pl = kzalloc(sizeof(*pl), GFP_KERNEL);
-        if (!pl)
-                return ERR_PTR(-ENOMEM);
-
-        pl->page_list = kzalloc(size, GFP_KERNEL);
-        if (!pl->page_list)
-                goto err_free;
-
-        return pl;
-
-err_free:
-        kfree(pl);
-        return ERR_PTR(-ENOMEM);
-}
-
-void hfi1_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl)
-{
-        kfree(pl->page_list);
-        kfree(pl);
-}
-
 /**
  * hfi1_alloc_fmr - allocate a fast memory region
  * @pd: the protection domain for this memory region
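
The FIXME added above marks the gap this patch leaves: the old page-list path is gone, but hfi1 does not yet implement the replacement, in which the core hands the driver a scatterlist via ib_map_mr_sg() and the registration is posted as an IB_WR_REG_MR work request. A sketch of that flow against the 4.4-era core verbs API (signatures vary across kernel versions, e.g. ib_map_mr_sg() later gained an sg_offset argument; pd, qp, sg, sg_nents and the access flags are assumed from the caller):

        struct ib_send_wr *bad_wr;
        struct ib_reg_wr reg_wr = { };
        struct ib_mr *mr;
        int n, ret;

        mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
        if (IS_ERR(mr))
                return PTR_ERR(mr);

        /* The driver, not the consumer, walks the scatterlist into its page table. */
        n = ib_map_mr_sg(mr, sg, sg_nents, PAGE_SIZE);
        if (n < sg_nents) {
                ib_dereg_mr(mr);
                return -EINVAL;
        }

        reg_wr.wr.opcode = IB_WR_REG_MR;
        reg_wr.mr        = mr;
        reg_wr.key       = mr->rkey;
        reg_wr.access    = IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
        ret = ib_post_send(qp, &reg_wr.wr, &bad_wr);
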
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
index 981e6c1b79a3..6e2da7ee6d2f 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/staging/rdma/hfi1/verbs.c
@@ -380,9 +380,7 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
          * undefined operations.
          * Make sure buffer is large enough to hold the result for atomics.
          */
-        if (wr->opcode == IB_WR_FAST_REG_MR) {
-                return -EINVAL;
-        } else if (qp->ibqp.qp_type == IB_QPT_UC) {
+        if (qp->ibqp.qp_type == IB_QPT_UC) {
                 if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
                         return -EINVAL;
         } else if (qp->ibqp.qp_type != IB_QPT_RC) {
@@ -417,9 +415,6 @@ static int post_one_send(struct hfi1_qp *qp, struct ib_send_wr *wr)
         if (qp->ibqp.qp_type != IB_QPT_UC &&
             qp->ibqp.qp_type != IB_QPT_RC)
                 memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
-        else if (wr->opcode == IB_WR_FAST_REG_MR)
-                memcpy(&wqe->fast_reg_wr, fast_reg_wr(wr),
-                       sizeof(wqe->fast_reg_wr));
         else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
                  wr->opcode == IB_WR_RDMA_WRITE ||
                  wr->opcode == IB_WR_RDMA_READ)
@@ -2065,8 +2060,6 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
         ibdev->reg_user_mr = hfi1_reg_user_mr;
         ibdev->dereg_mr = hfi1_dereg_mr;
         ibdev->alloc_mr = hfi1_alloc_mr;
-        ibdev->alloc_fast_reg_page_list = hfi1_alloc_fast_reg_page_list;
-        ibdev->free_fast_reg_page_list = hfi1_free_fast_reg_page_list;
         ibdev->alloc_fmr = hfi1_alloc_fmr;
         ibdev->map_phys_fmr = hfi1_map_phys_fmr;
         ibdev->unmap_fmr = hfi1_unmap_fmr;
diff --git a/drivers/staging/rdma/hfi1/verbs.h b/drivers/staging/rdma/hfi1/verbs.h
index cf5a3c956284..159ec08bfcd8 100644
--- a/drivers/staging/rdma/hfi1/verbs.h
+++ b/drivers/staging/rdma/hfi1/verbs.h
@@ -353,7 +353,6 @@ struct hfi1_swqe {
                 struct ib_rdma_wr rdma_wr;
                 struct ib_atomic_wr atomic_wr;
                 struct ib_ud_wr ud_wr;
-                struct ib_fast_reg_wr fast_reg_wr;
         };
         u32 psn; /* first packet sequence number */
         u32 lpsn; /* last packet sequence number */
@@ -1026,13 +1025,6 @@ struct ib_mr *hfi1_alloc_mr(struct ib_pd *pd,
                             enum ib_mr_type mr_type,
                             u32 max_entries);
 
-struct ib_fast_reg_page_list *hfi1_alloc_fast_reg_page_list(
-        struct ib_device *ibdev, int page_list_len);
-
-void hfi1_free_fast_reg_page_list(struct ib_fast_reg_page_list *pl);
-
-int hfi1_fast_reg_mr(struct hfi1_qp *qp, struct ib_fast_reg_wr *wr);
-
 struct ib_fmr *hfi1_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                               struct ib_fmr_attr *fmr_attr);
 
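
With fast_reg_wr gone from the hfi1_swqe union, every remaining member is still reached through the core's typed-WR accessors, like the ud_wr(wr) call left in post_one_send() above; each is a thin container_of() wrapper along these lines (shape as in include/rdma/ib_verbs.h of this era):

        static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
        {
                return container_of(wr, struct ib_ud_wr, wr);
        }

Dropping the union member is safe precisely because the last fast_reg_wr(wr) caller went away in the verbs.c hunk above.
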