author    David S. Miller <davem@davemloft.net>  2013-10-23 16:28:39 -0400
committer David S. Miller <davem@davemloft.net>  2013-10-23 16:49:34 -0400
commit    c3fa32b9764dc45dcf8a2231b1c110abc4a63e0b (patch)
tree      6cf2896a77b65bec64284681e1c3851eb3263e09 /drivers/infiniband/hw/mlx5/mr.c
parent    34d92d5315b64a3e5292b7e9511c1bb617227fb6 (diff)
parent    320437af954cbe66478f1f5e8b34cb5a8d072191 (diff)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/usb/qmi_wwan.c
	include/net/dst.h

Trivial merge conflicts, both were overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')

 drivers/infiniband/hw/mlx5/mr.c | 70 +++++++++++++++++----------------

 1 file changed, 33 insertions(+), 37 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bd41df95b6f0..3453580b1eb2 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -42,6 +42,10 @@ enum {
 	DEF_CACHE_SIZE	= 10,
 };
 
+enum {
+	MLX5_UMR_ALIGN	= 2048
+};
+
 static __be64 *mr_align(__be64 *ptr, int align)
 {
 	unsigned long mask = align - 1;
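Note on the new enum: the UMR gather buffer alignment moves from 64 bytes (0x40, visible in the removed hunks below) to MLX5_UMR_ALIGN = 2048. mr_align() implements the usual round-up-by-mask trick, which requires a power-of-two alignment. A minimal userspace sketch of the same computation (align_up and the other names here are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    /* Round ptr up to the next multiple of align; align must be a
     * power of two, exactly as mr_align() assumes. */
    static void *align_up(void *ptr, uintptr_t align)
    {
            uintptr_t mask = align - 1;

            return (void *)(((uintptr_t)ptr + mask) & ~mask);
    }

    int main(void)
    {
            char buf[4096];

            /* Any address inside buf rounds up to a 2048-byte boundary. */
            printf("%p rounds up to %p\n", (void *)(buf + 1),
                   align_up(buf + 1, 2048));
            return 0;
    }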
@@ -61,13 +65,11 @@ static int order2idx(struct mlx5_ib_dev *dev, int order)
 
 static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_ib_mr *mr;
 	int npages = 1 << ent->order;
-	int size = sizeof(u64) * npages;
 	int err = 0;
 	int i;
 
@@ -83,21 +85,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 		}
 		mr->order = ent->order;
 		mr->umred = 1;
-		mr->pas = kmalloc(size + 0x3f, GFP_KERNEL);
-		if (!mr->pas) {
-			kfree(mr);
-			err = -ENOMEM;
-			goto out;
-		}
-		mr->dma = dma_map_single(ddev, mr_align(mr->pas, 0x40), size,
-					 DMA_TO_DEVICE);
-		if (dma_mapping_error(ddev, mr->dma)) {
-			kfree(mr->pas);
-			kfree(mr);
-			err = -ENOMEM;
-			goto out;
-		}
-
 		in->seg.status = 1 << 6;
 		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
 		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
@@ -108,8 +95,6 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 					    sizeof(*in));
 		if (err) {
 			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
 			kfree(mr);
 			goto out;
 		}
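With the gather buffer gone from the cache-fill path, the mkey-creation error path above collapses to a single kfree(mr). The removed code followed the kernel's layered-unwind idiom: each later failure frees exactly what earlier steps allocated, in reverse order. A self-contained sketch of that idiom (hypothetical names, plain malloc standing in for kmalloc):

    #include <stdlib.h>

    struct mr { void *pas; };

    /* Two-step allocation with reverse-order unwind, as in the removed
     * add_keys() error path. */
    static struct mr *alloc_mr(size_t size)
    {
            struct mr *mr = malloc(sizeof(*mr));

            if (!mr)
                    return NULL;
            mr->pas = malloc(size);
            if (!mr->pas)
                    goto err_free_mr;       /* undo step 1 only */
            return mr;

    err_free_mr:
            free(mr);
            return NULL;
    }

    int main(void)
    {
            struct mr *mr = alloc_mr(4096);

            if (mr) {
                    free(mr->pas);
                    free(mr);
            }
            return 0;
    }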
@@ -129,11 +114,9 @@ out:
 
 static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_ib_mr *mr;
-	int size;
 	int err;
 	int i;
 
@@ -149,14 +132,10 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 		ent->size--;
 		spin_unlock(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-		if (err) {
+		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		} else {
-			size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
+		else
 			kfree(mr);
-		}
 	}
 }
 
@@ -408,13 +387,12 @@ static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 
 static void clean_keys(struct mlx5_ib_dev *dev, int c)
 {
-	struct device *ddev = dev->ib_dev.dma_device;
 	struct mlx5_mr_cache *cache = &dev->cache;
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	struct mlx5_ib_mr *mr;
-	int size;
 	int err;
 
+	cancel_delayed_work(&ent->dwork);
 	while (1) {
 		spin_lock(&ent->lock);
 		if (list_empty(&ent->head)) {
@@ -427,14 +405,10 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 		ent->size--;
 		spin_unlock(&ent->lock);
 		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
-		if (err) {
+		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
-		} else {
-			size = ALIGN(sizeof(u64) * (1 << mr->order), 0x40);
-			dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
-			kfree(mr->pas);
+		else
 			kfree(mr);
-		}
 	}
 }
 
@@ -540,13 +514,15 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 	int i;
 
 	dev->cache.stopped = 1;
-	destroy_workqueue(dev->cache.wq);
+	flush_workqueue(dev->cache.wq);
 
 	mlx5_mr_cache_debugfs_cleanup(dev);
 
 	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
 		clean_keys(dev, i);
 
+	destroy_workqueue(dev->cache.wq);
+
 	return 0;
 }
 
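The reordering above is the point of this hunk: mark the cache stopped and flush the workqueue first, drain each entry via clean_keys() (which now also cancels the entry's delayed work), and destroy the workqueue only afterwards, so no queued refill can run during or after the teardown. A rough userspace analogue of that stop/wait/free ordering, using a pthread in place of the kernel workqueue (names are illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_bool stopped;             /* dev->cache.stopped analogue */

    static void *cache_worker(void *arg)
    {
            (void)arg;
            while (!atomic_load(&stopped))
                    ;                       /* stand-in for queued refills */
            return NULL;
    }

    int main(void)
    {
            pthread_t worker;

            pthread_create(&worker, NULL, cache_worker, NULL);
            atomic_store(&stopped, true);   /* 1. stop new work            */
            pthread_join(worker, NULL);     /* 2. wait for in-flight work,
                                             *    like flush_workqueue()   */
            puts("cache drained unraced");  /* 3. clean_keys() equivalent  */
                                            /* 4. destroy_workqueue() last */
            return 0;
    }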
@@ -675,10 +651,12 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 				  int page_shift, int order, int access_flags)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	struct device *ddev = dev->ib_dev.dma_device;
 	struct umr_common *umrc = &dev->umrc;
 	struct ib_send_wr wr, *bad;
 	struct mlx5_ib_mr *mr;
 	struct ib_sge sg;
+	int size = sizeof(u64) * npages;
 	int err;
 	int i;
 
@@ -697,7 +675,22 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	if (!mr)
 		return ERR_PTR(-EAGAIN);
 
-	mlx5_ib_populate_pas(dev, umem, page_shift, mr_align(mr->pas, 0x40), 1);
+	mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
+	if (!mr->pas) {
+		err = -ENOMEM;
+		goto error;
+	}
+
+	mlx5_ib_populate_pas(dev, umem, page_shift,
+			     mr_align(mr->pas, MLX5_UMR_ALIGN), 1);
+
+	mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
+				 DMA_TO_DEVICE);
+	if (dma_mapping_error(ddev, mr->dma)) {
+		kfree(mr->pas);
+		err = -ENOMEM;
+		goto error;
+	}
 
 	memset(&wr, 0, sizeof(wr));
 	wr.wr_id = (u64)(unsigned long)mr;
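The padding arithmetic in the new allocation is what makes mr_align() safe here: kmalloc() gives no 2048-byte guarantee, so asking for size + MLX5_UMR_ALIGN - 1 bytes ensures that rounding the returned pointer up to the next boundary still leaves size usable bytes. A self-contained check of that invariant (plain malloc standing in for kmalloc):

    #include <assert.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define UMR_ALIGN 2048                  /* mirrors MLX5_UMR_ALIGN */

    int main(void)
    {
            size_t size = 512 * sizeof(uint64_t);  /* sizeof(u64) * npages */
            char *raw = malloc(size + UMR_ALIGN - 1);
            uintptr_t mask = UMR_ALIGN - 1;
            char *aligned;

            if (!raw)
                    return 1;
            aligned = (char *)(((uintptr_t)raw + mask) & ~mask);

            assert(((uintptr_t)aligned & mask) == 0);             /* aligned   */
            assert(aligned + size <= raw + size + UMR_ALIGN - 1); /* in bounds */
            memset(aligned, 0, size);       /* whole window is usable */
            free(raw);                      /* free the original pointer */
            return 0;
    }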
@@ -718,6 +711,9 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	wait_for_completion(&mr->done);
 	up(&umrc->sem);
 
+	dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+	kfree(mr->pas);
+
 	if (mr->status != IB_WC_SUCCESS) {
 		mlx5_ib_warn(dev, "reg umr failed\n");
 		err = -EFAULT;
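Net effect of the last two hunks: the gather buffer and its DMA mapping now live only from just before the UMR work request is posted until its completion is reaped, rather than for the lifetime of every cached MR. For a sense of the per-MR memory this frees, the buffer size follows sizeof(u64) * npages with npages = 1 << order, the same formula add_keys() and reg_umr() use:

    #include <stdint.h>
    #include <stdio.h>

    /* Gather-buffer size per order; pre-patch each cached MR held one
     * (padded by 0x3f), post-patch it is transient inside reg_umr()
     * (padded by MLX5_UMR_ALIGN - 1 = 2047). */
    int main(void)
    {
            for (int order = 5; order <= 10; order++) {
                    unsigned long long size =
                            sizeof(uint64_t) * (1ULL << order);

                    printf("order %2d: %6llu-byte gather buffer\n",
                           order, size);
            }
            return 0;
    }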