author     Yishai Hadas <yishaih@mellanox.com>      2014-01-28 06:40:15 -0500
committer  Roland Dreier <roland@purestorage.com>   2014-03-04 13:34:28 -0500
commit     eeb8461e36c99fdf2d058751be924a2aab215005 (patch)
tree       c92498349f842be5985194c840e2dd12201df861 /drivers/infiniband/hw/mlx5/mem.c
parent     cfbf8d4857c26a8a307fb7cd258074c9dcd8c691 (diff)
IB: Refactor umem to use linear SG table
This patch refactors the IB core umem code and vendor drivers to use a
linear (chained) SG table instead of a chunk list. With this change the
relevant code becomes clearer: there is no need for nested loops to
build and use the umem.

Signed-off-by: Shachar Raindel <raindel@mellanox.com>
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
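As an illustration only (not part of the commit): the shape of the change as
it appears in the vendor drivers, distilled from the hunks below. use_page()
is a hypothetical stand-in for the per-entry work; the identifiers follow the
kernel APIs of this era (umem->chunk_list before, umem->sg_head.sgl after).

	/* Before: nested loops over the umem's chunk list. */
	struct ib_umem_chunk *chunk;
	int j;

	list_for_each_entry(chunk, &umem->chunk_list, list)
		for (j = 0; j < chunk->nmap; j++)
			use_page(sg_dma_address(&chunk->page_list[j]),
				 sg_dma_len(&chunk->page_list[j]));

	/* After: one linear walk over the umem's SG table. */
	struct scatterlist *sg;
	int entry;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry)
		use_page(sg_dma_address(sg), sg_dma_len(sg));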
Diffstat (limited to 'drivers/infiniband/hw/mlx5/mem.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c | 80 ++++++++++++++++++++--------------------
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 3a5322870b96..8499aec94db6 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -44,16 +44,17 @@
 void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 			int *ncont, int *order)
 {
-	struct ib_umem_chunk *chunk;
 	unsigned long tmp;
 	unsigned long m;
-	int i, j, k;
+	int i, k;
 	u64 base = 0;
 	int p = 0;
 	int skip;
 	int mask;
 	u64 len;
 	u64 pfn;
+	struct scatterlist *sg;
+	int entry;
 
 	addr = addr >> PAGE_SHIFT;
 	tmp = (unsigned long)addr;
@@ -61,32 +62,31 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift,
 	skip = 1 << m;
 	mask = skip - 1;
 	i = 0;
-	list_for_each_entry(chunk, &umem->chunk_list, list)
-		for (j = 0; j < chunk->nmap; j++) {
-			len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
-			pfn = sg_dma_address(&chunk->page_list[j]) >> PAGE_SHIFT;
-			for (k = 0; k < len; k++) {
-				if (!(i & mask)) {
-					tmp = (unsigned long)pfn;
-					m = min(m, find_first_bit(&tmp, sizeof(tmp)));
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		pfn = sg_dma_address(sg) >> PAGE_SHIFT;
+		for (k = 0; k < len; k++) {
+			if (!(i & mask)) {
+				tmp = (unsigned long)pfn;
+				m = min(m, find_first_bit(&tmp, sizeof(tmp)));
+				skip = 1 << m;
+				mask = skip - 1;
+				base = pfn;
+				p = 0;
+			} else {
+				if (base + p != pfn) {
+					tmp = (unsigned long)p;
+					m = find_first_bit(&tmp, sizeof(tmp));
 					skip = 1 << m;
 					mask = skip - 1;
 					base = pfn;
 					p = 0;
-				} else {
-					if (base + p != pfn) {
-						tmp = (unsigned long)p;
-						m = find_first_bit(&tmp, sizeof(tmp));
-						skip = 1 << m;
-						mask = skip - 1;
-						base = pfn;
-						p = 0;
-					}
 				}
-				p++;
-				i++;
 			}
+			p++;
+			i++;
 		}
+	}
 
 	if (i) {
 		m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
@@ -112,32 +112,32 @@ void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 {
 	int shift = page_shift - PAGE_SHIFT;
 	int mask = (1 << shift) - 1;
-	struct ib_umem_chunk *chunk;
-	int i, j, k;
+	int i, k;
 	u64 cur = 0;
 	u64 base;
 	int len;
+	struct scatterlist *sg;
+	int entry;
 
 	i = 0;
-	list_for_each_entry(chunk, &umem->chunk_list, list)
-		for (j = 0; j < chunk->nmap; j++) {
-			len = sg_dma_len(&chunk->page_list[j]) >> PAGE_SHIFT;
-			base = sg_dma_address(&chunk->page_list[j]);
-			for (k = 0; k < len; k++) {
-				if (!(i & mask)) {
-					cur = base + (k << PAGE_SHIFT);
-					if (umr)
-						cur |= 3;
+	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
+		len = sg_dma_len(sg) >> PAGE_SHIFT;
+		base = sg_dma_address(sg);
+		for (k = 0; k < len; k++) {
+			if (!(i & mask)) {
+				cur = base + (k << PAGE_SHIFT);
+				if (umr)
+					cur |= 3;
 
-					pas[i >> shift] = cpu_to_be64(cur);
-					mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
-						    i >> shift, be64_to_cpu(pas[i >> shift]));
-				} else
-					mlx5_ib_dbg(dev, "=====> 0x%llx\n",
-						    base + (k << PAGE_SHIFT));
-				i++;
-			}
+				pas[i >> shift] = cpu_to_be64(cur);
+				mlx5_ib_dbg(dev, "pas[%d] 0x%llx\n",
+					    i >> shift, be64_to_cpu(pas[i >> shift]));
+			} else
+				mlx5_ib_dbg(dev, "=====> 0x%llx\n",
+					    base + (k << PAGE_SHIFT));
+			i++;
 		}
+	}
 }
 
 int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
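For reference, for_each_sg() from <linux/scatterlist.h> is a plain linear walk
driven by sg_next(), which is what lets both functions above shed one level of
loop nesting; roughly (paraphrased from kernels of this period):

	#define for_each_sg(sglist, sg, nr, __i) \
		for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))

With a chained SG table, sg_next() transparently follows the chain links
between scatterlist pages, so the caller sees one flat sequence of nmap
DMA-mapped entries instead of a list of chunks.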