author	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-11 22:43:13 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-11 22:43:13 -0400
commit	ce9d3c9a6a9aef61525be07fe6ba27d937236aa2 (patch)
tree	1b29bcb8f60fc6b59fa0d7b833cc733b8ebe17c9 /drivers/net/mlx4/icm.c
parent	038a5008b2f395c85e6e71d6ddf3c684e7c405b0 (diff)
parent	3d73c2884f45f9a297cbc956cea101405a9703f2 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (87 commits)
  mlx4_core: Fix section mismatches
  IPoIB: Allow setting policy to ignore multicast groups
  IB/mthca: Mark error paths as unlikely() in post_srq_recv functions
  IB/ipath: Minor fix to ordering of freeing and zeroing of tid pages.
  IB/ipath: Remove redundant link state checks
  IB/ipath: Fix IB_EVENT_PORT_ERR event
  IB/ipath: Better handling of unexpected GPIO interrupts
  IB/ipath: Maintain active time on all chips
  IB/ipath: Fix QHT7040 serial number check
  IB/ipath: Indicate a couple of chip bugs to userspace
  IB/ipath: iba6110 rev4 no longer needs recv header overrun workaround
  IB/ipath: Use counters in ipath_poll and cleanup interrupts in ipath_close
  IB/ipath: Remove duplicate copy of LMC
  IB/ipath: Add ability to set the LMC via the sysfs debugging interface
  IB/ipath: Optimize completion queue entry insertion and polling
  IB/ipath: Implement IB_EVENT_QP_LAST_WQE_REACHED
  IB/ipath: Generate flush CQE when QP is in error state
  IB/ipath: Remove redundant code
  IB/ipath: Future proof eeprom checksum code (contents reading)
  IB/ipath: UC RDMA WRITE with IMMEDIATE doesn't send the immediate
  ...
Diffstat (limited to 'drivers/net/mlx4/icm.c')
-rw-r--r--	drivers/net/mlx4/icm.c	| 134
1 file changed, 103 insertions(+), 31 deletions(-)
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index b7a4aa8476f..4b3c109d5ea 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -34,6 +34,7 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
+#include <linux/scatterlist.h>
 
 #include <linux/mlx4/cmd.h>
 
@@ -50,19 +51,41 @@ enum {
 	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
 };
 
-void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm)
+static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
 {
-	struct mlx4_icm_chunk *chunk, *tmp;
 	int i;
 
-	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
-		if (chunk->nsg > 0)
-			pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
-				     PCI_DMA_BIDIRECTIONAL);
+	if (chunk->nsg > 0)
+		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
+			     PCI_DMA_BIDIRECTIONAL);
+
+	for (i = 0; i < chunk->npages; ++i)
+		__free_pages(chunk->mem[i].page,
+			     get_order(chunk->mem[i].length));
+}
+
+static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
+{
+	int i;
+
+	for (i = 0; i < chunk->npages; ++i)
+		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
+				  lowmem_page_address(chunk->mem[i].page),
+				  sg_dma_address(&chunk->mem[i]));
+}
+
+void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
+{
+	struct mlx4_icm_chunk *chunk, *tmp;
 
-		for (i = 0; i < chunk->npages; ++i)
-			__free_pages(chunk->mem[i].page,
-				     get_order(chunk->mem[i].length));
+	if (!icm)
+		return;
+
+	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
+		if (coherent)
+			mlx4_free_icm_coherent(dev, chunk);
+		else
+			mlx4_free_icm_pages(dev, chunk);
 
 		kfree(chunk);
 	}
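For reference, the chunk bookkeeping that both free paths walk is declared in drivers/net/mlx4/icm.h; a rough paraphrase (not part of this diff, field names from memory) is:

/* Paraphrased from drivers/net/mlx4/icm.h of this era, for context;
 * not part of this patch. */
struct mlx4_icm_chunk {
	struct list_head	list;	/* linked into icm->chunk_list */
	int			npages;	/* entries of mem[] in use */
	int			nsg;	/* DMA-mapped entries (<= npages) */
	struct scatterlist	mem[MLX4_ICM_CHUNK_LEN];
};

struct mlx4_icm {
	struct list_head	chunk_list;
	int			refcount;
};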
@@ -70,16 +93,45 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm)
 	kfree(icm);
 }
 
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+{
+	mem->page = alloc_pages(gfp_mask, order);
+	if (!mem->page)
+		return -ENOMEM;
+
+	mem->length = PAGE_SIZE << order;
+	mem->offset = 0;
+	return 0;
+}
+
+static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
+				   int order, gfp_t gfp_mask)
+{
+	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
+				       &sg_dma_address(mem), gfp_mask);
+	if (!buf)
+		return -ENOMEM;
+
+	sg_set_buf(mem, buf, PAGE_SIZE << order);
+	BUG_ON(mem->offset);
+	sg_dma_len(mem) = PAGE_SIZE << order;
+	return 0;
+}
+
 struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
-				gfp_t gfp_mask)
+				gfp_t gfp_mask, int coherent)
 {
 	struct mlx4_icm *icm;
 	struct mlx4_icm_chunk *chunk = NULL;
 	int cur_order;
+	int ret;
+
+	/* We use sg_set_buf for coherent allocs, which assumes low memory */
+	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
 
 	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
 	if (!icm)
-		return icm;
+		return NULL;
 
 	icm->refcount = 0;
 	INIT_LIST_HEAD(&icm->chunk_list);
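The BUG_ON(mem->offset) in mlx4_alloc_icm_coherent() holds because dma_alloc_coherent() returns page-aligned lowmem and sg_set_buf() derives the scatterlist entry from that virtual address. A sketch of what sg_set_buf() does on kernels of this vintage (an assumption, paraphrased from <linux/scatterlist.h>; not part of this diff):

/* Assumed behaviour of sg_set_buf() on pre-sg-chaining kernels: fill a
 * scatterlist entry from a lowmem virtual address. A page-aligned
 * buffer from dma_alloc_coherent() yields offset == 0, which is what
 * the BUG_ON above asserts. */
static inline void sg_set_buf_sketch(struct scatterlist *sg,
				     const void *buf, unsigned int buflen)
{
	sg->page   = virt_to_page(buf);
	sg->offset = offset_in_page(buf);
	sg->length = buflen;
}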
@@ -101,12 +153,20 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 		while (1 << cur_order > npages)
 			--cur_order;
 
-		chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
-		if (chunk->mem[chunk->npages].page) {
-			chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
-			chunk->mem[chunk->npages].offset = 0;
+		if (coherent)
+			ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
+						      &chunk->mem[chunk->npages],
+						      cur_order, gfp_mask);
+		else
+			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
+						   cur_order, gfp_mask);
+
+		if (!ret) {
+			++chunk->npages;
 
-			if (++chunk->npages == MLX4_ICM_CHUNK_LEN) {
+			if (coherent)
+				++chunk->nsg;
+			else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
 				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
 							chunk->npages,
 							PCI_DMA_BIDIRECTIONAL);
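The two branches account for nsg differently: the coherent path maps each entry at allocation time, so nsg simply tracks npages, while the page path defers mapping to pci_map_sg() once a chunk fills up. A hypothetical helper (not in this patch) showing the deferred mapping step the non-coherent branch relies on:

/* Hypothetical helper, not in this patch: DMA-map a filled chunk the
 * way the non-coherent path does. pci_map_sg() may merge adjacent
 * pages, so nsg can come back smaller than npages; a non-positive
 * return means the mapping failed. */
static int map_chunk_sketch(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	chunk->nsg = pci_map_sg(dev->pdev, chunk->mem, chunk->npages,
				PCI_DMA_BIDIRECTIONAL);
	return chunk->nsg <= 0 ? -ENOMEM : 0;
}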
@@ -125,7 +185,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 		}
 	}
 
-	if (chunk) {
+	if (!coherent && chunk) {
 		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
 					chunk->npages,
 					PCI_DMA_BIDIRECTIONAL);
@@ -137,7 +197,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 	return icm;
 
 fail:
-	mlx4_free_icm(dev, icm);
+	mlx4_free_icm(dev, icm, coherent);
 	return NULL;
 }
 
@@ -202,7 +262,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
 
 	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
 				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
-				       __GFP_NOWARN);
+				       __GFP_NOWARN, table->coherent);
 	if (!table->icm[i]) {
 		ret = -ENOMEM;
 		goto out;
@@ -210,7 +270,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
 
 	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
 			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
-		mlx4_free_icm(dev, table->icm[i]);
+		mlx4_free_icm(dev, table->icm[i], table->coherent);
 		table->icm[i] = NULL;
 		ret = -ENOMEM;
 		goto out;
@@ -234,16 +294,16 @@ void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
 	if (--table->icm[i]->refcount == 0) {
 		mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
 			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
-		mlx4_free_icm(dev, table->icm[i]);
+		mlx4_free_icm(dev, table->icm[i], table->coherent);
 		table->icm[i] = NULL;
 	}
 
 	mutex_unlock(&table->mutex);
 }
 
-void *mlx4_table_find(struct mlx4_icm_table *table, int obj)
+void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
 {
-	int idx, offset, i;
+	int idx, offset, dma_offset, i;
 	struct mlx4_icm_chunk *chunk;
 	struct mlx4_icm *icm;
 	struct page *page = NULL;
@@ -253,15 +313,26 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj)
 
 	mutex_lock(&table->mutex);
 
-	idx = obj & (table->num_obj - 1);
-	icm = table->icm[idx / (MLX4_TABLE_CHUNK_SIZE / table->obj_size)];
-	offset = idx % (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
+	idx = (obj & (table->num_obj - 1)) * table->obj_size;
+	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
+	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;
 
 	if (!icm)
 		goto out;
 
 	list_for_each_entry(chunk, &icm->chunk_list, list) {
 		for (i = 0; i < chunk->npages; ++i) {
+			if (dma_handle && dma_offset >= 0) {
+				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
+					*dma_handle = sg_dma_address(&chunk->mem[i]) +
+						dma_offset;
+				dma_offset -= sg_dma_len(&chunk->mem[i]);
+			}
+			/*
+			 * DMA mapping can merge pages but not split them,
+			 * so if we found the page, dma_handle has already
+			 * been assigned to.
+			 */
 			if (chunk->mem[i].length > offset) {
 				page = chunk->mem[i].page;
 				goto out;
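Since pci_map_sg() may merge pages, the DMA walk (driven by sg_dma_len) and the CPU-page walk (driven by chunk->mem[i].length) above advance at different rates, which is why both offsets are tracked. A hypothetical caller (not in this patch) using the new out-parameter:

/* Hypothetical caller, not in this patch: look up both the CPU-side
 * location and the bus address of one table object in a single pass.
 * For a coherent table every chunk is mapped at allocation time, so
 * *dma is valid whenever the lookup succeeds. */
static void *find_obj_with_dma(struct mlx4_icm_table *table, int obj,
			       dma_addr_t *dma)
{
	return mlx4_table_find(table, obj, dma);
}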
@@ -309,7 +380,7 @@ void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 
 int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 			u64 virt, int obj_size, int nobj, int reserved,
-			int use_lowmem)
+			int use_lowmem, int use_coherent)
 {
 	int obj_per_chunk;
 	int num_icm;
@@ -327,6 +398,7 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 	table->num_obj  = nobj;
 	table->obj_size = obj_size;
 	table->lowmem   = use_lowmem;
+	table->coherent = use_coherent;
 	mutex_init(&table->mutex);
 
 	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
@@ -336,11 +408,11 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
 
 		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
 					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
-					       __GFP_NOWARN);
+					       __GFP_NOWARN, use_coherent);
 		if (!table->icm[i])
 			goto err;
 		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
-			mlx4_free_icm(dev, table->icm[i]);
+			mlx4_free_icm(dev, table->icm[i], use_coherent);
 			table->icm[i] = NULL;
 			goto err;
 		}
@@ -359,7 +431,7 @@ err:
 		if (table->icm[i]) {
 			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
 				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
-			mlx4_free_icm(dev, table->icm[i]);
+			mlx4_free_icm(dev, table->icm[i], use_coherent);
 		}
 
 	return -ENOMEM;
@@ -373,7 +445,7 @@ void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
 	if (table->icm[i]) {
 		mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
 			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
-		mlx4_free_icm(dev, table->icm[i]);
+		mlx4_free_icm(dev, table->icm[i], table->coherent);
 	}
 
 	kfree(table->icm);
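Taken together, a table owner opts into coherent ICM once at init time, and every subsequent alloc/free inherits the choice through table->coherent. A minimal usage sketch (the obj_size/nobj/reserved values are illustrative only, not from this diff):

/* Minimal usage sketch, not part of this diff. The final two arguments
 * are use_lowmem and use_coherent; coherent allocation implies lowmem
 * (see the BUG_ON in mlx4_alloc_icm), so both are 1 here. */
static int init_coherent_table_sketch(struct mlx4_dev *dev,
				      struct mlx4_icm_table *table, u64 virt)
{
	return mlx4_init_icm_table(dev, table, virt,
				   64,      /* obj_size (illustrative) */
				   1 << 20, /* nobj (illustrative) */
				   0,       /* reserved */
				   1, 1);   /* use_lowmem, use_coherent */
}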