author		Jens Axboe <jens.axboe@oracle.com>	2007-10-22 15:19:53 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2007-10-22 15:19:53 -0400
commit		45711f1af6eff1a6d010703b4862e0d2b9afd056 (patch)
tree		3d0048f46e3df9d217d56127462ebe680348bd5a /drivers/net/mlx4/icm.c
parent		78c2f0b8c285c5305b3e67b0595200541e15eb43 (diff)
[SG] Update drivers to use sg helpers
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
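
For orientation, a minimal, self-contained sketch of the scatterlist helper pattern this commit migrates the driver to: tables are set up with sg_init_table(), pages are attached with sg_set_page() (the two-argument form used in this tree; later kernels also pass length and offset in the call), and pages are read back with sg_page() instead of dereferencing the removed ->page field. The function names and table size below are illustrative, not taken from mlx4.

```c
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

#define EXAMPLE_NENTS 4	/* illustrative table size, not MLX4_ICM_CHUNK_LEN */

/* Fill a small scatterlist with freshly allocated pages via the sg helpers. */
static int example_fill_sg(struct scatterlist *sg, int order, gfp_t gfp_mask)
{
	int i;

	/* Clears the table and marks the last entry; required before use. */
	sg_init_table(sg, EXAMPLE_NENTS);

	for (i = 0; i < EXAMPLE_NENTS; ++i) {
		struct page *page = alloc_pages(gfp_mask, order);

		if (!page)
			goto undo;

		/* Replaces the old direct assignment: sg[i].page = page; */
		sg_set_page(&sg[i], page);	/* two-argument form of this era */
		sg[i].length = PAGE_SIZE << order;
		sg[i].offset = 0;
	}
	return 0;

undo:
	while (--i >= 0)
		__free_pages(sg_page(&sg[i]), order);
	return -ENOMEM;
}

/* Tear down: the page is read back with sg_page() rather than ->page. */
static void example_free_sg(struct scatterlist *sg, int order)
{
	int i;

	for (i = 0; i < EXAMPLE_NENTS; ++i)
		__free_pages(sg_page(&sg[i]), order);
}
```

The sg_init_table() call matters because the helpers encode chain/termination state alongside the page pointer, so entries must be initialized before sg_set_page() is used; that is why the hunk in mlx4_alloc_icm() below adds it ahead of filling chunk->mem.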
Diffstat (limited to 'drivers/net/mlx4/icm.c')
-rw-r--r--	drivers/net/mlx4/icm.c | 14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c
index 4b3c109d5eae..887633b207d9 100644
--- a/drivers/net/mlx4/icm.c
+++ b/drivers/net/mlx4/icm.c
@@ -60,7 +60,7 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
 			     PCI_DMA_BIDIRECTIONAL);
 
 	for (i = 0; i < chunk->npages; ++i)
-		__free_pages(chunk->mem[i].page,
+		__free_pages(sg_page(&chunk->mem[i]),
 			     get_order(chunk->mem[i].length));
 }
 
@@ -70,7 +70,7 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
 
 	for (i = 0; i < chunk->npages; ++i)
 		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
-				  lowmem_page_address(chunk->mem[i].page),
+				  lowmem_page_address(sg_page(&chunk->mem[i])),
 				  sg_dma_address(&chunk->mem[i]));
 }
 
@@ -95,10 +95,13 @@ void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
 
 static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
 {
-	mem->page = alloc_pages(gfp_mask, order);
-	if (!mem->page)
+	struct page *page;
+
+	page = alloc_pages(gfp_mask, order);
+	if (!page)
 		return -ENOMEM;
 
+	sg_set_page(mem, page);
 	mem->length = PAGE_SIZE << order;
 	mem->offset = 0;
 	return 0;
@@ -145,6 +148,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
 		if (!chunk)
 			goto fail;
 
+		sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
 		chunk->npages = 0;
 		chunk->nsg = 0;
 		list_add_tail(&chunk->list, &icm->chunk_list);
@@ -334,7 +338,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_han
 			 * been assigned to.
 			 */
 			if (chunk->mem[i].length > offset) {
-				page = chunk->mem[i].page;
+				page = sg_page(&chunk->mem[i]);
 				goto out;
 			}
 			offset -= chunk->mem[i].length;
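
As a footnote on the last hunk, the byte-offset walk in mlx4_table_find() is unchanged apart from reading the page through sg_page(). A simplified sketch of that lookup, with an illustrative stand-in for the chunk structure (the real struct mlx4_icm_chunk has additional fields):

```c
#include <linux/scatterlist.h>

#define EXAMPLE_CHUNK_LEN 8	/* illustrative stand-in for MLX4_ICM_CHUNK_LEN */

/* Simplified stand-in for struct mlx4_icm_chunk, for illustration only. */
struct example_chunk {
	int npages;
	struct scatterlist mem[EXAMPLE_CHUNK_LEN];
};

/* Return the page backing byte 'offset' within the chunk, or NULL. */
static struct page *example_find_page(struct example_chunk *chunk, int offset)
{
	int i;

	for (i = 0; i < chunk->npages; ++i) {
		/* Entry i covers the next mem[i].length bytes of the chunk. */
		if (chunk->mem[i].length > offset)
			return sg_page(&chunk->mem[i]);	/* was chunk->mem[i].page */
		offset -= chunk->mem[i].length;
	}
	return NULL;
}
```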