author    Ralph Campbell <ralph.campbell@qlogic.com>  2006-12-12 17:28:30 -0500
committer Roland Dreier <rolandd@cisco.com>           2006-12-12 17:28:30 -0500
commit    1527106ff8cf6afb15f68c8820605a0d32263173 (patch)
tree      eda9162aca8ffb1acddb6c86f561c40dfe69dd33 /drivers/infiniband/core/uverbs_mem.c
parent    f2cbb660ed37294e3eeb98c045de6890079ccb01 (diff)
IB/core: Use the new verbs DMA mapping functions
Convert code in core/ to use the new DMA mapping functions for kernel
verbs consumers.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
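The ib_dma_* calls this patch converts to are thin static inline wrappers
from <rdma/ib_verbs.h> that let a device interpose its own DMA mapping
operations (in this series, so QLogic's ipath driver can supply them),
while other devices fall through to the generic DMA API. Below is a
minimal sketch of the dispatch, assuming the dma_ops hook in struct
ib_device that this API family adds; the exact upstream body may differ.

/* Sketch only -- the real definition lives in <rdma/ib_verbs.h>. */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	/* A device that registered custom dma_ops interposes here... */
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	/* ...otherwise fall through to the generic DMA API. */
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}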
Diffstat (limited to 'drivers/infiniband/core/uverbs_mem.c')
-rw-r--r--  drivers/infiniband/core/uverbs_mem.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c
index db12cc0841d..c95fe952abd 100644
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -52,8 +52,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 	int i;
 
 	list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
-		dma_unmap_sg(dev->dma_device, chunk->page_list,
-			     chunk->nents, DMA_BIDIRECTIONAL);
+		ib_dma_unmap_sg(dev, chunk->page_list,
+				chunk->nents, DMA_BIDIRECTIONAL);
 		for (i = 0; i < chunk->nents; ++i) {
 			if (umem->writable && dirty)
 				set_page_dirty_lock(chunk->page_list[i].page);
@@ -136,10 +136,10 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
 			chunk->page_list[i].length = PAGE_SIZE;
 		}
 
-		chunk->nmap = dma_map_sg(dev->dma_device,
-					 &chunk->page_list[0],
-					 chunk->nents,
-					 DMA_BIDIRECTIONAL);
+		chunk->nmap = ib_dma_map_sg(dev,
+					    &chunk->page_list[0],
+					    chunk->nents,
+					    DMA_BIDIRECTIONAL);
 		if (chunk->nmap <= 0) {
 			for (i = 0; i < chunk->nents; ++i)
 				put_page(chunk->page_list[i].page);
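For illustration, a hedged sketch of the consumer side: once
ib_dma_map_sg() succeeds, addresses and lengths should be read back
through the matching ib_sg_dma_address()/ib_sg_dma_len() accessors from
the same API family rather than sg_dma_address()/sg_dma_len() directly,
so an interposing driver's view of the mapping is honored. The helper
name below is hypothetical and not part of this patch.

/* Hypothetical debug helper: walk one mapped umem chunk. */
static void dump_mapped_chunk(struct ib_device *dev,
			      struct ib_umem_chunk *chunk)
{
	int i;

	/* nmap, not nents: iterate only the entries the mapping produced. */
	for (i = 0; i < chunk->nmap; ++i)
		printk(KERN_DEBUG "sg[%d]: addr 0x%llx len %u\n", i,
		       (unsigned long long)
		       ib_sg_dma_address(dev, &chunk->page_list[i]),
		       ib_sg_dma_len(dev, &chunk->page_list[i]));
}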