path: root/drivers/infiniband/core/umem.c
author		Jens Axboe <jens.axboe@oracle.com>	2007-10-22 15:19:53 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2007-10-22 15:19:53 -0400
commit		45711f1af6eff1a6d010703b4862e0d2b9afd056 (patch)
tree		3d0048f46e3df9d217d56127462ebe680348bd5a /drivers/infiniband/core/umem.c
parent		78c2f0b8c285c5305b3e67b0595200541e15eb43 (diff)
[SG] Update drivers to use sg helpers
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
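
For context, a minimal sketch (not part of this commit) of the scatterlist-helper pattern the patch switches to on the fill side: sg_init_table() zeroes the array and marks its last entry as the end of the list, and sg_set_page() stores the page pointer instead of writing the struct field directly. The helper names are the real kernel API of this era; the fill_umem_chunk() wrapper below is hypothetical, and sg_set_page() is shown in the two-argument form this kernel had (later kernels also take length and offset in the call):

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Hypothetical helper illustrating the fill pattern used in ib_umem_get(). */
static void fill_umem_chunk(struct scatterlist *sgl, struct page **pages,
			    int nents)
{
	int i;

	sg_init_table(sgl, nents);		/* zero entries, mark the last one */
	for (i = 0; i < nents; ++i) {
		sg_set_page(&sgl[i], pages[i]);	/* was: sgl[i].page = pages[i] */
		sgl[i].offset = 0;
		sgl[i].length = PAGE_SIZE;
	}
}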
Diffstat (limited to 'drivers/infiniband/core/umem.c')
-rw-r--r--	drivers/infiniband/core/umem.c	11
1 files changed, 7 insertions, 4 deletions
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 2f54e29dc7a6..14159ff29408 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -55,9 +55,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 		ib_dma_unmap_sg(dev, chunk->page_list,
 				chunk->nents, DMA_BIDIRECTIONAL);
 		for (i = 0; i < chunk->nents; ++i) {
+			struct page *page = sg_page(&chunk->page_list[i]);
+
 			if (umem->writable && dirty)
-				set_page_dirty_lock(chunk->page_list[i].page);
-			put_page(chunk->page_list[i].page);
+				set_page_dirty_lock(page);
+			put_page(page);
 		}
 
 		kfree(chunk);
@@ -164,11 +166,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		}
 
 		chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+		sg_init_table(chunk->page_list, chunk->nents);
 		for (i = 0; i < chunk->nents; ++i) {
 			if (vma_list &&
 			    !is_vm_hugetlb_page(vma_list[i + off]))
 				umem->hugetlb = 0;
-			chunk->page_list[i].page   = page_list[i + off];
+			sg_set_page(&chunk->page_list[i], page_list[i + off]);
 			chunk->page_list[i].offset = 0;
 			chunk->page_list[i].length = PAGE_SIZE;
 		}
@@ -179,7 +182,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 				DMA_BIDIRECTIONAL);
 		if (chunk->nmap <= 0) {
 			for (i = 0; i < chunk->nents; ++i)
-				put_page(chunk->page_list[i].page);
+				put_page(sg_page(&chunk->page_list[i]));
 			kfree(chunk);
 
 			ret = -ENOMEM;
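
Again as a sketch rather than part of the patch, the matching teardown pattern from __ib_umem_release(): the page pointer now comes from sg_page() instead of a direct read of the old .page field, which the chained-scatterlist work turns into an encoded page_link. The release_umem_chunk() name is hypothetical; the headers are the same two used in the sketch above:

/* Hypothetical helper mirroring the release loop in __ib_umem_release(). */
static void release_umem_chunk(struct scatterlist *sgl, int nents, int dirty)
{
	int i;

	for (i = 0; i < nents; ++i) {
		struct page *page = sg_page(&sgl[i]);	/* was: sgl[i].page */

		if (dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}
}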