Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/umem.c              | 11
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_dma.c     |  4
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mr.c      |  2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c | 24
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c   |  8
5 files changed, 29 insertions, 20 deletions
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 2f54e29dc7a6..14159ff29408 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -55,9 +55,11 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 		ib_dma_unmap_sg(dev, chunk->page_list,
 				chunk->nents, DMA_BIDIRECTIONAL);
 		for (i = 0; i < chunk->nents; ++i) {
+			struct page *page = sg_page(&chunk->page_list[i]);
+
 			if (umem->writable && dirty)
-				set_page_dirty_lock(chunk->page_list[i].page);
-			put_page(chunk->page_list[i].page);
+				set_page_dirty_lock(page);
+			put_page(page);
 		}
 
 		kfree(chunk);
@@ -164,11 +166,12 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 		}
 
 		chunk->nents = min_t(int, ret, IB_UMEM_MAX_PAGE_CHUNK);
+		sg_init_table(chunk->page_list, chunk->nents);
 		for (i = 0; i < chunk->nents; ++i) {
 			if (vma_list &&
 			    !is_vm_hugetlb_page(vma_list[i + off]))
 				umem->hugetlb = 0;
-			chunk->page_list[i].page = page_list[i + off];
+			sg_set_page(&chunk->page_list[i], page_list[i + off]);
 			chunk->page_list[i].offset = 0;
 			chunk->page_list[i].length = PAGE_SIZE;
 		}
@@ -179,7 +182,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 					    DMA_BIDIRECTIONAL);
 		if (chunk->nmap <= 0) {
 			for (i = 0; i < chunk->nents; ++i)
-				put_page(chunk->page_list[i].page);
+				put_page(sg_page(&chunk->page_list[i]));
 			kfree(chunk);
 
 			ret = -ENOMEM;
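
The umem conversion above is the template for the rest of the patch: the scatterlist's page pointer is no longer touched directly, the table is initialised with sg_init_table() before it is filled, entries are assigned with sg_set_page(), and readers go through sg_page(). A minimal sketch of that pattern, assuming a caller that already holds an array of pinned pages; the helper names are invented for illustration, and the two-argument sg_set_page() matches its use in this patch (later kernels also pass length and offset):

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Fill one chunk's scatterlist from pinned pages (illustrative helper). */
static void fill_chunk(struct scatterlist *sgl, struct page **pages, int nents)
{
	int i;

	sg_init_table(sgl, nents);		/* zero the entries, mark the last one */
	for (i = 0; i < nents; ++i) {
		sg_set_page(&sgl[i], pages[i]);	/* two-argument form, as in this patch */
		sgl[i].offset = 0;
		sgl[i].length = PAGE_SIZE;
	}
}

/* Release the same chunk, reading each page back through sg_page(). */
static void release_chunk(struct scatterlist *sgl, int nents, int dirty)
{
	int i;

	for (i = 0; i < nents; ++i) {
		struct page *page = sg_page(&sgl[i]);

		if (dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}
}
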
diff --git a/drivers/infiniband/hw/ipath/ipath_dma.c b/drivers/infiniband/hw/ipath/ipath_dma.c
index 22709a4f8fc8..e90a0ea538a0 100644
--- a/drivers/infiniband/hw/ipath/ipath_dma.c
+++ b/drivers/infiniband/hw/ipath/ipath_dma.c
@@ -108,7 +108,7 @@ static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
 	BUG_ON(!valid_dma_direction(direction));
 
 	for_each_sg(sgl, sg, nents, i) {
-		addr = (u64) page_address(sg->page);
+		addr = (u64) page_address(sg_page(sg));
 		/* TODO: handle highmem pages */
 		if (!addr) {
 			ret = 0;
@@ -127,7 +127,7 @@ static void ipath_unmap_sg(struct ib_device *dev,
 
 static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
 {
-	u64 addr = (u64) page_address(sg->page);
+	u64 addr = (u64) page_address(sg_page(sg));
 
 	if (addr)
 		addr += sg->offset;
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
index e442470a2375..db4ba92f79fc 100644
--- a/drivers/infiniband/hw/ipath/ipath_mr.c
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -225,7 +225,7 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 		for (i = 0; i < chunk->nents; i++) {
 			void *vaddr;
 
-			vaddr = page_address(chunk->page_list[i].page);
+			vaddr = page_address(sg_page(&chunk->page_list[i]));
 			if (!vaddr) {
 				ret = ERR_PTR(-EINVAL);
 				goto bail;
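
Both ipath files make the same substitution: the driver never performs a real DMA mapping, so it derives a CPU virtual address from each scatterlist entry, and that lookup now goes through sg_page(). A sketch of the address calculation, mirroring ipath_sg_dma_address() above; the helper name is hypothetical:

#include <linux/mm.h>
#include <linux/scatterlist.h>

/*
 * Illustrative helper: kernel virtual address of a scatterlist entry.
 * page_address() returns NULL for highmem pages, which is why the ipath
 * code above checks for a zero address and bails out.
 */
static u64 sg_kernel_addr(struct scatterlist *sg)
{
	u64 addr = (u64) page_address(sg_page(sg));

	if (addr)
		addr += sg->offset;
	return addr;
}
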
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index e61f3e626980..007b38157fc4 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -71,7 +71,7 @@ static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *
 			     PCI_DMA_BIDIRECTIONAL);
 
 	for (i = 0; i < chunk->npages; ++i)
-		__free_pages(chunk->mem[i].page,
+		__free_pages(sg_page(&chunk->mem[i]),
 			     get_order(chunk->mem[i].length));
 }
 
@@ -81,7 +81,7 @@ static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chun
 
 	for (i = 0; i < chunk->npages; ++i) {
 		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
-				  lowmem_page_address(chunk->mem[i].page),
+				  lowmem_page_address(sg_page(&chunk->mem[i])),
 				  sg_dma_address(&chunk->mem[i]));
 	}
 }
@@ -107,10 +107,13 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
 
 static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
 {
-	mem->page = alloc_pages(gfp_mask, order);
-	if (!mem->page)
+	struct page *page;
+
+	page = alloc_pages(gfp_mask, order);
+	if (!page)
 		return -ENOMEM;
 
+	sg_set_page(mem, page);
 	mem->length = PAGE_SIZE << order;
 	mem->offset = 0;
 	return 0;
@@ -157,6 +160,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
 			if (!chunk)
 				goto fail;
 
+			sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
 			chunk->npages = 0;
 			chunk->nsg = 0;
 			list_add_tail(&chunk->list, &icm->chunk_list);
@@ -304,7 +308,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_h
 			 * so if we found the page, dma_handle has already
 			 * been assigned to. */
 			if (chunk->mem[i].length > offset) {
-				page = chunk->mem[i].page;
+				page = sg_page(&chunk->mem[i]);
 				goto out;
 			}
 			offset -= chunk->mem[i].length;
@@ -445,6 +449,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int pag
 int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
 {
+	struct page *pages[1];
 	int ret = 0;
 	u8 status;
 	int i;
@@ -472,16 +477,17 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 	}
 
 	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
-			     &db_tab->page[i].mem.page, NULL);
+			     pages, NULL);
 	if (ret < 0)
 		goto out;
 
+	sg_set_page(&db_tab->page[i].mem, pages[0]);
 	db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
 	db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
 
 	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
 	if (ret < 0) {
-		put_page(db_tab->page[i].mem.page);
+		put_page(pages[0]);
 		goto out;
 	}
 
@@ -491,7 +497,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		ret = -EINVAL;
 	if (ret) {
 		pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
-		put_page(db_tab->page[i].mem.page);
+		put_page(sg_page(&db_tab->page[i].mem));
 		goto out;
 	}
 
@@ -557,7 +563,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
 		if (db_tab->page[i].uvirt) {
 			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
 			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
-			put_page(db_tab->page[i].mem.page);
+			put_page(sg_page(&db_tab->page[i].mem));
 		}
 	}
 
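
The mthca user-doorbell path shows the fuller sequence: pin a single user page with get_user_pages(), describe it with one scatterlist entry via sg_set_page(), DMA-map it, and on teardown recover the page through sg_page() before put_page(). A condensed sketch under the 2.6.23-era get_user_pages() and pci_map_sg() conventions used in this diff; the function names are invented, and the sg_init_table() call is added here only to set the end marker and is not part of the hunks above:

#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>

/* Pin one user page and map it for device access (illustrative helper). */
static int map_user_page(struct pci_dev *pdev, struct scatterlist *mem,
			 unsigned long uaddr, unsigned int len)
{
	struct page *pages[1];
	int ret;

	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
			     1, 1, 0, pages, NULL);
	if (ret < 0)
		return ret;

	sg_init_table(mem, 1);			/* single-entry table */
	sg_set_page(mem, pages[0]);
	mem->length = len;
	mem->offset = uaddr & ~PAGE_MASK;

	ret = pci_map_sg(pdev, mem, 1, PCI_DMA_TODEVICE);
	if (ret < 0) {
		put_page(pages[0]);
		return ret;
	}
	return 0;
}

/* Undo the mapping; the page comes back out through sg_page(). */
static void unmap_user_page(struct pci_dev *pdev, struct scatterlist *mem)
{
	pci_unmap_sg(pdev, mem, 1, PCI_DMA_TODEVICE);
	put_page(sg_page(mem));
}
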
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index f3529b6f0a33..d68798061795 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -131,7 +131,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 		p = mem;
 		for_each_sg(sgl, sg, data->size, i) {
-			from = kmap_atomic(sg->page, KM_USER0);
+			from = kmap_atomic(sg_page(sg), KM_USER0);
 			memcpy(p,
 			       from + sg->offset,
 			       sg->length);
@@ -191,7 +191,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_cmd_task *iser_ctask,
 
 		p = mem;
 		for_each_sg(sgl, sg, sg_size, i) {
-			to = kmap_atomic(sg->page, KM_SOFTIRQ0);
+			to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
 			memcpy(to + sg->offset,
 			       p,
 			       sg->length);
@@ -300,7 +300,7 @@ static unsigned int iser_data_buf_aligned_len(struct iser_data_buf *data,
 	for_each_sg(sgl, sg, data->dma_nents, i) {
 		/* iser_dbg("Checking sg iobuf [%d]: phys=0x%08lX "
 		   "offset: %ld sz: %ld\n", i,
-		   (unsigned long)page_to_phys(sg->page),
+		   (unsigned long)sg_phys(sg),
 		   (unsigned long)sg->offset,
 		   (unsigned long)sg->length); */
 		end_addr = ib_sg_dma_address(ibdev, sg) +
@@ -336,7 +336,7 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
 		iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
 			 "off:0x%x sz:0x%x dma_len:0x%x\n",
 			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
-			 sg->page, sg->offset,
+			 sg_page(sg), sg->offset,
 			 sg->length, ib_sg_dma_len(ibdev, sg));
 }
 
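
The iser hunks cover the read side of the conversion: every temporary mapping of a scatterlist element now takes sg_page(sg) instead of sg->page. A sketch of the bounce-copy loop shown above, using the kmap_atomic() slot arguments of this kernel generation; the helper name is invented:

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy a scatterlist into a contiguous buffer (illustrative helper). */
static void copy_sg_to_buffer(struct scatterlist *sgl, int nents, char *dst)
{
	struct scatterlist *sg;
	char *from;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		from = kmap_atomic(sg_page(sg), KM_USER0);
		memcpy(dst, from + sg->offset, sg->length);
		kunmap_atomic(from, KM_USER0);
		dst += sg->length;
	}
}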