aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/hw/mthca/mthca_memfree.c
diff options
context:
space:
mode:
authorJens Axboe <jens.axboe@oracle.com>2007-10-22 15:19:53 -0400
committerJens Axboe <jens.axboe@oracle.com>2007-10-22 15:19:53 -0400
commit45711f1af6eff1a6d010703b4862e0d2b9afd056 (patch)
tree3d0048f46e3df9d217d56127462ebe680348bd5a /drivers/infiniband/hw/mthca/mthca_memfree.c
parent78c2f0b8c285c5305b3e67b0595200541e15eb43 (diff)
[SG] Update drivers to use sg helpers
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_memfree.c')
-rw-r--r--drivers/infiniband/hw/mthca/mthca_memfree.c24
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index e61f3e626980..007b38157fc4 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -71,7 +71,7 @@ static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *
71 PCI_DMA_BIDIRECTIONAL); 71 PCI_DMA_BIDIRECTIONAL);
72 72
73 for (i = 0; i < chunk->npages; ++i) 73 for (i = 0; i < chunk->npages; ++i)
74 __free_pages(chunk->mem[i].page, 74 __free_pages(sg_page(&chunk->mem[i]),
75 get_order(chunk->mem[i].length)); 75 get_order(chunk->mem[i].length));
76} 76}
77 77
@@ -81,7 +81,7 @@ static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chun
81 81
82 for (i = 0; i < chunk->npages; ++i) { 82 for (i = 0; i < chunk->npages; ++i) {
83 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length, 83 dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
84 lowmem_page_address(chunk->mem[i].page), 84 lowmem_page_address(sg_page(&chunk->mem[i])),
85 sg_dma_address(&chunk->mem[i])); 85 sg_dma_address(&chunk->mem[i]));
86 } 86 }
87} 87}
@@ -107,10 +107,13 @@ void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
107 107
108static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask) 108static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
109{ 109{
110 mem->page = alloc_pages(gfp_mask, order); 110 struct page *page;
111 if (!mem->page) 111
112 page = alloc_pages(gfp_mask, order);
113 if (!page)
112 return -ENOMEM; 114 return -ENOMEM;
113 115
116 sg_set_page(mem, page);
114 mem->length = PAGE_SIZE << order; 117 mem->length = PAGE_SIZE << order;
115 mem->offset = 0; 118 mem->offset = 0;
116 return 0; 119 return 0;
@@ -157,6 +160,7 @@ struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
157 if (!chunk) 160 if (!chunk)
158 goto fail; 161 goto fail;
159 162
163 sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
160 chunk->npages = 0; 164 chunk->npages = 0;
161 chunk->nsg = 0; 165 chunk->nsg = 0;
162 list_add_tail(&chunk->list, &icm->chunk_list); 166 list_add_tail(&chunk->list, &icm->chunk_list);
@@ -304,7 +308,7 @@ void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_h
304 * so if we found the page, dma_handle has already 308 * so if we found the page, dma_handle has already
305 * been assigned to. */ 309 * been assigned to. */
306 if (chunk->mem[i].length > offset) { 310 if (chunk->mem[i].length > offset) {
307 page = chunk->mem[i].page; 311 page = sg_page(&chunk->mem[i]);
308 goto out; 312 goto out;
309 } 313 }
310 offset -= chunk->mem[i].length; 314 offset -= chunk->mem[i].length;
@@ -445,6 +449,7 @@ static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int pag
445int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, 449int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
446 struct mthca_user_db_table *db_tab, int index, u64 uaddr) 450 struct mthca_user_db_table *db_tab, int index, u64 uaddr)
447{ 451{
452 struct page *pages[1];
448 int ret = 0; 453 int ret = 0;
449 u8 status; 454 u8 status;
450 int i; 455 int i;
@@ -472,16 +477,17 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
472 } 477 }
473 478
474 ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0, 479 ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
475 &db_tab->page[i].mem.page, NULL); 480 pages, NULL);
476 if (ret < 0) 481 if (ret < 0)
477 goto out; 482 goto out;
478 483
484 sg_set_page(&db_tab->page[i].mem, pages[0]);
479 db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE; 485 db_tab->page[i].mem.length = MTHCA_ICM_PAGE_SIZE;
480 db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK; 486 db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
481 487
482 ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); 488 ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
483 if (ret < 0) { 489 if (ret < 0) {
484 put_page(db_tab->page[i].mem.page); 490 put_page(pages[0]);
485 goto out; 491 goto out;
486 } 492 }
487 493
@@ -491,7 +497,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
491 ret = -EINVAL; 497 ret = -EINVAL;
492 if (ret) { 498 if (ret) {
493 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); 499 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
494 put_page(db_tab->page[i].mem.page); 500 put_page(sg_page(&db_tab->page[i].mem));
495 goto out; 501 goto out;
496 } 502 }
497 503
@@ -557,7 +563,7 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
557 if (db_tab->page[i].uvirt) { 563 if (db_tab->page[i].uvirt) {
558 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status); 564 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
559 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); 565 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
560 put_page(db_tab->page[i].mem.page); 566 put_page(sg_page(&db_tab->page[i].mem));
561 } 567 }
562 } 568 }
563 569