Diffstat (limited to 'arch/powerpc/kernel/iommu.c'):
 arch/powerpc/kernel/iommu.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 4eba60a32890..7cb77c20fc5d 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -418,10 +418,11 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
  * Build a iommu_table structure. This contains a bit map which
  * is used to manage allocation of the tce space.
  */
-struct iommu_table *iommu_init_table(struct iommu_table *tbl)
+struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 {
 	unsigned long sz;
 	static int welcomed = 0;
+	struct page *page;
 
 	/* Set aside 1/4 of the table for large allocations. */
 	tbl->it_halfpoint = tbl->it_size * 3 / 4;
@@ -429,10 +430,10 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl)
 	/* number of bytes needed for the bitmap */
 	sz = (tbl->it_size + 7) >> 3;
 
-	tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
-	if (!tbl->it_map)
+	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+	if (!page)
 		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
-
+	tbl->it_map = page_address(page);
 	memset(tbl->it_map, 0, sz);
 
 	tbl->it_hint = 0;
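
The two hunks above switch the bitmap allocation in iommu_init_table() from __get_free_pages(), which always allocates on the current node, to alloc_pages_node(), which takes an explicit NUMA node id. In isolation the pattern looks roughly like the sketch below; demo_alloc_bitmap() and its arguments are illustrative only, not part of the patch:

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Minimal sketch of the node-local allocation pattern adopted above.
 * demo_alloc_bitmap() is a hypothetical helper, not kernel code.
 */
static unsigned long *demo_alloc_bitmap(unsigned long nbits, int nid)
{
	unsigned long sz = (nbits + 7) >> 3;	/* bytes for the bitmap */
	struct page *page;

	/* Prefer pages on the given NUMA node rather than whichever
	 * node the caller happens to be running on. */
	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
	if (!page)
		return NULL;

	/* Convert the page to a kernel virtual address. */
	return page_address(page);
}
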
@@ -536,11 +537,12 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
  * to the dma address (mapping) of the first page.
  */
 void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
+		dma_addr_t *dma_handle, unsigned long mask, gfp_t flag, int node)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
 	unsigned int npages, order;
+	struct page *page;
 
 	size = PAGE_ALIGN(size);
 	npages = size >> PAGE_SHIFT;
@@ -560,9 +562,10 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 		return NULL;
 
 	/* Alloc enough pages (and possibly more) */
-	ret = (void *)__get_free_pages(flag, order);
-	if (!ret)
+	page = alloc_pages_node(node, flag, order);
+	if (!page)
 		return NULL;
+	ret = page_address(page);
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
@@ -570,9 +573,9 @@ void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
 			mask >> PAGE_SHIFT, order);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
-		ret = NULL;
-	} else
-		*dma_handle = mapping;
+		return NULL;
+	}
+	*dma_handle = mapping;
 	return ret;
 }
 
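
With the extra int node parameter, iommu_alloc_coherent() now places the backing pages on a caller-chosen NUMA node, and the final hunk also flattens the error handling into an early return. A call site would look roughly like the sketch below; tbl, size, dma_mask and nid are placeholders, not taken from the patch:

/*
 * Hypothetical caller sketch. Only the trailing 'nid' argument is
 * new; everything else matches the old calling convention.
 */
dma_addr_t handle;
void *buf;

buf = iommu_alloc_coherent(tbl, size, &handle, dma_mask,
			   GFP_ATOMIC, nid);
if (!buf)
	return NULL;	/* page allocation or TCE mapping failed */
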