about summary refs log tree commit diff stats
path: root/arch/x86/kernel/pci-calgary_64.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/x86/kernel/pci-calgary_64.c')
-rw-r--r-- arch/x86/kernel/pci-calgary_64.c | 36
1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index dcdac6c826e9..e1e731d78f38 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -217,16 +217,6 @@ static inline unsigned long verify_bit_range(unsigned long* bitmap,
217 217
218#endif /* CONFIG_IOMMU_DEBUG */ 218#endif /* CONFIG_IOMMU_DEBUG */
219 219
220static inline unsigned int num_dma_pages(unsigned long dma, unsigned int dmalen)
221{
222 unsigned int npages;
223
224 npages = PAGE_ALIGN(dma + dmalen) - (dma & PAGE_MASK);
225 npages >>= PAGE_SHIFT;
226
227 return npages;
228}
229
230static inline int translation_enabled(struct iommu_table *tbl) 220static inline int translation_enabled(struct iommu_table *tbl)
231{ 221{
232 /* only PHBs with translation enabled have an IOMMU table */ 222 /* only PHBs with translation enabled have an IOMMU table */
@@ -261,7 +251,7 @@ static void iommu_range_reserve(struct iommu_table *tbl,
261 badbit, tbl, start_addr, npages); 251 badbit, tbl, start_addr, npages);
262 } 252 }
263 253
264 set_bit_string(tbl->it_map, index, npages); 254 iommu_area_reserve(tbl->it_map, index, npages);
265 255
266 spin_unlock_irqrestore(&tbl->it_lock, flags); 256 spin_unlock_irqrestore(&tbl->it_lock, flags);
267} 257}
@@ -408,7 +398,7 @@ static void calgary_unmap_sg(struct device *dev,
408 if (dmalen == 0) 398 if (dmalen == 0)
409 break; 399 break;
410 400
411 npages = num_dma_pages(dma, dmalen); 401 npages = iommu_num_pages(dma, dmalen, PAGE_SIZE);
412 iommu_free(tbl, dma, npages); 402 iommu_free(tbl, dma, npages);
413 } 403 }
414} 404}
@@ -427,7 +417,7 @@ static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
427 BUG_ON(!sg_page(s)); 417 BUG_ON(!sg_page(s));
428 418
429 vaddr = (unsigned long) sg_virt(s); 419 vaddr = (unsigned long) sg_virt(s);
430 npages = num_dma_pages(vaddr, s->length); 420 npages = iommu_num_pages(vaddr, s->length, PAGE_SIZE);
431 421
432 entry = iommu_range_alloc(dev, tbl, npages); 422 entry = iommu_range_alloc(dev, tbl, npages);
433 if (entry == bad_dma_address) { 423 if (entry == bad_dma_address) {
@@ -464,7 +454,7 @@ static dma_addr_t calgary_map_single(struct device *dev, phys_addr_t paddr,
464 struct iommu_table *tbl = find_iommu_table(dev); 454 struct iommu_table *tbl = find_iommu_table(dev);
465 455
466 uaddr = (unsigned long)vaddr; 456 uaddr = (unsigned long)vaddr;
467 npages = num_dma_pages(uaddr, size); 457 npages = iommu_num_pages(uaddr, size, PAGE_SIZE);
468 458
469 return iommu_alloc(dev, tbl, vaddr, npages, direction); 459 return iommu_alloc(dev, tbl, vaddr, npages, direction);
470} 460}
@@ -475,7 +465,7 @@ static void calgary_unmap_single(struct device *dev, dma_addr_t dma_handle,
475 struct iommu_table *tbl = find_iommu_table(dev); 465 struct iommu_table *tbl = find_iommu_table(dev);
476 unsigned int npages; 466 unsigned int npages;
477 467
478 npages = num_dma_pages(dma_handle, size); 468 npages = iommu_num_pages(dma_handle, size, PAGE_SIZE);
479 iommu_free(tbl, dma_handle, npages); 469 iommu_free(tbl, dma_handle, npages);
480} 470}
481 471
@@ -491,6 +481,8 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size,
491 npages = size >> PAGE_SHIFT; 481 npages = size >> PAGE_SHIFT;
492 order = get_order(size); 482 order = get_order(size);
493 483
484 flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
485
494 /* alloc enough pages (and possibly more) */ 486 /* alloc enough pages (and possibly more) */
495 ret = (void *)__get_free_pages(flag, order); 487 ret = (void *)__get_free_pages(flag, order);
496 if (!ret) 488 if (!ret)
@@ -510,8 +502,22 @@ error:
510 return ret; 502 return ret;
511} 503}
512 504
/*
 * Free a DMA-coherent buffer that was set up by calgary_alloc_coherent
 * in this same patch: tear down the Calgary IOMMU translation for the
 * handle, then return the backing pages to the page allocator.
 * NOTE(review): shown here as patch-addition lines from a cgit diff dump;
 * the fused leading numbers (505, 506, ...) are the new-file line numbers.
 */
505static void calgary_free_coherent(struct device *dev, size_t size,
506 void *vaddr, dma_handle)
507{
508 unsigned int npages;
509 struct iommu_table *tbl = find_iommu_table(dev);
510
 /* Round up so a partial trailing page still counts as a whole
  * IOMMU page — mirrors the PAGE_ALIGN done on the alloc side. */
511 size = PAGE_ALIGN(size);
512 npages = size >> PAGE_SHIFT;
513
 /* Release the IOMMU table entries first, then the memory itself;
  * get_order(size) matches the __get_free_pages() order used at alloc. */
514 iommu_free(tbl, dma_handle, npages);
515 free_pages((unsigned long)vaddr, get_order(size));
516}
517
513static struct dma_mapping_ops calgary_dma_ops = { 518static struct dma_mapping_ops calgary_dma_ops = {
514 .alloc_coherent = calgary_alloc_coherent, 519 .alloc_coherent = calgary_alloc_coherent,
520 .free_coherent = calgary_free_coherent,
515 .map_single = calgary_map_single, 521 .map_single = calgary_map_single,
516 .unmap_single = calgary_unmap_single, 522 .unmap_single = calgary_unmap_single,
517 .map_sg = calgary_map_sg, 523 .map_sg = calgary_map_sg,