Diffstat (limited to 'arch/x86/kernel/pci-gart_64.c')
-rw-r--r--	arch/x86/kernel/pci-gart_64.c	47
1 file changed, 28 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 49285f8fd4d5..1a895a582534 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -82,7 +82,8 @@ AGPEXTERN __u32 *agp_gatt_table;
 static unsigned long next_bit;  /* protected by iommu_bitmap_lock */
 static int need_flush;		/* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(struct device *dev, int size)
+static unsigned long alloc_iommu(struct device *dev, int size,
+				 unsigned long align_mask)
 {
 	unsigned long offset, flags;
 	unsigned long boundary_size;
@@ -90,16 +91,17 @@ static unsigned long alloc_iommu(struct device *dev, int size)
 
 	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
 			   PAGE_SIZE) >> PAGE_SHIFT;
-	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
 			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
-				  size, base_index, boundary_size, 0);
+				  size, base_index, boundary_size, align_mask);
 	if (offset == -1) {
 		need_flush = 1;
 		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
-					  size, base_index, boundary_size, 0);
+					  size, base_index, boundary_size,
+					  align_mask);
 	}
 	if (offset != -1) {
 		next_bit = offset+size;
@@ -236,10 +238,10 @@ nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
  * Caller needs to check if the iommu is needed and flush.
  */
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
-				size_t size, int dir)
+				size_t size, int dir, unsigned long align_mask)
 {
 	unsigned long npages = iommu_num_pages(phys_mem, size);
-	unsigned long iommu_page = alloc_iommu(dev, npages);
+	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
 	int i;
 
 	if (iommu_page == -1) {
@@ -262,7 +264,11 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 static dma_addr_t
 gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 {
-	dma_addr_t map = dma_map_area(dev, paddr, size, dir);
+	dma_addr_t map;
+	unsigned long align_mask;
+
+	align_mask = (1UL << get_order(size)) - 1;
+	map = dma_map_area(dev, paddr, size, dir, align_mask);
 
 	flush_gart();
 
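
[Note] get_order(size) is the log2 of the number of pages the mapping needs, so (1UL << get_order(size)) - 1 makes the returned IOMMU address aligned to the rounded-up size of the mapping, e.g. a 16 KiB buffer gets a 16 KiB-aligned bus address. A small userspace check of that arithmetic (order_of() is a stand-in for the kernel's get_order(); 4 KiB pages assumed):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4 KiB pages */

    static int order_of(unsigned long size)   /* stand-in for get_order() */
    {
            int order = 0;
            unsigned long pages = (size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

            while ((1UL << order) < pages)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long size = 16384;
            unsigned long align_mask = (1UL << order_of(size)) - 1;

            /* prints: size=16384 order=2 align_mask=0x3 */
            printf("size=%lu order=%d align_mask=%#lx\n",
                   size, order_of(size), align_mask);
            return 0;
    }

An align_mask of 0x3 in page units means the allocation starts on a 4-page (16 KiB) boundary, matching the buffer size.
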
@@ -281,7 +287,8 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 	if (!need_iommu(dev, paddr, size))
 		return paddr;
 
-	bus = gart_map_simple(dev, paddr, size, dir);
+	bus = dma_map_area(dev, paddr, size, dir, 0);
+	flush_gart();
 
 	return bus;
 }
@@ -340,7 +347,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 		unsigned long addr = sg_phys(s);
 
 		if (nonforced_iommu(dev, addr, s->length)) {
-			addr = dma_map_area(dev, addr, s->length, dir);
+			addr = dma_map_area(dev, addr, s->length, dir, 0);
 			if (addr == bad_dma_address) {
 				if (i > 0)
 					gart_unmap_sg(dev, sg, i, dir);
@@ -362,7 +369,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
 			int nelems, struct scatterlist *sout,
 			unsigned long pages)
 {
-	unsigned long iommu_start = alloc_iommu(dev, pages);
+	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
 	unsigned long iommu_page = iommu_start;
 	struct scatterlist *s;
 	int i;
@@ -626,7 +633,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	struct pci_dev *dev;
 	void *gatt;
 	int i, error;
-	unsigned long start_pfn, end_pfn;
 
 	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
 	aper_size = aper_base = info->aper_size = 0;
@@ -672,12 +678,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
 			aper_base, aper_size>>10);
 
-	/* need to map that range */
-	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
-	if (end_pfn > max_low_pfn_mapped) {
-		start_pfn = (aper_base>>PAGE_SHIFT);
-		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
-	}
 	return 0;
 
  nommu:
@@ -727,7 +727,8 @@ void __init gart_iommu_init(void)
 {
 	struct agp_kern_info info;
 	unsigned long iommu_start;
-	unsigned long aper_size;
+	unsigned long aper_base, aper_size;
+	unsigned long start_pfn, end_pfn;
 	unsigned long scratch;
 	long i;
 
@@ -765,8 +766,16 @@ void __init gart_iommu_init(void)
 		return;
 	}
 
+	/* need to map that range */
+	aper_size = info.aper_size << 20;
+	aper_base = info.aper_base;
+	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
+	if (end_pfn > max_low_pfn_mapped) {
+		start_pfn = (aper_base>>PAGE_SHIFT);
+		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+	}
+
 	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
-	aper_size = info.aper_size * 1024 * 1024;
 	iommu_size = check_iommu_size(info.aper_base, aper_size);
 	iommu_pages = iommu_size >> PAGE_SHIFT;
 
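
[Note] The aperture-mapping block moves from init_k8_gatt(), which is only reached on the fallback path where no AGP driver set up a GATT, into gart_iommu_init(), so the aperture range is added to the kernel direct mapping on every GART init path; info.aper_size is in MB, hence the << 20 (equivalent to the removed * 1024 * 1024). The pfn arithmetic, worked through for an assumed 64 MB aperture at bus address 0x80000000 with 4 KiB pages (example values, not from the patch):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4 KiB pages */

    int main(void)
    {
            unsigned long aper_base = 0x80000000UL; /* assumed example value */
            unsigned long aper_size = 64UL << 20;   /* aper_size of 64, in MB */
            unsigned long start_pfn = aper_base >> PAGE_SHIFT;
            unsigned long end_pfn = start_pfn + (aper_size >> PAGE_SHIFT);

            /* prints: start_pfn=0x80000 end_pfn=0x84000 */
            printf("start_pfn=%#lx end_pfn=%#lx\n", start_pfn, end_pfn);
            return 0;
    }

If end_pfn already falls below max_low_pfn_mapped the range is covered by the existing direct mapping and init_memory_mapping() is skipped, as the guard in the hunk above shows.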