Diffstat (limited to 'arch/x86/kernel/pci-gart_64.c')
-rw-r--r--  arch/x86/kernel/pci-gart_64.c  176
1 file changed, 95 insertions(+), 81 deletions(-)
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index df5f142657d2..145f1c83369f 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -27,8 +27,8 @@
 #include <linux/scatterlist.h>
 #include <linux/iommu-helper.h>
 #include <linux/sysdev.h>
+#include <linux/io.h>
 #include <asm/atomic.h>
-#include <asm/io.h>
 #include <asm/mtrr.h>
 #include <asm/pgtable.h>
 #include <asm/proto.h>
@@ -67,9 +67,6 @@ static u32 gart_unmapped_entry;
 	(((x) & 0xfffff000) | (((x) >> 32) << 4) | GPTE_VALID | GPTE_COHERENT)
 #define GPTE_DECODE(x) (((x) & 0xfffff000) | (((u64)(x) & 0xff0) << 28))
 
-#define to_pages(addr, size) \
-	(round_up(((addr) & ~PAGE_MASK) + (size), PAGE_SIZE) >> PAGE_SHIFT)
-
 #define EMERGENCY_PAGES 32 /* = 128KB */
 
 #ifdef CONFIG_AGP
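
The local to_pages() macro removed above is replaced throughout this patch by iommu_num_pages(). A minimal standalone sketch of the page-count arithmetic both perform (the num_pages name and the 4 KB PAGE_SIZE/PAGE_SHIFT values are assumptions for illustration, not the kernel helper itself):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Pages spanned by a buffer: offset into its first page plus its length,
 * rounded up to whole pages. */
static unsigned long num_pages(unsigned long addr, unsigned long size)
{
	unsigned long span = (addr & ~PAGE_MASK) + size;
	return (span + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	/* A 100-byte buffer that starts 10 bytes before a page boundary
	 * spans two pages. */
	printf("%lu\n", num_pages(0xfff6, 100));
	return 0;
}
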
@@ -83,9 +80,10 @@ AGPEXTERN int agp_memory_reserved;
 AGPEXTERN __u32 *agp_gatt_table;
 
 static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
-static int need_flush;		/* global flush state. set for each gart wrap */
+static bool need_flush;		/* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(struct device *dev, int size)
+static unsigned long alloc_iommu(struct device *dev, int size,
+				 unsigned long align_mask)
 {
 	unsigned long offset, flags;
 	unsigned long boundary_size;
@@ -93,26 +91,27 @@ static unsigned long alloc_iommu(struct device *dev, int size)
 
 	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
 			   PAGE_SIZE) >> PAGE_SHIFT;
-	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
 			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
-				  size, base_index, boundary_size, 0);
+				  size, base_index, boundary_size, align_mask);
 	if (offset == -1) {
-		need_flush = 1;
+		need_flush = true;
 		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
-					  size, base_index, boundary_size, 0);
+					  size, base_index, boundary_size,
+					  align_mask);
 	}
 	if (offset != -1) {
 		next_bit = offset+size;
 		if (next_bit >= iommu_pages) {
 			next_bit = 0;
-			need_flush = 1;
+			need_flush = true;
 		}
 	}
 	if (iommu_fullflush)
-		need_flush = 1;
+		need_flush = true;
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 
 	return offset;
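
alloc_iommu() now forwards an align_mask to iommu_area_alloc() so that callers (gart_alloc_coherent() later in this patch) can request naturally aligned GART space. A much-simplified model of what the mask constrains, with invented names and without the segment-boundary and bitmap machinery of the real allocator:

#include <stdio.h>
#include <string.h>

#define NBITS 64

/* Return the first run of 'size' free pages whose start index satisfies
 * (index & align_mask) == 0; -1 if none.  Purely illustrative. */
static long alloc_aligned(const unsigned char *used, int size,
			  unsigned long align_mask)
{
	for (long i = 0; i + size <= NBITS; i = (i | align_mask) + 1) {
		int free = 1;

		for (int j = 0; j < size; j++)
			free &= !used[i + j];
		if (free)
			return i;
	}
	return -1;
}

int main(void)
{
	unsigned char used[NBITS] = { 0 };

	memset(used, 1, 5);	/* pages 0-4 already taken */
	/* With align_mask = 3, a 4-page request lands at 8, not at 5. */
	printf("%ld\n", alloc_aligned(used, 4, 3));
	return 0;
}
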
@@ -137,7 +136,7 @@ static void flush_gart(void)
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	if (need_flush) {
 		k8_flush_garts();
-		need_flush = 0;
+		need_flush = false;
 	}
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
@@ -176,7 +175,8 @@ static void dump_leak(void)
 		       iommu_leak_pages);
 	for (i = 0; i < iommu_leak_pages; i += 2) {
 		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
-		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
+		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
+			       0);
 		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
 	}
 	printk(KERN_DEBUG "\n");
@@ -215,34 +215,24 @@ static void iommu_full(struct device *dev, size_t size, int dir)
 static inline int
 need_iommu(struct device *dev, unsigned long addr, size_t size)
 {
-	u64 mask = *dev->dma_mask;
-	int high = addr + size > mask;
-	int mmu = high;
-
-	if (force_iommu)
-		mmu = 1;
-
-	return mmu;
+	return force_iommu ||
+		!is_buffer_dma_capable(*dev->dma_mask, addr, size);
 }
 
 static inline int
 nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
 {
-	u64 mask = *dev->dma_mask;
-	int high = addr + size > mask;
-	int mmu = high;
-
-	return mmu;
+	return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
 }
 
 /* Map a single continuous physical area into the IOMMU.
  * Caller needs to check if the iommu is needed and flush.
  */
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
-				size_t size, int dir)
+				size_t size, int dir, unsigned long align_mask)
 {
-	unsigned long npages = to_pages(phys_mem, size);
-	unsigned long iommu_page = alloc_iommu(dev, npages);
+	unsigned long npages = iommu_num_pages(phys_mem, size);
+	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
 	int i;
 
 	if (iommu_page == -1) {
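
need_iommu() and nonforced_iommu() now defer to is_buffer_dma_capable() instead of open-coding the mask comparison. The check itself is exactly what the removed lines computed: does the whole buffer end at or below the device's DMA mask? A small standalone sketch (buffer_dma_capable is a renamed illustration, not the kernel helper):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the removed "addr + size > mask" test, inverted. */
static bool buffer_dma_capable(uint64_t dma_mask, uint64_t addr, size_t size)
{
	return addr + size <= dma_mask;
}

int main(void)
{
	uint64_t mask32 = 0xffffffffULL;	/* a 32-bit-only device */

	/* 1: the buffer ends below the 32-bit mask, no IOMMU needed. */
	printf("%d\n", buffer_dma_capable(mask32, 0xffffe000ULL, 0x1000));
	/* 0: the buffer extends past the mask, so it must be remapped. */
	printf("%d\n", buffer_dma_capable(mask32, 0xfffff000ULL, 0x1000));
	return 0;
}
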
@@ -262,16 +252,6 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
 }
 
-static dma_addr_t
-gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
-{
-	dma_addr_t map = dma_map_area(dev, paddr, size, dir);
-
-	flush_gart();
-
-	return map;
-}
-
 /* Map a single area into the IOMMU */
 static dma_addr_t
 gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
@@ -279,12 +259,13 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 	unsigned long bus;
 
 	if (!dev)
-		dev = &fallback_dev;
+		dev = &x86_dma_fallback_dev;
 
 	if (!need_iommu(dev, paddr, size))
 		return paddr;
 
-	bus = gart_map_simple(dev, paddr, size, dir);
+	bus = dma_map_area(dev, paddr, size, dir, 0);
+	flush_gart();
 
 	return bus;
 }
@@ -304,7 +285,7 @@ static void gart_unmap_single(struct device *dev, dma_addr_t dma_addr,
 		return;
 
 	iommu_page = (dma_addr - iommu_bus_base)>>PAGE_SHIFT;
-	npages = to_pages(dma_addr, size);
+	npages = iommu_num_pages(dma_addr, size);
 	for (i = 0; i < npages; i++) {
 		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
 		CLEAR_LEAK(iommu_page + i);
@@ -343,7 +324,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 		unsigned long addr = sg_phys(s);
 
 		if (nonforced_iommu(dev, addr, s->length)) {
-			addr = dma_map_area(dev, addr, s->length, dir);
+			addr = dma_map_area(dev, addr, s->length, dir, 0);
 			if (addr == bad_dma_address) {
 				if (i > 0)
 					gart_unmap_sg(dev, sg, i, dir);
@@ -365,7 +346,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
 			  int nelems, struct scatterlist *sout,
 			  unsigned long pages)
 {
-	unsigned long iommu_start = alloc_iommu(dev, pages);
+	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
 	unsigned long iommu_page = iommu_start;
 	struct scatterlist *s;
 	int i;
@@ -387,7 +368,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
 		}
 
 		addr = phys_addr;
-		pages = to_pages(s->offset, s->length);
+		pages = iommu_num_pages(s->offset, s->length);
 		while (pages--) {
 			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
 			SET_LEAK(iommu_page);
@@ -430,7 +411,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 		return 0;
 
 	if (!dev)
-		dev = &fallback_dev;
+		dev = &x86_dma_fallback_dev;
 
 	out = 0;
 	start = 0;
@@ -470,7 +451,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 
 		seg_size += s->length;
 		need = nextneed;
-		pages += to_pages(s->offset, s->length);
+		pages += iommu_num_pages(s->offset, s->length);
 		ps = s;
 	}
 	if (dma_map_cont(dev, start_sg, i - start, sgmap, pages, need) < 0)
@@ -502,6 +483,46 @@ error:
 	return 0;
 }
 
+/* allocate and map a coherent mapping */
+static void *
+gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
+		    gfp_t flag)
+{
+	dma_addr_t paddr;
+	unsigned long align_mask;
+	struct page *page;
+
+	if (force_iommu && !(flag & GFP_DMA)) {
+		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
+		if (!page)
+			return NULL;
+
+		align_mask = (1UL << get_order(size)) - 1;
+		paddr = dma_map_area(dev, page_to_phys(page), size,
+				     DMA_BIDIRECTIONAL, align_mask);
+
+		flush_gart();
+		if (paddr != bad_dma_address) {
+			*dma_addr = paddr;
+			return page_address(page);
+		}
+		__free_pages(page, get_order(size));
+	} else
+		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
+
+	return NULL;
+}
+
+/* free a coherent mapping */
+static void
+gart_free_coherent(struct device *dev, size_t size, void *vaddr,
+		   dma_addr_t dma_addr)
+{
+	gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+
 static int no_agp;
 
 static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
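
The new gart_alloc_coherent() derives its alignment mask from the allocation order, so a buffer covering 2^order pages lands at a 2^order-page-aligned GART address. A sketch of that derivation; get_order() here is a simplified userspace stand-in for the kernel helper, and the values are made up:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order such that (1 << order) pages cover 'size' bytes. */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long size = 3 * PAGE_SIZE;	/* rounds up to order 2 */
	unsigned long align_mask = (1UL << get_order(size)) - 1;

	/* The allocator may only return GART page indexes where
	 * (index & align_mask) == 0, i.e. 4-page-aligned slots here. */
	printf("order=%d align_mask=0x%lx\n", get_order(size), align_mask);
	return 0;
}
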
@@ -629,7 +650,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	struct pci_dev *dev;
 	void *gatt;
 	int i, error;
-	unsigned long start_pfn, end_pfn;
 
 	printk(KERN_INFO "PCI-DMA: Disabling AGP.\n");
 	aper_size = aper_base = info->aper_size = 0;
@@ -653,13 +673,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	info->aper_size = aper_size >> 20;
 
 	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
-	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
+	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					get_order(gatt_size));
 	if (!gatt)
 		panic("Cannot allocate GATT table");
 	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
 		panic("Could not set GART PTEs to uncacheable pages");
 
-	memset(gatt, 0, gatt_size);
 	agp_gatt_table = gatt;
 
 	enable_gart_translations();
@@ -668,19 +688,14 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	if (!error)
 		error = sysdev_register(&device_gart);
 	if (error)
-		panic("Could not register gart_sysdev -- would corrupt data on next suspend");
+		panic("Could not register gart_sysdev -- "
+		      "would corrupt data on next suspend");
 
 	flush_gart();
 
 	printk(KERN_INFO "PCI-DMA: aperture base @ %x size %u KB\n",
 	       aper_base, aper_size>>10);
 
-	/* need to map that range */
-	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
-	if (end_pfn > max_low_pfn_mapped) {
-		start_pfn = (aper_base>>PAGE_SHIFT);
-		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
-	}
 	return 0;
 
  nommu:
@@ -690,21 +705,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	return -1;
 }
 
-extern int agp_amd64_init(void);
-
-static const struct dma_mapping_ops gart_dma_ops = {
-	.mapping_error = NULL,
+static struct dma_mapping_ops gart_dma_ops = {
 	.map_single = gart_map_single,
-	.map_simple = gart_map_simple,
 	.unmap_single = gart_unmap_single,
-	.sync_single_for_cpu = NULL,
-	.sync_single_for_device = NULL,
-	.sync_single_range_for_cpu = NULL,
-	.sync_single_range_for_device = NULL,
-	.sync_sg_for_cpu = NULL,
-	.sync_sg_for_device = NULL,
 	.map_sg = gart_map_sg,
 	.unmap_sg = gart_unmap_sg,
+	.alloc_coherent = gart_alloc_coherent,
+	.free_coherent = gart_free_coherent,
 };
 
 void gart_iommu_shutdown(void)
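
With alloc_coherent/free_coherent wired up, the explicit NULL members are dropped from gart_dma_ops: unnamed hooks in a designated initializer are zero anyway, and callers test each pointer before dispatching. A small sketch of that pattern, with invented struct and hook names:

#include <stdio.h>

struct demo_ops {
	void (*map_single)(void);
	void (*sync_for_cpu)(void);	/* intentionally left unset */
};

static void demo_map_single(void) { puts("map_single"); }

/* Members not named here are implicitly NULL. */
static struct demo_ops ops = {
	.map_single = demo_map_single,
};

int main(void)
{
	if (ops.map_single)
		ops.map_single();
	if (!ops.sync_for_cpu)
		puts("sync_for_cpu not provided; skipped");
	return 0;
}
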
@@ -731,7 +738,8 @@ void __init gart_iommu_init(void)
 {
 	struct agp_kern_info info;
 	unsigned long iommu_start;
-	unsigned long aper_size;
+	unsigned long aper_base, aper_size;
+	unsigned long start_pfn, end_pfn;
 	unsigned long scratch;
 	long i;
 
@@ -763,30 +771,35 @@ void __init gart_iommu_init(void)
 	    (no_agp && init_k8_gatt(&info) < 0)) {
 		if (max_pfn > MAX_DMA32_PFN) {
 			printk(KERN_WARNING "More than 4GB of memory "
-			       "but GART IOMMU not available.\n"
-			       KERN_WARNING "falling back to iommu=soft.\n");
+			       "but GART IOMMU not available.\n");
+			printk(KERN_WARNING "falling back to iommu=soft.\n");
 		}
 		return;
 	}
 
+	/* need to map that range */
+	aper_size = info.aper_size << 20;
+	aper_base = info.aper_base;
+	end_pfn = (aper_base>>PAGE_SHIFT) + (aper_size>>PAGE_SHIFT);
+	if (end_pfn > max_low_pfn_mapped) {
+		start_pfn = (aper_base>>PAGE_SHIFT);
+		init_memory_mapping(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
+	}
+
 	printk(KERN_INFO "PCI-DMA: using GART IOMMU.\n");
-	aper_size = info.aper_size * 1024 * 1024;
 	iommu_size = check_iommu_size(info.aper_base, aper_size);
 	iommu_pages = iommu_size >> PAGE_SHIFT;
 
-	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
+	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
 						      get_order(iommu_pages/8));
 	if (!iommu_gart_bitmap)
 		panic("Cannot allocate iommu bitmap\n");
-	memset(iommu_gart_bitmap, 0, iommu_pages/8);
 
 #ifdef CONFIG_IOMMU_LEAK
 	if (leak_trace) {
-		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
+		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
 				  get_order(iommu_pages*sizeof(void *)));
-		if (iommu_leak_tab)
-			memset(iommu_leak_tab, 0, iommu_pages * 8);
-		else
+		if (!iommu_leak_tab)
 			printk(KERN_DEBUG
 			       "PCI-DMA: Cannot allocate leak trace area\n");
 	}
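
The aperture-mapping block removed from init_k8_gatt() earlier in this patch reappears here in gart_iommu_init(), driven by info.aper_base and info.aper_size. A toy illustration of the pfn arithmetic that decides whether init_memory_mapping() is needed (all values and the max_low_pfn_mapped stand-in are made up):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long aper_base = 0xc0000000UL;		/* example aperture base */
	unsigned long aper_size = 64UL << 20;		/* 64 MB aperture */
	unsigned long max_low_pfn_mapped = 0xbffff;	/* direct map ends just below it */

	unsigned long start_pfn = aper_base >> PAGE_SHIFT;
	unsigned long end_pfn = start_pfn + (aper_size >> PAGE_SHIFT);

	/* Only map the range if it is not already covered by the direct mapping. */
	if (end_pfn > max_low_pfn_mapped)
		printf("would map %#lx..%#lx\n",
		       start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
	return 0;
}
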
@@ -796,7 +809,7 @@ void __init gart_iommu_init(void)
 	 * Out of IOMMU space handling.
 	 * Reserve some invalid pages at the beginning of the GART.
 	 */
-	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+	iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
 
 	agp_memory_reserved = iommu_size;
 	printk(KERN_INFO
@@ -854,7 +867,8 @@ void __init gart_parse_options(char *p)
 	if (!strncmp(p, "leak", 4)) {
 		leak_trace = 1;
 		p += 4;
-		if (*p == '=') ++p;
+		if (*p == '=')
+			++p;
 		if (isdigit(*p) && get_option(&p, &arg))
 			iommu_leak_pages = arg;
 	}