Diffstat (limited to 'arch/x86/kernel/pci-gart_64.c')
-rw-r--r--	arch/x86/kernel/pci-gart_64.c	142
1 files changed, 79 insertions, 63 deletions
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index be33a5442d82..145f1c83369f 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -27,8 +27,8 @@
 #include <linux/scatterlist.h>
 #include <linux/iommu-helper.h>
 #include <linux/sysdev.h>
+#include <linux/io.h>
 #include <asm/atomic.h>
-#include <asm/io.h>
 #include <asm/mtrr.h>
 #include <asm/pgtable.h>
 #include <asm/proto.h>
@@ -80,9 +80,10 @@ AGPEXTERN int agp_memory_reserved;
 AGPEXTERN __u32 *agp_gatt_table;
 
 static unsigned long next_bit;	/* protected by iommu_bitmap_lock */
-static int need_flush;		/* global flush state. set for each gart wrap */
+static bool need_flush;		/* global flush state. set for each gart wrap */
 
-static unsigned long alloc_iommu(struct device *dev, int size)
+static unsigned long alloc_iommu(struct device *dev, int size,
+				 unsigned long align_mask)
 {
 	unsigned long offset, flags;
 	unsigned long boundary_size;
@@ -90,26 +91,27 @@ static unsigned long alloc_iommu(struct device *dev, int size)
 
 	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
			   PAGE_SIZE) >> PAGE_SHIFT;
-	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
-				  size, base_index, boundary_size, 0);
+				  size, base_index, boundary_size, align_mask);
 	if (offset == -1) {
-		need_flush = 1;
+		need_flush = true;
 		offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, 0,
-					  size, base_index, boundary_size, 0);
+					  size, base_index, boundary_size,
+					  align_mask);
 	}
 	if (offset != -1) {
 		next_bit = offset+size;
 		if (next_bit >= iommu_pages) {
 			next_bit = 0;
-			need_flush = 1;
+			need_flush = true;
 		}
 	}
 	if (iommu_fullflush)
-		need_flush = 1;
+		need_flush = true;
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 
 	return offset;
@@ -134,7 +136,7 @@ static void flush_gart(void)
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	if (need_flush) {
 		k8_flush_garts();
-		need_flush = 0;
+		need_flush = false;
 	}
 	spin_unlock_irqrestore(&iommu_bitmap_lock, flags);
 }
@@ -173,7 +175,8 @@ static void dump_leak(void)
		   iommu_leak_pages);
 	for (i = 0; i < iommu_leak_pages; i += 2) {
 		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
-		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i], 0);
+		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
+			       0);
 		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
 	}
 	printk(KERN_DEBUG "\n");
@@ -212,34 +215,24 @@ static void iommu_full(struct device *dev, size_t size, int dir)
 static inline int
 need_iommu(struct device *dev, unsigned long addr, size_t size)
 {
-	u64 mask = *dev->dma_mask;
-	int high = addr + size > mask;
-	int mmu = high;
-
-	if (force_iommu)
-		mmu = 1;
-
-	return mmu;
+	return force_iommu ||
+		!is_buffer_dma_capable(*dev->dma_mask, addr, size);
 }
 
 static inline int
 nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
 {
-	u64 mask = *dev->dma_mask;
-	int high = addr + size > mask;
-	int mmu = high;
-
-	return mmu;
+	return !is_buffer_dma_capable(*dev->dma_mask, addr, size);
 }
 
 /* Map a single continuous physical area into the IOMMU.
  * Caller needs to check if the iommu is needed and flush.
  */
 static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
-				size_t size, int dir)
+				size_t size, int dir, unsigned long align_mask)
 {
 	unsigned long npages = iommu_num_pages(phys_mem, size);
-	unsigned long iommu_page = alloc_iommu(dev, npages);
+	unsigned long iommu_page = alloc_iommu(dev, npages, align_mask);
 	int i;
 
 	if (iommu_page == -1) {
@@ -259,16 +252,6 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
 }
 
-static dma_addr_t
-gart_map_simple(struct device *dev, phys_addr_t paddr, size_t size, int dir)
-{
-	dma_addr_t map = dma_map_area(dev, paddr, size, dir);
-
-	flush_gart();
-
-	return map;
-}
-
 /* Map a single area into the IOMMU */
 static dma_addr_t
 gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
@@ -276,12 +259,13 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 	unsigned long bus;
 
 	if (!dev)
-		dev = &fallback_dev;
+		dev = &x86_dma_fallback_dev;
 
 	if (!need_iommu(dev, paddr, size))
 		return paddr;
 
-	bus = gart_map_simple(dev, paddr, size, dir);
+	bus = dma_map_area(dev, paddr, size, dir, 0);
+	flush_gart();
 
 	return bus;
 }
@@ -340,7 +324,7 @@ static int dma_map_sg_nonforce(struct device *dev, struct scatterlist *sg,
 		unsigned long addr = sg_phys(s);
 
 		if (nonforced_iommu(dev, addr, s->length)) {
-			addr = dma_map_area(dev, addr, s->length, dir);
+			addr = dma_map_area(dev, addr, s->length, dir, 0);
 			if (addr == bad_dma_address) {
 				if (i > 0)
 					gart_unmap_sg(dev, sg, i, dir);
@@ -362,7 +346,7 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
			  int nelems, struct scatterlist *sout,
			  unsigned long pages)
 {
-	unsigned long iommu_start = alloc_iommu(dev, pages);
+	unsigned long iommu_start = alloc_iommu(dev, pages, 0);
 	unsigned long iommu_page = iommu_start;
 	struct scatterlist *s;
 	int i;
@@ -427,7 +411,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 		return 0;
 
 	if (!dev)
-		dev = &fallback_dev;
+		dev = &x86_dma_fallback_dev;
 
 	out = 0;
 	start = 0;
@@ -499,6 +483,46 @@ error:
 	return 0;
 }
 
+/* allocate and map a coherent mapping */
+static void *
+gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
+		    gfp_t flag)
+{
+	dma_addr_t paddr;
+	unsigned long align_mask;
+	struct page *page;
+
+	if (force_iommu && !(flag & GFP_DMA)) {
+		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+		page = alloc_pages(flag | __GFP_ZERO, get_order(size));
+		if (!page)
+			return NULL;
+
+		align_mask = (1UL << get_order(size)) - 1;
+		paddr = dma_map_area(dev, page_to_phys(page), size,
+				     DMA_BIDIRECTIONAL, align_mask);
+
+		flush_gart();
+		if (paddr != bad_dma_address) {
+			*dma_addr = paddr;
+			return page_address(page);
+		}
+		__free_pages(page, get_order(size));
+	} else
+		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
+
+	return NULL;
+}
+
+/* free a coherent mapping */
+static void
+gart_free_coherent(struct device *dev, size_t size, void *vaddr,
+		   dma_addr_t dma_addr)
+{
+	gart_unmap_single(dev, dma_addr, size, DMA_BIDIRECTIONAL);
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+
 static int no_agp;
 
 static __init unsigned long check_iommu_size(unsigned long aper, u64 aper_size)
@@ -649,13 +673,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	info->aper_size = aper_size >> 20;
 
 	gatt_size = (aper_size >> PAGE_SHIFT) * sizeof(u32);
-	gatt = (void *)__get_free_pages(GFP_KERNEL, get_order(gatt_size));
+	gatt = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					get_order(gatt_size));
 	if (!gatt)
 		panic("Cannot allocate GATT table");
 	if (set_memory_uc((unsigned long)gatt, gatt_size >> PAGE_SHIFT))
 		panic("Could not set GART PTEs to uncacheable pages");
 
-	memset(gatt, 0, gatt_size);
 	agp_gatt_table = gatt;
 
 	enable_gart_translations();
@@ -664,7 +688,8 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	if (!error)
 		error = sysdev_register(&device_gart);
 	if (error)
-		panic("Could not register gart_sysdev -- would corrupt data on next suspend");
+		panic("Could not register gart_sysdev -- "
+		      "would corrupt data on next suspend");
 
 	flush_gart();
 
@@ -680,20 +705,13 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 	return -1;
 }
 
-extern int agp_amd64_init(void);
-
 static struct dma_mapping_ops gart_dma_ops = {
 	.map_single = gart_map_single,
-	.map_simple = gart_map_simple,
 	.unmap_single = gart_unmap_single,
-	.sync_single_for_cpu = NULL,
-	.sync_single_for_device = NULL,
-	.sync_single_range_for_cpu = NULL,
-	.sync_single_range_for_device = NULL,
-	.sync_sg_for_cpu = NULL,
-	.sync_sg_for_device = NULL,
 	.map_sg = gart_map_sg,
 	.unmap_sg = gart_unmap_sg,
+	.alloc_coherent = gart_alloc_coherent,
+	.free_coherent = gart_free_coherent,
 };
 
 void gart_iommu_shutdown(void)
@@ -753,8 +771,8 @@ void __init gart_iommu_init(void)
	    (no_agp && init_k8_gatt(&info) < 0)) {
 		if (max_pfn > MAX_DMA32_PFN) {
 			printk(KERN_WARNING "More than 4GB of memory "
-			       "but GART IOMMU not available.\n"
-			       KERN_WARNING "falling back to iommu=soft.\n");
+			       "but GART IOMMU not available.\n");
+			printk(KERN_WARNING "falling back to iommu=soft.\n");
 		}
 		return;
 	}
@@ -772,19 +790,16 @@ void __init gart_iommu_init(void)
 	iommu_size = check_iommu_size(info.aper_base, aper_size);
 	iommu_pages = iommu_size >> PAGE_SHIFT;
 
-	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL,
+	iommu_gart_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(iommu_pages/8));
 	if (!iommu_gart_bitmap)
 		panic("Cannot allocate iommu bitmap\n");
-	memset(iommu_gart_bitmap, 0, iommu_pages/8);
 
 #ifdef CONFIG_IOMMU_LEAK
 	if (leak_trace) {
-		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL,
+		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
				  get_order(iommu_pages*sizeof(void *)));
-		if (iommu_leak_tab)
-			memset(iommu_leak_tab, 0, iommu_pages * 8);
-		else
+		if (!iommu_leak_tab)
 			printk(KERN_DEBUG
 			       "PCI-DMA: Cannot allocate leak trace area\n");
 	}
@@ -794,7 +809,7 @@ void __init gart_iommu_init(void)
 	 * Out of IOMMU space handling.
 	 * Reserve some invalid pages at the beginning of the GART.
 	 */
-	set_bit_string(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
+	iommu_area_reserve(iommu_gart_bitmap, 0, EMERGENCY_PAGES);
 
 	agp_memory_reserved = iommu_size;
 	printk(KERN_INFO
@@ -852,7 +867,8 @@ void __init gart_parse_options(char *p)
 	if (!strncmp(p, "leak", 4)) {
 		leak_trace = 1;
 		p += 4;
-		if (*p == '=') ++p;
+		if (*p == '=')
+			++p;
 		if (isdigit(*p) && get_option(&p, &arg))
 			iommu_leak_pages = arg;
 	}