Diffstat (limited to 'arch/x86/kernel/pci-gart_64.c')
-rw-r--r--	arch/x86/kernel/pci-gart_64.c	55
1 files changed, 16 insertions, 39 deletions
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index b284b58c035c..cfd9f9063896 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -144,48 +144,21 @@ static void flush_gart(void)
 }
 
 #ifdef CONFIG_IOMMU_LEAK
-
-#define SET_LEAK(x)							\
-	do {								\
-		if (iommu_leak_tab)					\
-			iommu_leak_tab[x] = __builtin_return_address(0);\
-	} while (0)
-
-#define CLEAR_LEAK(x)							\
-	do {								\
-		if (iommu_leak_tab)					\
-			iommu_leak_tab[x] = NULL;			\
-	} while (0)
-
 /* Debugging aid for drivers that don't free their IOMMU tables */
-static void **iommu_leak_tab;
 static int leak_trace;
 static int iommu_leak_pages = 20;
 
 static void dump_leak(void)
 {
-	int i;
 	static int dump;
 
-	if (dump || !iommu_leak_tab)
+	if (dump)
 		return;
 	dump = 1;
-	show_stack(NULL, NULL);
 
-	/* Very crude. dump some from the end of the table too */
-	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
-	       iommu_leak_pages);
-	for (i = 0; i < iommu_leak_pages; i += 2) {
-		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
-		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
-				0);
-		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
-	}
-	printk(KERN_DEBUG "\n");
+	show_stack(NULL, NULL);
+	debug_dma_dump_mappings(NULL);
 }
-#else
-# define SET_LEAK(x)
-# define CLEAR_LEAK(x)
 #endif
 
 static void iommu_full(struct device *dev, size_t size, int dir)
@@ -248,7 +221,6 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 
 	for (i = 0; i < npages; i++) {
 		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
-		SET_LEAK(iommu_page + i);
 		phys_mem += PAGE_SIZE;
 	}
 	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
@@ -294,7 +266,6 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	for (i = 0; i < npages; i++) {
 		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
-		CLEAR_LEAK(iommu_page + i);
 	}
 	free_iommu(iommu_page, npages);
 }
@@ -377,7 +348,6 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
 		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
 		while (pages--) {
 			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
-			SET_LEAK(iommu_page);
 			addr += PAGE_SIZE;
 			iommu_page++;
 		}
@@ -688,8 +658,6 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
 	agp_gatt_table = gatt;
 
-	enable_gart_translations();
-
 	error = sysdev_class_register(&gart_sysdev_class);
 	if (!error)
 		error = sysdev_register(&device_gart);
@@ -801,11 +769,12 @@ void __init gart_iommu_init(void)
 
 #ifdef CONFIG_IOMMU_LEAK
 	if (leak_trace) {
-		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
-				  get_order(iommu_pages*sizeof(void *)));
-		if (!iommu_leak_tab)
+		int ret;
+
+		ret = dma_debug_resize_entries(iommu_pages);
+		if (ret)
 			printk(KERN_DEBUG
-			       "PCI-DMA: Cannot allocate leak trace area\n");
+			       "PCI-DMA: Cannot trace all the entries\n");
 	}
 #endif
 
@@ -845,6 +814,14 @@ void __init gart_iommu_init(void)
 	 * the pages as Not-Present:
 	 */
 	wbinvd();
+
+	/*
+	 * Now all caches are flushed and we can safely enable
+	 * GART hardware. Doing it early leaves the possibility
+	 * of stale cache entries that can lead to GART PTE
+	 * errors.
+	 */
+	enable_gart_translations();
 
 	/*
 	 * Try to workaround a bug (thanks to BenH):