 arch/x86/Kconfig.debug        |  3 +--
 arch/x86/kernel/pci-gart_64.c | 45 +++++-------------------------------------
 2 files changed, 9 insertions(+), 39 deletions(-)
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index d8359e73317f..5865712d105d 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -161,8 +161,7 @@ config IOMMU_DEBUG
 
 config IOMMU_LEAK
 	bool "IOMMU leak tracing"
-	depends on DEBUG_KERNEL
-	depends on IOMMU_DEBUG
+	depends on IOMMU_DEBUG && DMA_API_DEBUG
 	---help---
 	  Add a simple leak tracer to the IOMMU code. This is useful when you
 	  are debugging a buggy device driver that leaks IOMMU mappings.
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index b284b58c035c..1e8920d98f7c 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -144,48 +144,21 @@ static void flush_gart(void)
 }
 
 #ifdef CONFIG_IOMMU_LEAK
-
-#define SET_LEAK(x)							\
-	do {								\
-		if (iommu_leak_tab)					\
-			iommu_leak_tab[x] = __builtin_return_address(0);\
-	} while (0)
-
-#define CLEAR_LEAK(x)							\
-	do {								\
-		if (iommu_leak_tab)					\
-			iommu_leak_tab[x] = NULL;			\
-	} while (0)
-
 /* Debugging aid for drivers that don't free their IOMMU tables */
-static void **iommu_leak_tab;
 static int leak_trace;
 static int iommu_leak_pages = 20;
 
 static void dump_leak(void)
 {
-	int i;
 	static int dump;
 
-	if (dump || !iommu_leak_tab)
+	if (dump)
 		return;
 	dump = 1;
-	show_stack(NULL, NULL);
 
-	/* Very crude. dump some from the end of the table too */
-	printk(KERN_DEBUG "Dumping %d pages from end of IOMMU:\n",
-	       iommu_leak_pages);
-	for (i = 0; i < iommu_leak_pages; i += 2) {
-		printk(KERN_DEBUG "%lu: ", iommu_pages-i);
-		printk_address((unsigned long) iommu_leak_tab[iommu_pages-i],
-				0);
-		printk(KERN_CONT "%c", (i+1)%2 == 0 ? '\n' : ' ');
-	}
-	printk(KERN_DEBUG "\n");
+	show_stack(NULL, NULL);
+	debug_dma_dump_mappings(NULL);
 }
-#else
-# define SET_LEAK(x)
-# define CLEAR_LEAK(x)
 #endif
 
 static void iommu_full(struct device *dev, size_t size, int dir)
@@ -248,7 +221,6 @@ static dma_addr_t dma_map_area(struct device *dev, dma_addr_t phys_mem,
 
 	for (i = 0; i < npages; i++) {
 		iommu_gatt_base[iommu_page + i] = GPTE_ENCODE(phys_mem);
-		SET_LEAK(iommu_page + i);
 		phys_mem += PAGE_SIZE;
 	}
 	return iommu_bus_base + iommu_page*PAGE_SIZE + (phys_mem & ~PAGE_MASK);
@@ -294,7 +266,6 @@ static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	for (i = 0; i < npages; i++) {
 		iommu_gatt_base[iommu_page + i] = gart_unmapped_entry;
-		CLEAR_LEAK(iommu_page + i);
 	}
 	free_iommu(iommu_page, npages);
 }
@@ -377,7 +348,6 @@ static int __dma_map_cont(struct device *dev, struct scatterlist *start,
 		pages = iommu_num_pages(s->offset, s->length, PAGE_SIZE);
 		while (pages--) {
 			iommu_gatt_base[iommu_page] = GPTE_ENCODE(addr);
-			SET_LEAK(iommu_page);
 			addr += PAGE_SIZE;
 			iommu_page++;
 		}
@@ -801,11 +771,12 @@ void __init gart_iommu_init(void)
 
 #ifdef CONFIG_IOMMU_LEAK
 	if (leak_trace) {
-		iommu_leak_tab = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
-				  get_order(iommu_pages*sizeof(void *)));
-		if (!iommu_leak_tab)
+		int ret;
+
+		ret = dma_debug_resize_entries(iommu_pages);
+		if (ret)
 			printk(KERN_DEBUG
-			       "PCI-DMA: Cannot allocate leak trace area\n");
+			       "PCI-DMA: Cannot trace all the entries\n");
 	}
 #endif
 
