Diffstat (limited to 'arch/x86/kernel/pci-dma.c')

-rw-r--r--	arch/x86/kernel/pci-dma.c	110
1 files changed, 22 insertions, 88 deletions
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 9f07cfcbd3a5..b49d00da2aed 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,9 +11,8 @@
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/calgary.h>
-#include <asm/amd_iommu.h>
 #include <asm/x86_init.h>
-#include <asm/xen/swiotlb-xen.h>
+#include <asm/iommu_table.h>
 
 static int forbid_dac __read_mostly;
 
@@ -45,6 +44,8 @@ int iommu_detected __read_mostly = 0;
  */
 int iommu_pass_through __read_mostly;
 
+extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
+
 /* Dummy device used for NULL arguments (normally ISA). */
 struct device x86_dma_fallback_dev = {
 	.init_name = "fallback device",
@@ -67,89 +68,23 @@ int dma_set_mask(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_set_mask);
 
-#if defined(CONFIG_X86_64) && !defined(CONFIG_NUMA)
-static __initdata void *dma32_bootmem_ptr;
-static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
-
-static int __init parse_dma32_size_opt(char *p)
-{
-	if (!p)
-		return -EINVAL;
-	dma32_bootmem_size = memparse(p, &p);
-	return 0;
-}
-early_param("dma32_size", parse_dma32_size_opt);
-
-void __init dma32_reserve_bootmem(void)
-{
-	unsigned long size, align;
-	if (max_pfn <= MAX_DMA32_PFN)
-		return;
-
-	/*
-	 * check aperture_64.c allocate_aperture() for reason about
-	 * using 512M as goal
-	 */
-	align = 64ULL<<20;
-	size = roundup(dma32_bootmem_size, align);
-	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
-				 512ULL<<20);
-	/*
-	 * Kmemleak should not scan this block as it may not be mapped via the
-	 * kernel direct mapping.
-	 */
-	kmemleak_ignore(dma32_bootmem_ptr);
-	if (dma32_bootmem_ptr)
-		dma32_bootmem_size = size;
-	else
-		dma32_bootmem_size = 0;
-}
-static void __init dma32_free_bootmem(void)
-{
-
-	if (max_pfn <= MAX_DMA32_PFN)
-		return;
-
-	if (!dma32_bootmem_ptr)
-		return;
-
-	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);
-
-	dma32_bootmem_ptr = NULL;
-	dma32_bootmem_size = 0;
-}
-#else
-void __init dma32_reserve_bootmem(void)
-{
-}
-static void __init dma32_free_bootmem(void)
-{
-}
-
-#endif
-
 void __init pci_iommu_alloc(void)
 {
-	/* free the range so iommu could get some range less than 4G */
-	dma32_free_bootmem();
-
-	if (pci_xen_swiotlb_detect() || pci_swiotlb_detect())
-		goto out;
-
-	gart_iommu_hole_init();
-
-	detect_calgary();
-
-	detect_intel_iommu();
-
-	/* needs to be called after gart_iommu_hole_init */
-	amd_iommu_detect();
-out:
-	pci_xen_swiotlb_init();
-
-	pci_swiotlb_init();
+	struct iommu_table_entry *p;
+
+	sort_iommu_table(__iommu_table, __iommu_table_end);
+	check_iommu_entries(__iommu_table, __iommu_table_end);
+
+	for (p = __iommu_table; p < __iommu_table_end; p++) {
+		if (p && p->detect && p->detect() > 0) {
+			p->flags |= IOMMU_DETECTED;
+			if (p->early_init)
+				p->early_init();
+			if (p->flags & IOMMU_FINISH_IF_DETECTED)
+				break;
+		}
+	}
 }
-
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 				 dma_addr_t *dma_addr, gfp_t flag)
 {
@@ -292,6 +227,7 @@ EXPORT_SYMBOL(dma_supported);
 
 static int __init pci_iommu_init(void)
 {
+	struct iommu_table_entry *p;
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 
 #ifdef CONFIG_PCI
@@ -299,12 +235,10 @@ static int __init pci_iommu_init(void)
 #endif
 	x86_init.iommu.iommu_init();
 
-	if (swiotlb || xen_swiotlb) {
-		printk(KERN_INFO "PCI-DMA: "
-			"Using software bounce buffering for IO (SWIOTLB)\n");
-		swiotlb_print_info();
-	} else
-		swiotlb_free();
+	for (p = __iommu_table; p < __iommu_table_end; p++) {
+		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
+			p->late_init();
+	}
 
 	return 0;
 }
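
For context: __iommu_table[] and __iommu_table_end[] bracket a dedicated linker section that the IOMMU_INIT_* registration macros populate at build time, one entry per detection routine; sort_iommu_table() orders the entries so each one runs after the routine named in its depend field, and check_iommu_entries() sanity-checks the resulting order. The entry layout lives in the new <asm/iommu_table.h>. The sketch below is reconstructed from how this patch uses the fields; the depend member and the comments are recalled from that header rather than quoted verbatim:

struct iommu_table_entry {
	initcall_t	detect;			/* returns > 0 if the IOMMU is present */
	initcall_t	depend;			/* detect routine that must run first */
	void		(*early_init)(void);	/* before memory allocators are up */
	void		(*late_init)(void);	/* from an initcall; may allocate */
#define IOMMU_FINISH_IF_DETECTED	(1<<0)
#define IOMMU_DETECTED			(1<<1)
	int		flags;
};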
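
The resulting control flow is simple enough to model outside the kernel. Below is a minimal, self-contained C sketch of the two-pass walk this patch introduces: pass one (pci_iommu_alloc()) runs detect() on each sorted entry, flags hits, early-initializes them, and stops at the first entry marked IOMMU_FINISH_IF_DETECTED; pass two (pci_iommu_init(), run much later as an initcall) late-initializes whatever was detected. The two fake drivers and everything else here are illustrative only, not kernel code:

/* Userspace model of the IOMMU detection-table walk. Compile with:
 *   cc -Wall iommu_table_demo.c && ./a.out
 */
#include <stdio.h>

#define IOMMU_FINISH_IF_DETECTED (1 << 0)
#define IOMMU_DETECTED           (1 << 1)

struct iommu_table_entry {
	int  (*detect)(void);     /* > 0 means "this IOMMU is present" */
	void (*early_init)(void); /* no allocator available yet */
	void (*late_init)(void);  /* initcall time; may allocate */
	int  flags;
};

static int  fake_gart_detect(void)    { return 0; } /* hardware absent */
static int  fake_swiotlb_detect(void) { return 1; } /* fallback: present */
static void fake_swiotlb_early(void)  { puts("swiotlb early_init"); }
static void fake_swiotlb_late(void)   { puts("swiotlb late_init"); }

/* Stands in for the linker-section table, already dependency-sorted. */
static struct iommu_table_entry table[] = {
	{ fake_gart_detect,    NULL,               NULL,              0 },
	{ fake_swiotlb_detect, fake_swiotlb_early, fake_swiotlb_late,
	  IOMMU_FINISH_IF_DETECTED },
};

int main(void)
{
	struct iommu_table_entry *p;
	struct iommu_table_entry *end = table + sizeof(table) / sizeof(table[0]);

	/* Pass 1: mirrors pci_iommu_alloc() -- detect and early-init. */
	for (p = table; p < end; p++) {
		if (p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}

	/* Pass 2: mirrors pci_iommu_init() -- late-init detected entries. */
	for (p = table; p < end; p++)
		if ((p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();

	return 0;
}

The design point the patch makes is visible in the sketch: pci-dma.c no longer names GART, Calgary, VT-d, AMD IOMMU, or SWIOTLB directly; each driver registers its own detect/init hooks, and ordering constraints (such as "AMD IOMMU after gart_iommu_hole_init") move into per-entry dependency metadata instead of hardcoded call order.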