about summary refs log tree commit diff stats
path: root/arch/x86/kernel/pci-dma.c
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
committerGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
commitc71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
treeecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/x86/kernel/pci-dma.c
parentea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlpwip-k-fmlp
Conflicts: litmus/sched_cedf.c
Diffstat (limited to 'arch/x86/kernel/pci-dma.c')
-rw-r--r--  arch/x86/kernel/pci-dma.c  110
1 files changed, 22 insertions, 88 deletions
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 9f07cfcbd3a5..b49d00da2aed 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,9 +11,8 @@
11#include <asm/iommu.h> 11#include <asm/iommu.h>
12#include <asm/gart.h> 12#include <asm/gart.h>
13#include <asm/calgary.h> 13#include <asm/calgary.h>
14#include <asm/amd_iommu.h>
15#include <asm/x86_init.h> 14#include <asm/x86_init.h>
16#include <asm/xen/swiotlb-xen.h> 15#include <asm/iommu_table.h>
17 16
18static int forbid_dac __read_mostly; 17static int forbid_dac __read_mostly;
19 18
@@ -45,6 +44,8 @@ int iommu_detected __read_mostly = 0;
45 */ 44 */
46int iommu_pass_through __read_mostly; 45int iommu_pass_through __read_mostly;
47 46
47extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
48
48/* Dummy device used for NULL arguments (normally ISA). */ 49/* Dummy device used for NULL arguments (normally ISA). */
49struct device x86_dma_fallback_dev = { 50struct device x86_dma_fallback_dev = {
50 .init_name = "fallback device", 51 .init_name = "fallback device",
@@ -67,89 +68,23 @@ int dma_set_mask(struct device *dev, u64 mask)
67} 68}
68EXPORT_SYMBOL(dma_set_mask); 69EXPORT_SYMBOL(dma_set_mask);
69 70
70#if defined(CONFIG_X86_64) && !defined(CONFIG_NUMA)
71static __initdata void *dma32_bootmem_ptr;
72static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
73
74static int __init parse_dma32_size_opt(char *p)
75{
76 if (!p)
77 return -EINVAL;
78 dma32_bootmem_size = memparse(p, &p);
79 return 0;
80}
81early_param("dma32_size", parse_dma32_size_opt);
82
83void __init dma32_reserve_bootmem(void)
84{
85 unsigned long size, align;
86 if (max_pfn <= MAX_DMA32_PFN)
87 return;
88
89 /*
90 * check aperture_64.c allocate_aperture() for reason about
91 * using 512M as goal
92 */
93 align = 64ULL<<20;
94 size = roundup(dma32_bootmem_size, align);
95 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
96 512ULL<<20);
97 /*
98 * Kmemleak should not scan this block as it may not be mapped via the
99 * kernel direct mapping.
100 */
101 kmemleak_ignore(dma32_bootmem_ptr);
102 if (dma32_bootmem_ptr)
103 dma32_bootmem_size = size;
104 else
105 dma32_bootmem_size = 0;
106}
107static void __init dma32_free_bootmem(void)
108{
109
110 if (max_pfn <= MAX_DMA32_PFN)
111 return;
112
113 if (!dma32_bootmem_ptr)
114 return;
115
116 free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);
117
118 dma32_bootmem_ptr = NULL;
119 dma32_bootmem_size = 0;
120}
121#else
122void __init dma32_reserve_bootmem(void)
123{
124}
125static void __init dma32_free_bootmem(void)
126{
127}
128
129#endif
130
131void __init pci_iommu_alloc(void) 71void __init pci_iommu_alloc(void)
132{ 72{
133 /* free the range so iommu could get some range less than 4G */ 73 struct iommu_table_entry *p;
134 dma32_free_bootmem(); 74
135 75 sort_iommu_table(__iommu_table, __iommu_table_end);
136 if (pci_xen_swiotlb_detect() || pci_swiotlb_detect()) 76 check_iommu_entries(__iommu_table, __iommu_table_end);
137 goto out; 77
138 78 for (p = __iommu_table; p < __iommu_table_end; p++) {
139 gart_iommu_hole_init(); 79 if (p && p->detect && p->detect() > 0) {
140 80 p->flags |= IOMMU_DETECTED;
141 detect_calgary(); 81 if (p->early_init)
142 82 p->early_init();
143 detect_intel_iommu(); 83 if (p->flags & IOMMU_FINISH_IF_DETECTED)
144 84 break;
145 /* needs to be called after gart_iommu_hole_init */ 85 }
146 amd_iommu_detect(); 86 }
147out:
148 pci_xen_swiotlb_init();
149
150 pci_swiotlb_init();
151} 87}
152
153void *dma_generic_alloc_coherent(struct device *dev, size_t size, 88void *dma_generic_alloc_coherent(struct device *dev, size_t size,
154 dma_addr_t *dma_addr, gfp_t flag) 89 dma_addr_t *dma_addr, gfp_t flag)
155{ 90{
@@ -292,6 +227,7 @@ EXPORT_SYMBOL(dma_supported);
292 227
293static int __init pci_iommu_init(void) 228static int __init pci_iommu_init(void)
294{ 229{
230 struct iommu_table_entry *p;
295 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); 231 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
296 232
297#ifdef CONFIG_PCI 233#ifdef CONFIG_PCI
@@ -299,12 +235,10 @@ static int __init pci_iommu_init(void)
299#endif 235#endif
300 x86_init.iommu.iommu_init(); 236 x86_init.iommu.iommu_init();
301 237
302 if (swiotlb || xen_swiotlb) { 238 for (p = __iommu_table; p < __iommu_table_end; p++) {
303 printk(KERN_INFO "PCI-DMA: " 239 if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
304 "Using software bounce buffering for IO (SWIOTLB)\n"); 240 p->late_init();
305 swiotlb_print_info(); 241 }
306 } else
307 swiotlb_free();
308 242
309 return 0; 243 return 0;
310} 244}