| author | Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | 2010-08-26 13:58:05 -0400 |
|---|---|---|
| committer | H. Peter Anvin <hpa@linux.intel.com> | 2010-08-26 18:14:52 -0400 |
| commit | ee1f284f38c8dfcbc7b656915a039dde016de7d3 | |
| tree | d89bc3d8bc153ca28e334ed991f0eaf8478b04c3 | |
| parent | 4db77ff3237a88ea74f691dd776e92b2f86a8f3f | |
x86, iommu: Utilize the IOMMU_INIT macros functionality.
We remove all of the sub-platform detection/init calls and instead
iterate over the .iommu_table array of structs, calling .early_init
for each entry whose .detect returned a positive value. We also stop
detecting further IOMMUs once an IOMMU registered with a _FINISH type
macro has been detected. During the 'pci_iommu_init' stage, we call
.late_init for the second-stage initialization if it is defined.
Currently only SWIOTLB defines it; it is used to de-allocate the
SWIOTLB buffer if the other detected IOMMUs have deemed SWIOTLB
unnecessary.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
LKML-Reference: <1282845485-8991-11-git-send-email-konrad.wilk@oracle.com>
CC: Fujita Tomonori <fujita.tomonori@lab.ntt.co.jp>
CC: Thomas Gleixner <tglx@linutronix.de>
CC: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
| -rw-r--r-- | arch/x86/kernel/pci-dma.c | 46 |
1 file changed, 21 insertions(+), 25 deletions(-)
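For context, here is a rough sketch of the registration side that the new loop in pci_iommu_alloc() consumes. The .detect/.early_init/.late_init hooks, the IOMMU_DETECTED and IOMMU_FINISH_IF_DETECTED flag names, and the __iommu_table[]/__iommu_table_end[] section bounds all appear in the diff below; the .depend member, the flag values, the section attributes, and the example functions are assumptions modeled on <asm/iommu_table.h> from this patch series, not part of this commit.

/* Sketch of an .iommu_table entry (simplified; the real definition
 * lives in <asm/iommu_table.h>).  Each sub-platform drops one of these
 * into the .iommu_table linker section, which the linker script bounds
 * with __iommu_table[] and __iommu_table_end[]. */
struct iommu_table_entry {
	int	(*detect)(void);	/* > 0 means this IOMMU is present        */
	int	(*depend)(void);	/* detect routine that must run before us */
	void	(*early_init)(void);	/* first stage, from pci_iommu_alloc()    */
	void	(*late_init)(void);	/* second stage, from pci_iommu_init()    */
	int	flags;
};

/* Flag values are assumptions; only the names appear in the diff. */
#define IOMMU_FINISH_IF_DETECTED	(1 << 0)
#define IOMMU_DETECTED			(1 << 1)

/* Hypothetical detection/init hooks, for illustration only. */
static int  example_swiotlb_detect(void)     { return 1; }
static void example_swiotlb_early_init(void) { /* bootmem-only setup */ }
static void example_swiotlb_late_init(void)  { /* free SWIOTLB if unneeded */ }

/*
 * A SWIOTLB-style "_FINISH" registration: the entry is placed in the
 * .iommu_table section (the real code wraps this in the IOMMU_INIT /
 * IOMMU_INIT_FINISH macros of this series), and IOMMU_FINISH_IF_DETECTED
 * tells pci_iommu_alloc() to stop probing other IOMMUs once this one
 * fires.  Note the entry must not be const: the loop sets IOMMU_DETECTED.
 */
static struct iommu_table_entry example_swiotlb_entry
	__attribute__((used, section(".iommu_table"), aligned(8))) = {
	.detect		= example_swiotlb_detect,
	.early_init	= example_swiotlb_early_init,
	.late_init	= example_swiotlb_late_init,
	.flags		= IOMMU_FINISH_IF_DETECTED,
};

The consumer side is exactly the two loops added below: pci_iommu_alloc() runs .detect/.early_init over the sorted table, and pci_iommu_init() later runs .late_init for every entry marked IOMMU_DETECTED.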
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 1b3beb5075e6..9ea999a4dcc1 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -11,9 +11,8 @@
 #include <asm/iommu.h>
 #include <asm/gart.h>
 #include <asm/calgary.h>
-#include <asm/amd_iommu.h>
 #include <asm/x86_init.h>
-#include <asm/xen/swiotlb-xen.h>
+#include <asm/iommu_table.h>
 
 static int forbid_dac __read_mostly;
 
@@ -45,6 +44,8 @@ int iommu_detected __read_mostly = 0;
  */
 int iommu_pass_through __read_mostly;
 
+extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];
+
 /* Dummy device used for NULL arguments (normally ISA). */
 struct device x86_dma_fallback_dev = {
 	.init_name = "fallback device",
@@ -130,28 +131,24 @@ static void __init dma32_free_bootmem(void)
 
 void __init pci_iommu_alloc(void)
 {
+	struct iommu_table_entry *p;
+
 	/* free the range so iommu could get some range less than 4G */
 	dma32_free_bootmem();
 
-	if (pci_xen_swiotlb_detect() || pci_swiotlb_detect_override())
-		goto out;
-
-	pci_swiotlb_detect_4gb();
-
-	gart_iommu_hole_init();
-
-	detect_calgary();
-
-	detect_intel_iommu();
+	sort_iommu_table(__iommu_table, __iommu_table_end);
+	check_iommu_entries(__iommu_table, __iommu_table_end);
 
-	/* needs to be called after gart_iommu_hole_init */
-	amd_iommu_detect();
-out:
-	pci_xen_swiotlb_init();
-
-	pci_swiotlb_init();
+	for (p = __iommu_table; p < __iommu_table_end; p++) {
+		if (p && p->detect && p->detect() > 0) {
+			p->flags |= IOMMU_DETECTED;
+			if (p->early_init)
+				p->early_init();
+			if (p->flags & IOMMU_FINISH_IF_DETECTED)
+				break;
+		}
+	}
 }
-
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 				 dma_addr_t *dma_addr, gfp_t flag)
 {
@@ -294,6 +291,7 @@ EXPORT_SYMBOL(dma_supported);
 
 static int __init pci_iommu_init(void)
 {
+	struct iommu_table_entry *p;
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
 
 #ifdef CONFIG_PCI
@@ -301,12 +299,10 @@ static int __init pci_iommu_init(void)
 #endif
 	x86_init.iommu.iommu_init();
 
-	if (swiotlb || xen_swiotlb) {
-		printk(KERN_INFO "PCI-DMA: "
-		       "Using software bounce buffering for IO (SWIOTLB)\n");
-		swiotlb_print_info();
-	} else
-		swiotlb_free();
+	for (p = __iommu_table; p < __iommu_table_end; p++) {
+		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
+			p->late_init();
+	}
 
 	return 0;
 }
