diff options
| -rw-r--r-- | arch/x86/include/asm/iommu_table.h | 95 | ||||
| -rw-r--r-- | arch/x86/kernel/vmlinux.lds.S | 7 |
2 files changed, 102 insertions, 0 deletions
diff --git a/arch/x86/include/asm/iommu_table.h b/arch/x86/include/asm/iommu_table.h new file mode 100644 index 000000000000..435176f96a56 --- /dev/null +++ b/arch/x86/include/asm/iommu_table.h | |||
| @@ -0,0 +1,95 @@ | |||
| 1 | |||
| 2 | #ifndef _ASM_X86_IOMMU_TABLE_H | ||
| 3 | #define _ASM_X86_IOMMU_TABLE_H | ||
| 4 | |||
| 5 | #include <asm/swiotlb.h> | ||
| 6 | |||
| 7 | /* | ||
| 8 | * History lesson: | ||
| 9 | * The execution chain of IOMMUs in 2.6.36 looks as so: | ||
| 10 | * | ||
| 11 | * [xen-swiotlb] | ||
| 12 | * | | ||
| 13 | * +----[swiotlb *]--+ | ||
| 14 | * / | \ | ||
| 15 | * / | \ | ||
| 16 | * [GART] [Calgary] [Intel VT-d] | ||
| 17 | * / | ||
| 18 | * / | ||
| 19 | * [AMD-Vi] | ||
| 20 | * | ||
| 21 | * *: if SWIOTLB detected 'iommu=soft'/'swiotlb=force' it would skip | ||
| 22 | * over the rest of IOMMUs and unconditionally initialize the SWIOTLB. | ||
 | 23 | * Also it would surreptitiously set swiotlb=1 if there were | ||
 | 24 | * more than 4GB of memory and the user did not pass in 'iommu=off'. The swiotlb | ||
| 25 | * flag would be turned off by all IOMMUs except the Calgary one. | ||
| 26 | * | ||
| 27 | * The IOMMU_INIT* macros allow a similar tree (or more complex if desired) | ||
| 28 | * to be built by defining who we depend on. | ||
| 29 | * | ||
| 30 | * And all that needs to be done is to use one of the macros in the IOMMU | ||
| 31 | * and the pci-dma.c will take care of the rest. | ||
| 32 | */ | ||
| 33 | |||
/*
 * One entry in the .iommu_table link-time array. Entries are emitted by
 * the __IOMMU_INIT macro below; the initializer there lists the fields
 * positionally, so the field ORDER here is part of the contract.
 */
struct iommu_table_entry {
	initcall_t	detect;		/* detection routine; positive return => IOMMU present */
	initcall_t	depend;		/* detect routine of the IOMMU we must run after */
	void		(*early_init)(void); /* No memory allocator available. */
	void		(*late_init)(void); /* Yes, can allocate memory. */
/* Flag values for the 'flags' field below. */
#define IOMMU_FINISH_IF_DETECTED (1<<0)	/* stop walking the table once detected */
#define IOMMU_DETECTED		 (1<<1)	/* set at runtime when detect succeeded */
	int		flags;
};
| 43 | /* | ||
| 44 | * Macro fills out an entry in the .iommu_table that is equivalent | ||
| 45 | * to the fields that 'struct iommu_table_entry' has. The entries | ||
| 46 | * that are put in the .iommu_table section are not put in any order | ||
 | 47 | * hence during boot-time we will have to re-sort them based on | ||
| 48 | * dependency. */ | ||
| 49 | |||
| 50 | |||
/*
 * Emit one 'struct iommu_table_entry' into the .iommu_table section for
 * the IOMMU whose detection routine is _detect. The entry is initialized
 * positionally, matching the field order of struct iommu_table_entry.
 * NOTE: only a single 'const' — 'const ... const' was a duplicate
 * qualifier (redundant in C99, invalid in C90).
 */
#define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\
	static const struct iommu_table_entry			\
		__iommu_entry_##_detect __used			\
	__attribute__ ((unused, __section__(".iommu_table"),	\
			aligned((sizeof(void *)))))		\
	= {_detect, _depend, _early_init, _late_init,		\
	   _finish ? IOMMU_FINISH_IF_DETECTED : 0}
| 58 | /* | ||
| 59 | * The simplest IOMMU definition. Provide the detection routine | ||
| 60 | * and it will be run after the SWIOTLB and the other IOMMUs | ||
| 61 | * that utilize this macro. If the IOMMU is detected (ie, the | ||
| 62 | * detect routine returns a positive value), the other IOMMUs | ||
| 63 | * are also checked. You can use IOMMU_INIT_FINISH if you prefer | ||
| 64 | * to stop detecting the other IOMMUs after yours has been detected. | ||
| 65 | */ | ||
/*
 * Run _detect after pci_swiotlb_detect; keep checking other IOMMUs
 * even if this one is found.
 */
#define IOMMU_INIT_POST(_detect)				\
	__IOMMU_INIT(_detect, pci_swiotlb_detect, 0, 0, 0)

/*
 * Same as IOMMU_INIT_POST, but stop walking the table once this
 * IOMMU has been detected. BUGFIX: the parameter was named 'detect'
 * while the body pasted '_detect', so the macro never expanded the
 * caller's argument.
 */
#define IOMMU_INIT_POST_FINISH(_detect)				\
	__IOMMU_INIT(_detect, pci_swiotlb_detect, 0, 0, 1)
| 71 | |||
| 72 | /* | ||
| 73 | * A more sophisticated version of IOMMU_INIT. This variant requires: | ||
| 74 | * a). A detection routine function. | ||
| 75 | * b). The name of the detection routine we depend on to get called | ||
| 76 | * before us. | ||
| 77 | * c). The init routine which gets called if the detection routine | ||
| 78 | * returns a positive value from the pci_iommu_alloc. This means | ||
| 79 | * no presence of a memory allocator. | ||
| 80 | * d). Similar to the 'init', except that this gets called from pci_iommu_init | ||
| 81 | * where we do have a memory allocator. | ||
| 82 | * | ||
| 83 | * The _CONT vs the _EXIT differs in that the _CONT variant will | ||
 | 84 | * continue detecting other IOMMUs in the call list after the | ||
 | 85 | * detection routine returns a positive number. The _EXIT will | ||
| 86 | * stop the execution chain. Both will still call the 'init' and | ||
| 87 | * 'late_init' functions if they are set. | ||
| 88 | */ | ||
/* Detected => stop walking the rest of the table (_EXIT semantics). */
#define IOMMU_INIT_FINISH(_detect, _depend, _init, _late_init)	\
	__IOMMU_INIT(_detect, _depend, _init, _late_init, 1)

/* Detected => keep checking the remaining IOMMUs (_CONT semantics). */
#define IOMMU_INIT(_detect, _depend, _init, _late_init)		\
	__IOMMU_INIT(_detect, _depend, _init, _late_init, 0)
| 94 | |||
| 95 | #endif /* _ASM_X86_IOMMU_TABLE_H */ | ||
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index d0bb52296fa3..b92e040466c1 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
| @@ -260,6 +260,13 @@ SECTIONS | |||
| 260 | *(.altinstr_replacement) | 260 | *(.altinstr_replacement) |
| 261 | } | 261 | } |
| 262 | 262 | ||
| 263 | .iommu_table : AT(ADDR(.iommu_table) - LOAD_OFFSET) { | ||
| 264 | __iommu_table = .; | ||
| 265 | *(.iommu_table) | ||
| 266 | . = ALIGN(8); | ||
| 267 | __iommu_table_end = .; | ||
| 268 | } | ||
| 269 | |||
| 263 | /* | 270 | /* |
| 264 | * .exit.text is discard at runtime, not link time, to deal with | 271 | * .exit.text is discard at runtime, not link time, to deal with |
| 265 | * references from .altinstructions and .eh_frame | 272 | * references from .altinstructions and .eh_frame |
