| author | FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp> | 2008-09-19 12:23:30 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-09-22 14:43:37 -0400 |
| commit | afa9fdc2f5f8e4d98f3e77bfa204412cbc181346 (patch) | |
| tree | 8e6c390d8dc18734f6071d2ddd7a2bca01fe9000 /arch | |
| parent | ed6dc4981368aa8ac89b0ea61535cfa2b03533cb (diff) | |
iommu: remove fullflush and nofullflush in IOMMU generic option
This patch against tip/x86/iommu virtually reverts
2842e5bf3115193f05dc9dac20f940e7abf44c1a. Just reverting that commit
would break AMD IOMMU, so this patch also includes some fixes. With the
revert, fullflush and nofullflush become GART-specific options again
(parsed in gart_parse_options()), and the AMD IOMMU driver gains its own
amd_iommu=fullflush option (amd_iommu_unmap_flush) instead.

The reverted commit added two new options, fullflush and nofullflush, to
the generic x86 IOMMU kernel boot options. A change like that, which
affects all the IOMMUs, needs more discussion first (every IOMMU party
needs the chance to comment on it):
http://lkml.org/lkml/2008/9/19/106
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
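For readers unfamiliar with how these boot options are consumed: parse_amd_iommu_options() (shown in the last hunk of amd_iommu_init.c below) simply walks the option string character by character and tests each position with strncmp(). The following is a minimal userspace sketch of that pattern, not the kernel code itself; the flag names and the parse_options() helper are stand-ins chosen for readability.

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in flags; in the kernel these roles are played by
 * amd_iommu_isolate and amd_iommu_unmap_flush. */
static int  isolate;
static bool unmap_flush;

/* Walk the option string one character at a time and test every
 * position against the known keywords -- the same strncmp() scan used
 * by the AMD IOMMU option parser in the diff below. */
static void parse_options(const char *str)
{
	for (; *str; ++str) {
		if (strncmp(str, "isolate", 7) == 0)
			isolate = 1;
		if (strncmp(str, "fullflush", 9) == 0)
			unmap_flush = true;
	}
}

int main(void)
{
	/* roughly what a boot line like amd_iommu=isolate,fullflush hands over */
	parse_options("isolate,fullflush");
	printf("isolate=%d unmap_flush=%d\n", isolate, unmap_flush);
	return 0;
}
```

One detail worth noting: the sketch passes the keyword's actual length (9) to strncmp(), whereas the hunk added to parse_amd_iommu_options() below passes 11, so that comparison only succeeds when "fullflush" sits at the very end of the option string.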
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kernel/amd_iommu.c | 4 |
-rw-r--r-- | arch/x86/kernel/amd_iommu_init.c | 5 |
-rw-r--r-- | arch/x86/kernel/pci-dma.c | 13 |
-rw-r--r-- | arch/x86/kernel/pci-gart_64.c | 13 |
4 files changed, 19 insertions, 16 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 70537d117a9..c19212191c9 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -948,7 +948,7 @@ static dma_addr_t __map_single(struct device *dev,
 	}
 	address += offset;
 
-	if (unlikely(dma_dom->need_flush && !iommu_fullflush)) {
+	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
 		iommu_flush_tlb(iommu, dma_dom->domain.id);
 		dma_dom->need_flush = false;
 	} else if (unlikely(iommu_has_npcache(iommu)))
@@ -985,7 +985,7 @@ static void __unmap_single(struct amd_iommu *iommu,
 
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
-	if (iommu_fullflush)
+	if (amd_iommu_unmap_flush)
 		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
 }
 
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index db0c83af44d..148fcfe22f1 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -122,6 +122,7 @@ LIST_HEAD(amd_iommu_unity_map);	/* a list of required unity mappings
 					   we find in ACPI */
 unsigned amd_iommu_aperture_order = 26; /* size of aperture in power of 2 */
 int amd_iommu_isolate;			/* if 1, device isolation is enabled */
+bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */
 
 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
 					   system */
@@ -1144,7 +1145,7 @@ int __init amd_iommu_init(void)
 	else
 		printk("disabled\n");
 
-	if (iommu_fullflush)
+	if (amd_iommu_unmap_flush)
 		printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n");
 	else
 		printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n");
@@ -1214,6 +1215,8 @@ static int __init parse_amd_iommu_options(char *str)
 	for (; *str; ++str) {
 		if (strncmp(str, "isolate", 7) == 0)
 			amd_iommu_isolate = 1;
+		if (strncmp(str, "fullflush", 11) == 0)
+			amd_iommu_unmap_flush = true;
 	}
 
 	return 1;
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index d2f2c0158dc..0a1408abcc6 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -16,15 +16,6 @@ EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
 
-/*
- * If this is disabled the IOMMU will use an optimized flushing strategy
- * of only flushing when an mapping is reused. With it true the GART is
- * flushed for every mapping. Problem is that doing the lazy flush seems
- * to trigger bugs with some popular PCI cards, in particular 3ware (but
- * has been also also seen with Qlogic at least).
- */
-int iommu_fullflush;
-
 #ifdef CONFIG_IOMMU_DEBUG
 int panic_on_overflow __read_mostly = 1;
 int force_iommu __read_mostly = 1;
@@ -180,10 +171,6 @@ static __init int iommu_setup(char *p)
 	}
 	if (!strncmp(p, "nomerge", 7))
 		iommu_merge = 0;
-	if (!strncmp(p, "fullflush", 8))
-		iommu_fullflush = 1;
-	if (!strncmp(p, "nofullflush", 11))
-		iommu_fullflush = 0;
 	if (!strncmp(p, "forcesac", 8))
 		iommu_sac_force = 1;
 	if (!strncmp(p, "allowdac", 8))
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index 3dcb1ad86e3..9e390f1bd46 100644
--- a/arch/x86/kernel/pci-gart_64.c
+++ b/arch/x86/kernel/pci-gart_64.c
@@ -45,6 +45,15 @@ static unsigned long iommu_pages;	/* .. and in pages */
 
 static u32 *iommu_gatt_base;		/* Remapping table */
 
+/*
+ * If this is disabled the IOMMU will use an optimized flushing strategy
+ * of only flushing when an mapping is reused. With it true the GART is
+ * flushed for every mapping. Problem is that doing the lazy flush seems
+ * to trigger bugs with some popular PCI cards, in particular 3ware (but
+ * has been also also seen with Qlogic at least).
+ */
+int iommu_fullflush = 1;
+
 /* Allocation bitmap for the remapping area: */
 static DEFINE_SPINLOCK(iommu_bitmap_lock);
 /* Guarded by iommu_bitmap_lock: */
@@ -892,6 +901,10 @@ void __init gart_parse_options(char *p)
 #endif
 	if (isdigit(*p) && get_option(&p, &arg))
 		iommu_size = arg;
+	if (!strncmp(p, "fullflush", 8))
+		iommu_fullflush = 1;
+	if (!strncmp(p, "nofullflush", 11))
+		iommu_fullflush = 0;
 	if (!strncmp(p, "noagp", 5))
 		no_agp = 1;
 	if (!strncmp(p, "noaperture", 10))