author     Joerg Roedel <joerg.roedel@amd.com>   2009-11-27 05:40:33 -0500
committer  Joerg Roedel <joerg.roedel@amd.com>   2009-11-27 08:20:32 -0500
commit     8793abeb783c12cc37f92f6133fd6468152b98df (patch)
tree       bdeacd3c9b2870c00ff532b50969f39010179f5b
parent     171e7b3739e175eea7b32eca9dbe189589e14a28 (diff)
x86/amd-iommu: Remove support for domain sharing
This patch makes device isolation mandatory and removes
support for the amd_iommu=share option. This simplifies the
code in several places.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
-rw-r--r--  arch/x86/include/asm/amd_iommu_types.h |  3
-rw-r--r--  arch/x86/kernel/amd_iommu.c             | 10
-rw-r--r--  arch/x86/kernel/amd_iommu_init.c        | 17
3 files changed, 2 insertions, 28 deletions
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 4899f783df68..02b6a0fd863c 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -451,9 +451,6 @@ extern struct protection_domain **amd_iommu_pd_table;
 /* allocation bitmap for domain ids */
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
-/* will be 1 if device isolation is enabled */
-extern bool amd_iommu_isolate;
-
 /*
  * If true, the addresses will be flushed on unmap time, not when
  * they are reused
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 14b60c0cdc70..ed58a1688391 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -148,7 +148,6 @@ DECLARE_STATS_COUNTER(alloced_io_mem);
 DECLARE_STATS_COUNTER(total_map_requests);
 
 static struct dentry *stats_dir;
-static struct dentry *de_isolate;
 static struct dentry *de_fflush;
 
 static void amd_iommu_stats_add(struct __iommu_counter *cnt)
@@ -166,9 +165,6 @@ static void amd_iommu_stats_init(void)
 	if (stats_dir == NULL)
 		return;
 
-	de_isolate = debugfs_create_bool("isolation", 0444, stats_dir,
-					 (u32 *)&amd_iommu_isolate);
-
 	de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
 					(u32 *)&amd_iommu_unmap_flush);
 
@@ -2135,11 +2131,9 @@ int __init amd_iommu_init_dma_ops(void)
 	}
 
 	/*
-	 * If device isolation is enabled, pre-allocate the protection
-	 * domains for each device.
+	 * Pre-allocate the protection domains for each device.
 	 */
-	if (amd_iommu_isolate)
-		prealloc_protection_domains();
+	prealloc_protection_domains();
 
 	iommu_detected = 1;
 	swiotlb = 0;
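For readers unfamiliar with the debugfs call removed in the amd_iommu_stats_init() hunk above, here is a minimal, self-contained sketch of exposing a boolean flag through debugfs with the API of this kernel generation, where debugfs_create_bool() takes a u32 * and returns a struct dentry * (later kernels changed the prototype). All example_* names are hypothetical and not part of this patch:

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/module.h>

/* illustrative flag; the driver keeps its flags as bool and casts to u32 * */
static u32 example_flag = 1;

static struct dentry *example_dir;
static struct dentry *example_file;

static int __init example_debugfs_init(void)
{
	/* creates /sys/kernel/debug/example/ */
	example_dir = debugfs_create_dir("example", NULL);
	if (!example_dir)
		return -ENOMEM;

	/* read-only boolean entry "flag" below that directory */
	example_file = debugfs_create_bool("flag", 0444, example_dir,
					   &example_flag);
	return 0;
}

static void __exit example_debugfs_exit(void)
{
	debugfs_remove_recursive(example_dir);
}

module_init(example_debugfs_init);
module_exit(example_debugfs_exit);
MODULE_LICENSE("GPL");

After this patch the driver's "isolation" entry is gone along with the de_isolate dentry; the "fullflush" entry remains.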
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index fbe4c3c02a91..fe1686f6f91b 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -125,13 +125,6 @@ u16 amd_iommu_last_bdf;		/* largest PCI device id we have
 					   to handle */
 LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
 					   we find in ACPI */
-#ifdef CONFIG_IOMMU_STRESS
-bool amd_iommu_isolate = false;
-#else
-bool amd_iommu_isolate = true;		/* if true, device isolation is
-					   enabled */
-#endif
-
 bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */
 
 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
@@ -1308,12 +1301,6 @@ static int __init amd_iommu_init(void)
 	if (iommu_pass_through)
 		goto out;
 
-	printk(KERN_INFO "AMD-Vi: device isolation ");
-	if (amd_iommu_isolate)
-		printk("enabled\n");
-	else
-		printk("disabled\n");
-
 	if (amd_iommu_unmap_flush)
 		printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
 	else
@@ -1387,10 +1374,6 @@ static int __init parse_amd_iommu_dump(char *str)
 static int __init parse_amd_iommu_options(char *str)
 {
 	for (; *str; ++str) {
-		if (strncmp(str, "isolate", 7) == 0)
-			amd_iommu_isolate = true;
-		if (strncmp(str, "share", 5) == 0)
-			amd_iommu_isolate = false;
 		if (strncmp(str, "fullflush", 9) == 0)
 			amd_iommu_unmap_flush = true;
 	}
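A usage note on the last hunk: after this patch the amd_iommu= parser recognizes only "fullflush"; "isolate" and "share" are no longer handled, which is what makes per-device isolation mandatory. As a minimal sketch of this early boot-option pattern, assuming the usual __setup() registration (the registration line lies outside the hunks shown) and using hypothetical example_* names:

#include <linux/init.h>
#include <linux/string.h>
#include <linux/types.h>

static bool example_unmap_flush;	/* stands in for amd_iommu_unmap_flush */

/* receives everything after "example_iommu=" on the kernel command line */
static int __init example_parse_options(char *str)
{
	for (; *str; ++str) {
		/* scan for known keywords anywhere in the option string */
		if (strncmp(str, "fullflush", 9) == 0)
			example_unmap_flush = true;
	}

	return 1;	/* non-zero tells the kernel the option was consumed */
}

__setup("example_iommu=", example_parse_options);

Booting with example_iommu=fullflush would set the flag; any other keyword handed to this parser is silently ignored, just as amd_iommu=share now is for the real driver.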