diff options
Diffstat (limited to 'arch/x86/kernel/amd_iommu_init.c')
-rw-r--r-- | arch/x86/kernel/amd_iommu_init.c | 119 |
1 files changed, 61 insertions, 58 deletions
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c index e60530a5f524..9c4a6f747552 100644 --- a/arch/x86/kernel/amd_iommu_init.c +++ b/arch/x86/kernel/amd_iommu_init.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2008 Advanced Micro Devices, Inc. | 2 | * Copyright (C) 2007-2009 Advanced Micro Devices, Inc. |
3 | * Author: Joerg Roedel <joerg.roedel@amd.com> | 3 | * Author: Joerg Roedel <joerg.roedel@amd.com> |
4 | * Leo Duran <leo.duran@amd.com> | 4 | * Leo Duran <leo.duran@amd.com> |
5 | * | 5 | * |
@@ -25,10 +25,12 @@ | |||
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/msi.h> | 26 | #include <linux/msi.h> |
27 | #include <asm/pci-direct.h> | 27 | #include <asm/pci-direct.h> |
28 | #include <asm/amd_iommu_proto.h> | ||
28 | #include <asm/amd_iommu_types.h> | 29 | #include <asm/amd_iommu_types.h> |
29 | #include <asm/amd_iommu.h> | 30 | #include <asm/amd_iommu.h> |
30 | #include <asm/iommu.h> | 31 | #include <asm/iommu.h> |
31 | #include <asm/gart.h> | 32 | #include <asm/gart.h> |
33 | #include <asm/x86_init.h> | ||
32 | 34 | ||
33 | /* | 35 | /* |
34 | * definitions for the ACPI scanning code | 36 | * definitions for the ACPI scanning code |
@@ -123,18 +125,24 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have | |||
123 | to handle */ | 125 | to handle */ |
124 | LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings | 126 | LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings |
125 | we find in ACPI */ | 127 | we find in ACPI */ |
126 | #ifdef CONFIG_IOMMU_STRESS | ||
127 | bool amd_iommu_isolate = false; | ||
128 | #else | ||
129 | bool amd_iommu_isolate = true; /* if true, device isolation is | ||
130 | enabled */ | ||
131 | #endif | ||
132 | |||
133 | bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ | 128 | bool amd_iommu_unmap_flush; /* if true, flush on every unmap */ |
134 | 129 | ||
135 | LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the | 130 | LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the |
136 | system */ | 131 | system */ |
137 | 132 | ||
133 | /* Array to assign indices to IOMMUs*/ | ||
134 | struct amd_iommu *amd_iommus[MAX_IOMMUS]; | ||
135 | int amd_iommus_present; | ||
136 | |||
137 | /* IOMMUs have a non-present cache? */ | ||
138 | bool amd_iommu_np_cache __read_mostly; | ||
139 | |||
140 | /* | ||
141 | * List of protection domains - used during resume | ||
142 | */ | ||
143 | LIST_HEAD(amd_iommu_pd_list); | ||
144 | spinlock_t amd_iommu_pd_lock; | ||
145 | |||
138 | /* | 146 | /* |
139 | * Pointer to the device table which is shared by all AMD IOMMUs | 147 | * Pointer to the device table which is shared by all AMD IOMMUs |
140 | * it is indexed by the PCI device id or the HT unit id and contains | 148 | * it is indexed by the PCI device id or the HT unit id and contains |
@@ -157,12 +165,6 @@ u16 *amd_iommu_alias_table; | |||
157 | struct amd_iommu **amd_iommu_rlookup_table; | 165 | struct amd_iommu **amd_iommu_rlookup_table; |
158 | 166 | ||
159 | /* | 167 | /* |
160 | * The pd table (protection domain table) is used to find the protection domain | ||
161 | * data structure a device belongs to. Indexed with the PCI device id too. | ||
162 | */ | ||
163 | struct protection_domain **amd_iommu_pd_table; | ||
164 | |||
165 | /* | ||
166 | * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap | 168 | * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap |
167 | * to know which ones are already in use. | 169 | * to know which ones are already in use. |
168 | */ | 170 | */ |
@@ -240,7 +242,7 @@ static void iommu_feature_enable(struct amd_iommu *iommu, u8 bit) | |||
240 | writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); | 242 | writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET); |
241 | } | 243 | } |
242 | 244 | ||
243 | static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit) | 245 | static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) |
244 | { | 246 | { |
245 | u32 ctrl; | 247 | u32 ctrl; |
246 | 248 | ||
@@ -519,6 +521,26 @@ static void set_dev_entry_bit(u16 devid, u8 bit) | |||
519 | amd_iommu_dev_table[devid].data[i] |= (1 << _bit); | 521 | amd_iommu_dev_table[devid].data[i] |= (1 << _bit); |
520 | } | 522 | } |
521 | 523 | ||
524 | static int get_dev_entry_bit(u16 devid, u8 bit) | ||
525 | { | ||
526 | int i = (bit >> 5) & 0x07; | ||
527 | int _bit = bit & 0x1f; | ||
528 | |||
529 | return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit; | ||
530 | } | ||
531 | |||
532 | |||
533 | void amd_iommu_apply_erratum_63(u16 devid) | ||
534 | { | ||
535 | int sysmgt; | ||
536 | |||
537 | sysmgt = get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT1) | | ||
538 | (get_dev_entry_bit(devid, DEV_ENTRY_SYSMGT2) << 1); | ||
539 | |||
540 | if (sysmgt == 0x01) | ||
541 | set_dev_entry_bit(devid, DEV_ENTRY_IW); | ||
542 | } | ||
543 | |||
522 | /* Writes the specific IOMMU for a device into the rlookup table */ | 544 | /* Writes the specific IOMMU for a device into the rlookup table */ |
523 | static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) | 545 | static void __init set_iommu_for_device(struct amd_iommu *iommu, u16 devid) |
524 | { | 546 | { |
@@ -547,6 +569,8 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, | |||
547 | if (flags & ACPI_DEVFLAG_LINT1) | 569 | if (flags & ACPI_DEVFLAG_LINT1) |
548 | set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); | 570 | set_dev_entry_bit(devid, DEV_ENTRY_LINT1_PASS); |
549 | 571 | ||
572 | amd_iommu_apply_erratum_63(devid); | ||
573 | |||
550 | set_iommu_for_device(iommu, devid); | 574 | set_iommu_for_device(iommu, devid); |
551 | } | 575 | } |
552 | 576 | ||
@@ -816,7 +840,18 @@ static void __init free_iommu_all(void) | |||
816 | static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) | 840 | static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) |
817 | { | 841 | { |
818 | spin_lock_init(&iommu->lock); | 842 | spin_lock_init(&iommu->lock); |
843 | |||
844 | /* Add IOMMU to internal data structures */ | ||
819 | list_add_tail(&iommu->list, &amd_iommu_list); | 845 | list_add_tail(&iommu->list, &amd_iommu_list); |
846 | iommu->index = amd_iommus_present++; | ||
847 | |||
848 | if (unlikely(iommu->index >= MAX_IOMMUS)) { | ||
849 | WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n"); | ||
850 | return -ENOSYS; | ||
851 | } | ||
852 | |||
853 | /* Index is fine - add IOMMU to the array */ | ||
854 | amd_iommus[iommu->index] = iommu; | ||
820 | 855 | ||
821 | /* | 856 | /* |
822 | * Copy data from ACPI table entry to the iommu struct | 857 | * Copy data from ACPI table entry to the iommu struct |
@@ -846,6 +881,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) | |||
846 | init_iommu_from_acpi(iommu, h); | 881 | init_iommu_from_acpi(iommu, h); |
847 | init_iommu_devices(iommu); | 882 | init_iommu_devices(iommu); |
848 | 883 | ||
884 | if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) | ||
885 | amd_iommu_np_cache = true; | ||
886 | |||
849 | return pci_enable_device(iommu->dev); | 887 | return pci_enable_device(iommu->dev); |
850 | } | 888 | } |
851 | 889 | ||
@@ -903,7 +941,7 @@ static int __init init_iommu_all(struct acpi_table_header *table) | |||
903 | * | 941 | * |
904 | ****************************************************************************/ | 942 | ****************************************************************************/ |
905 | 943 | ||
906 | static int __init iommu_setup_msi(struct amd_iommu *iommu) | 944 | static int iommu_setup_msi(struct amd_iommu *iommu) |
907 | { | 945 | { |
908 | int r; | 946 | int r; |
909 | 947 | ||
@@ -1154,19 +1192,10 @@ static struct sys_device device_amd_iommu = { | |||
1154 | * functions. Finally it prints some information about AMD IOMMUs and | 1192 | * functions. Finally it prints some information about AMD IOMMUs and |
1155 | * the driver state and enables the hardware. | 1193 | * the driver state and enables the hardware. |
1156 | */ | 1194 | */ |
1157 | int __init amd_iommu_init(void) | 1195 | static int __init amd_iommu_init(void) |
1158 | { | 1196 | { |
1159 | int i, ret = 0; | 1197 | int i, ret = 0; |
1160 | 1198 | ||
1161 | |||
1162 | if (no_iommu) { | ||
1163 | printk(KERN_INFO "AMD-Vi disabled by kernel command line\n"); | ||
1164 | return 0; | ||
1165 | } | ||
1166 | |||
1167 | if (!amd_iommu_detected) | ||
1168 | return -ENODEV; | ||
1169 | |||
1170 | /* | 1199 | /* |
1171 | * First parse ACPI tables to find the largest Bus/Dev/Func | 1200 | * First parse ACPI tables to find the largest Bus/Dev/Func |
1172 | * we need to handle. Upon this information the shared data | 1201 | * we need to handle. Upon this information the shared data |
@@ -1203,15 +1232,6 @@ int __init amd_iommu_init(void) | |||
1203 | if (amd_iommu_rlookup_table == NULL) | 1232 | if (amd_iommu_rlookup_table == NULL) |
1204 | goto free; | 1233 | goto free; |
1205 | 1234 | ||
1206 | /* | ||
1207 | * Protection Domain table - maps devices to protection domains | ||
1208 | * This table has the same size as the rlookup_table | ||
1209 | */ | ||
1210 | amd_iommu_pd_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
1211 | get_order(rlookup_table_size)); | ||
1212 | if (amd_iommu_pd_table == NULL) | ||
1213 | goto free; | ||
1214 | |||
1215 | amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( | 1235 | amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages( |
1216 | GFP_KERNEL | __GFP_ZERO, | 1236 | GFP_KERNEL | __GFP_ZERO, |
1217 | get_order(MAX_DOMAIN_ID/8)); | 1237 | get_order(MAX_DOMAIN_ID/8)); |
@@ -1233,6 +1253,8 @@ int __init amd_iommu_init(void) | |||
1233 | */ | 1253 | */ |
1234 | amd_iommu_pd_alloc_bitmap[0] = 1; | 1254 | amd_iommu_pd_alloc_bitmap[0] = 1; |
1235 | 1255 | ||
1256 | spin_lock_init(&amd_iommu_pd_lock); | ||
1257 | |||
1236 | /* | 1258 | /* |
1237 | * now the data structures are allocated and basically initialized | 1259 | * now the data structures are allocated and basically initialized |
1238 | * start the real acpi table scan | 1260 | * start the real acpi table scan |
@@ -1264,17 +1286,12 @@ int __init amd_iommu_init(void) | |||
1264 | if (iommu_pass_through) | 1286 | if (iommu_pass_through) |
1265 | goto out; | 1287 | goto out; |
1266 | 1288 | ||
1267 | printk(KERN_INFO "AMD-Vi: device isolation "); | ||
1268 | if (amd_iommu_isolate) | ||
1269 | printk("enabled\n"); | ||
1270 | else | ||
1271 | printk("disabled\n"); | ||
1272 | |||
1273 | if (amd_iommu_unmap_flush) | 1289 | if (amd_iommu_unmap_flush) |
1274 | printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); | 1290 | printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); |
1275 | else | 1291 | else |
1276 | printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); | 1292 | printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); |
1277 | 1293 | ||
1294 | x86_platform.iommu_shutdown = disable_iommus; | ||
1278 | out: | 1295 | out: |
1279 | return ret; | 1296 | return ret; |
1280 | 1297 | ||
@@ -1282,9 +1299,6 @@ free: | |||
1282 | free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, | 1299 | free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, |
1283 | get_order(MAX_DOMAIN_ID/8)); | 1300 | get_order(MAX_DOMAIN_ID/8)); |
1284 | 1301 | ||
1285 | free_pages((unsigned long)amd_iommu_pd_table, | ||
1286 | get_order(rlookup_table_size)); | ||
1287 | |||
1288 | free_pages((unsigned long)amd_iommu_rlookup_table, | 1302 | free_pages((unsigned long)amd_iommu_rlookup_table, |
1289 | get_order(rlookup_table_size)); | 1303 | get_order(rlookup_table_size)); |
1290 | 1304 | ||
@@ -1301,11 +1315,6 @@ free: | |||
1301 | goto out; | 1315 | goto out; |
1302 | } | 1316 | } |
1303 | 1317 | ||
1304 | void amd_iommu_shutdown(void) | ||
1305 | { | ||
1306 | disable_iommus(); | ||
1307 | } | ||
1308 | |||
1309 | /**************************************************************************** | 1318 | /**************************************************************************** |
1310 | * | 1319 | * |
1311 | * Early detect code. This code runs at IOMMU detection time in the DMA | 1320 | * Early detect code. This code runs at IOMMU detection time in the DMA |
@@ -1320,16 +1329,14 @@ static int __init early_amd_iommu_detect(struct acpi_table_header *table) | |||
1320 | 1329 | ||
1321 | void __init amd_iommu_detect(void) | 1330 | void __init amd_iommu_detect(void) |
1322 | { | 1331 | { |
1323 | if (swiotlb || no_iommu || (iommu_detected && !gart_iommu_aperture)) | 1332 | if (no_iommu || (iommu_detected && !gart_iommu_aperture)) |
1324 | return; | 1333 | return; |
1325 | 1334 | ||
1326 | if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { | 1335 | if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) { |
1327 | iommu_detected = 1; | 1336 | iommu_detected = 1; |
1328 | amd_iommu_detected = 1; | 1337 | amd_iommu_detected = 1; |
1329 | #ifdef CONFIG_GART_IOMMU | 1338 | x86_init.iommu.iommu_init = amd_iommu_init; |
1330 | gart_iommu_aperture_disabled = 1; | 1339 | |
1331 | gart_iommu_aperture = 0; | ||
1332 | #endif | ||
1333 | /* Make sure ACS will be enabled */ | 1340 | /* Make sure ACS will be enabled */ |
1334 | pci_request_acs(); | 1341 | pci_request_acs(); |
1335 | } | 1342 | } |
@@ -1352,10 +1359,6 @@ static int __init parse_amd_iommu_dump(char *str) | |||
1352 | static int __init parse_amd_iommu_options(char *str) | 1359 | static int __init parse_amd_iommu_options(char *str) |
1353 | { | 1360 | { |
1354 | for (; *str; ++str) { | 1361 | for (; *str; ++str) { |
1355 | if (strncmp(str, "isolate", 7) == 0) | ||
1356 | amd_iommu_isolate = true; | ||
1357 | if (strncmp(str, "share", 5) == 0) | ||
1358 | amd_iommu_isolate = false; | ||
1359 | if (strncmp(str, "fullflush", 9) == 0) | 1362 | if (strncmp(str, "fullflush", 9) == 0) |
1360 | amd_iommu_unmap_flush = true; | 1363 | amd_iommu_unmap_flush = true; |
1361 | } | 1364 | } |