Diffstat (limited to 'arch/x86/kernel/amd_iommu_init.c')
 arch/x86/kernel/amd_iommu_init.c | 56 ++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 40 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9dc91b431470..3bacb4d0844c 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -19,8 +19,8 @@
 
 #include <linux/pci.h>
 #include <linux/acpi.h>
-#include <linux/gfp.h>
 #include <linux/list.h>
+#include <linux/slab.h>
 #include <linux/sysdev.h>
 #include <linux/interrupt.h>
 #include <linux/msi.h>
@@ -120,6 +120,7 @@ struct ivmd_header {
 bool amd_iommu_dump;
 
 static int __initdata amd_iommu_detected;
+static bool __initdata amd_iommu_disabled;
 
 u16 amd_iommu_last_bdf;			/* largest PCI device id we have
 					   to handle */
@@ -138,9 +139,9 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 
 /*
- * Set to true if ACPI table parsing and hardware intialization went properly
+ * The ACPI table parsing functions set this variable on an error
  */
-static bool amd_iommu_initialized;
+static int __initdata amd_iommu_init_err;
 
 /*
  * List of protection domains - used during resume
@@ -391,9 +392,11 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
 	 */
 	for (i = 0; i < table->length; ++i)
 		checksum += p[i];
-	if (checksum != 0)
+	if (checksum != 0) {
 		/* ACPI table corrupt */
-		return -ENODEV;
+		amd_iommu_init_err = -ENODEV;
+		return 0;
+	}
 
 	p += IVRS_HEADER_LENGTH;
 
@@ -436,7 +439,7 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 	if (cmd_buf == NULL)
 		return NULL;
 
-	iommu->cmd_buf_size = CMD_BUFFER_SIZE;
+	iommu->cmd_buf_size = CMD_BUFFER_SIZE | CMD_BUFFER_UNINITIALIZED;
 
 	return cmd_buf;
 }
@@ -472,12 +475,13 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
 		    &entry, sizeof(entry));
 
 	amd_iommu_reset_cmd_buffer(iommu);
+	iommu->cmd_buf_size &= ~(CMD_BUFFER_UNINITIALIZED);
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
 {
 	free_pages((unsigned long)iommu->cmd_buf,
-		   get_order(iommu->cmd_buf_size));
+		   get_order(iommu->cmd_buf_size & ~(CMD_BUFFER_UNINITIALIZED)));
 }
 
 /* allocates the memory where the IOMMU will log its events to */
@@ -920,11 +924,16 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 			    h->mmio_phys);
 
 		iommu = kzalloc(sizeof(struct amd_iommu), GFP_KERNEL);
-		if (iommu == NULL)
-			return -ENOMEM;
+		if (iommu == NULL) {
+			amd_iommu_init_err = -ENOMEM;
+			return 0;
+		}
+
 		ret = init_iommu_one(iommu, h);
-		if (ret)
-			return ret;
+		if (ret) {
+			amd_iommu_init_err = ret;
+			return 0;
+		}
 		break;
 	default:
 		break;
@@ -934,8 +943,6 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 	}
 	WARN_ON(p != end);
 
-	amd_iommu_initialized = true;
-
 	return 0;
 }
 
@@ -1211,6 +1218,10 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", find_last_devid_acpi) != 0)
 		return -ENODEV;
 
+	ret = amd_iommu_init_err;
+	if (ret)
+		goto out;
+
 	dev_table_size = tbl_size(DEV_TABLE_ENTRY_SIZE);
 	alias_table_size = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
 	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);
@@ -1270,12 +1281,19 @@ static int __init amd_iommu_init(void)
 	if (acpi_table_parse("IVRS", init_iommu_all) != 0)
 		goto free;
 
-	if (!amd_iommu_initialized)
+	if (amd_iommu_init_err) {
+		ret = amd_iommu_init_err;
 		goto free;
+	}
 
 	if (acpi_table_parse("IVRS", init_memory_definitions) != 0)
 		goto free;
 
+	if (amd_iommu_init_err) {
+		ret = amd_iommu_init_err;
+		goto free;
+	}
+
 	ret = sysdev_class_register(&amd_iommu_sysdev_class);
 	if (ret)
 		goto free;
@@ -1288,6 +1306,8 @@ static int __init amd_iommu_init(void)
 	if (ret)
 		goto free;
 
+	enable_iommus();
+
 	if (iommu_pass_through)
 		ret = amd_iommu_init_passthrough();
 	else
@@ -1300,8 +1320,6 @@ static int __init amd_iommu_init(void)
 
 	amd_iommu_init_notifier();
 
-	enable_iommus();
-
 	if (iommu_pass_through)
 		goto out;
 
@@ -1315,6 +1333,7 @@ out:
 	return ret;
 
 free:
+	disable_iommus();
 
 	amd_iommu_uninit_devices();
 
@@ -1354,6 +1373,9 @@ void __init amd_iommu_detect(void)
 	if (no_iommu || (iommu_detected && !gart_iommu_aperture))
 		return;
 
+	if (amd_iommu_disabled)
+		return;
+
 	if (acpi_table_parse("IVRS", early_amd_iommu_detect) == 0) {
 		iommu_detected = 1;
 		amd_iommu_detected = 1;
@@ -1383,6 +1405,8 @@ static int __init parse_amd_iommu_options(char *str)
 	for (; *str; ++str) {
 		if (strncmp(str, "fullflush", 9) == 0)
 			amd_iommu_unmap_flush = true;
+		if (strncmp(str, "off", 3) == 0)
+			amd_iommu_disabled = true;
 	}
 
 	return 1;