author	Joerg Roedel <joerg.roedel@amd.com>	2009-05-04 12:41:16 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-05-28 12:08:58 -0400
commit	58492e128892e3b55f1a6ef0cf3c3ab4ce7cc214 (patch)
tree	f477f7c2f7290deb1325e3a91e549617ad05d08b /arch
parent	3bd221724adb9d642270df0e78b0105fb61e4a1c (diff)
amd-iommu: consolidate hardware initialization to one function
This patch restructures the AMD IOMMU initialization code to initialize
all hardware registers with a single function call. This is helpful for
suspend/resume support.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/amd_iommu_init.c	50
1 file changed, 32 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 675a4b642f70..74f4f1fea930 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -252,13 +252,6 @@ static void __init iommu_enable(struct amd_iommu *iommu)
 	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
 }
 
-/* Function to enable IOMMU event logging and event interrupts */
-static void __init iommu_enable_event_logging(struct amd_iommu *iommu)
-{
-	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
-	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
-}
-
 /*
  * mapping and unmapping functions for the IOMMU MMIO space. Each AMD IOMMU in
  * the system has one.
@@ -413,25 +406,36 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 {
 	u8 *cmd_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 			get_order(CMD_BUFFER_SIZE));
-	u64 entry;
 
 	if (cmd_buf == NULL)
 		return NULL;
 
 	iommu->cmd_buf_size = CMD_BUFFER_SIZE;
 
-	entry = (u64)virt_to_phys(cmd_buf);
+	return cmd_buf;
+}
+
+/*
+ * This function writes the command buffer address to the hardware and
+ * enables it.
+ */
+static void iommu_enable_command_buffer(struct amd_iommu *iommu)
+{
+	u64 entry;
+
+	BUG_ON(iommu->cmd_buf == NULL);
+
+	entry = (u64)virt_to_phys(iommu->cmd_buf);
 	entry |= MMIO_CMD_SIZE_512;
+
 	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
 			&entry, sizeof(entry));
 
 	/* set head and tail to zero manually */
 	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
 	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 
 	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
-
-	return cmd_buf;
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
@@ -443,20 +447,27 @@ static void __init free_command_buffer(struct amd_iommu *iommu)
 /* allocates the memory where the IOMMU will log its events to */
 static u8 * __init alloc_event_buffer(struct amd_iommu *iommu)
 {
-	u64 entry;
 	iommu->evt_buf = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
 				get_order(EVT_BUFFER_SIZE));
 
 	if (iommu->evt_buf == NULL)
 		return NULL;
 
+	return iommu->evt_buf;
+}
+
+static void iommu_enable_event_buffer(struct amd_iommu *iommu)
+{
+	u64 entry;
+
+	BUG_ON(iommu->evt_buf == NULL);
+
 	entry = (u64)virt_to_phys(iommu->evt_buf) | EVT_LEN_MASK;
+
 	memcpy_toio(iommu->mmio_base + MMIO_EVT_BUF_OFFSET,
 		    &entry, sizeof(entry));
 
-	iommu->evt_buf_size = EVT_BUFFER_SIZE;
-
-	return iommu->evt_buf;
+	iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
 }
 
 static void __init free_event_buffer(struct amd_iommu *iommu)
@@ -710,7 +721,6 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	if (!iommu->mmio_base)
 		return -ENOMEM;
 
-	iommu_set_device_table(iommu);
 	iommu->cmd_buf = alloc_command_buffer(iommu);
 	if (!iommu->cmd_buf)
 		return -ENOMEM;
@@ -837,6 +847,8 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu)
 		return 1;
 	}
 
+	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+
 	return 0;
 }
 
@@ -972,9 +984,11 @@ static void __init enable_iommus(void)
 	struct amd_iommu *iommu;
 
 	for_each_iommu(iommu) {
+		iommu_set_device_table(iommu);
+		iommu_enable_command_buffer(iommu);
+		iommu_enable_event_buffer(iommu);
 		iommu_set_exclusion_range(iommu);
 		iommu_init_msi(iommu);
-		iommu_enable_event_logging(iommu);
 		iommu_enable(iommu);
 	}
 }
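
Taken together, the hunks above leave enable_iommus() as the single place where the per-IOMMU hardware registers are programmed, which is what makes the function reusable from a resume path. Below is a minimal sketch of the resulting bring-up sequence. The __init annotation is dropped here only so that the resume hook can legally call it, and amd_iommu_resume() itself is hypothetical, illustrating the suspend/resume use case named in the commit message; neither change is part of this patch.

/*
 * Sketch: after this patch every register write happens from
 * enable_iommus(), so re-initializing the hardware is one call.
 * (In the patch itself enable_iommus() is still __init.)
 */
static void enable_iommus(void)
{
	struct amd_iommu *iommu;

	for_each_iommu(iommu) {
		iommu_set_device_table(iommu);      /* device table base           */
		iommu_enable_command_buffer(iommu); /* cmd buffer + CMDBUF_EN      */
		iommu_enable_event_buffer(iommu);   /* event log + EVT_LOG_EN      */
		iommu_set_exclusion_range(iommu);
		iommu_init_msi(iommu);              /* EVT_INT_EN now set on the MSI path */
		iommu_enable(iommu);                /* finally IOMMU_EN            */
	}
}

/* Hypothetical resume hook (not in this patch): reprogram all IOMMUs. */
static int amd_iommu_resume(void)
{
	enable_iommus();
	return 0;
}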