Diffstat (limited to 'drivers/iommu/amd_iommu_init.c')
-rw-r--r--   drivers/iommu/amd_iommu_init.c | 133
1 file changed, 120 insertions(+), 13 deletions(-)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 82d2410f4205..bdea288dc185 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/msi.h>
 #include <linux/amd-iommu.h>
+#include <linux/export.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
 #include <asm/gart.h>
@@ -141,6 +142,12 @@ int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 bool amd_iommu_iotlb_sup __read_mostly = true;
 
+u32 amd_iommu_max_pasids __read_mostly = ~0;
+
+bool amd_iommu_v2_present __read_mostly;
+
+bool amd_iommu_force_isolation __read_mostly;
+
 /*
  * The ACPI table parsing functions set this variable on an error
  */
@@ -299,6 +306,16 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
 }
 
+static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
+{
+	u32 ctrl;
+
+	ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+	ctrl &= ~CTRL_INV_TO_MASK;
+	ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
+	writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
+
 /* Function to enable the hardware */
 static void iommu_enable(struct amd_iommu *iommu)
 {
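
The helper added here follows the standard read-modify-write pattern for a multi-bit field in an MMIO control register: read the register, clear the field through its mask, OR in the shifted new value, and write the result back. A minimal user-space sketch of the same pattern follows; FIELD_SHIFT and FIELD_MASK are illustrative stand-ins for CONTROL_INV_TIMEOUT and CTRL_INV_TO_MASK, whose real definitions live in amd_iommu_types.h and are not part of this diff.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins only: a 3-bit field at bits 7:5, not the documented
     * InvTimeOut encoding. */
    #define FIELD_SHIFT 5
    #define FIELD_MASK  (7U << FIELD_SHIFT)

    static uint32_t set_field(uint32_t ctrl, uint32_t val)
    {
            ctrl &= ~FIELD_MASK;                       /* clear the old value */
            ctrl |= (val << FIELD_SHIFT) & FIELD_MASK; /* insert the new one */
            return ctrl;
    }

    int main(void)
    {
            printf("0x%08x\n", set_field(0xffffffffu, 4)); /* prints 0xffffff9f */
            return 0;
    }
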
@@ -581,21 +598,69 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
 	free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
 }
 
+/* allocates the memory where the IOMMU will log peripheral page requests */
+static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
+{
+	iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+						get_order(PPR_LOG_SIZE));
+
+	if (iommu->ppr_log == NULL)
+		return NULL;
+
+	return iommu->ppr_log;
+}
+
+static void iommu_enable_ppr_log(struct amd_iommu *iommu)
+{
+	u64 entry;
+
+	if (iommu->ppr_log == NULL)
+		return;
+
+	entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+
+	memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
+		    &entry, sizeof(entry));
+
+	/* set head and tail to zero manually */
+	writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+	writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+	iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+	iommu_feature_enable(iommu, CONTROL_PPR_EN);
+}
+
+static void __init free_ppr_log(struct amd_iommu *iommu)
+{
+	if (iommu->ppr_log == NULL)
+		return;
+
+	free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+}
+
+static void iommu_enable_gt(struct amd_iommu *iommu)
+{
+	if (!iommu_feature(iommu, FEATURE_GT))
+		return;
+
+	iommu_feature_enable(iommu, CONTROL_GT_EN);
+}
+
 /* sets a specific bit in the device table entry. */
 static void set_dev_entry_bit(u16 devid, u8 bit)
 {
-	int i = (bit >> 5) & 0x07;
-	int _bit = bit & 0x1f;
+	int i = (bit >> 6) & 0x03;
+	int _bit = bit & 0x3f;
 
-	amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
+	amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
 }
 
 static int get_dev_entry_bit(u16 devid, u8 bit)
 {
-	int i = (bit >> 5) & 0x07;
-	int _bit = bit & 0x1f;
+	int i = (bit >> 6) & 0x03;
+	int _bit = bit & 0x3f;
 
-	return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
+	return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
 }
 
 
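
Two things happen in the set_dev_entry_bit()/get_dev_entry_bit() hunk above. First, the indexing math switches from eight 32-bit words ((bit >> 5) & 0x07 with bit & 0x1f) to four 64-bit words ((bit >> 6) & 0x03 with bit & 0x3f), presumably matching a struct dev_table_entry whose data[] array is now u64; the header change is not in this diff. Second, the shift constant becomes 1UL: with 64-bit words, _bit can exceed 31, and shifting a plain int by that much is undefined. Both schemes select the same absolute bit within the 256-bit entry, which the following sketch verifies:

    #include <assert.h>

    int main(void)
    {
            for (unsigned bit = 0; bit < 256; bit++) {
                    /* old layout: u32 data[8] */
                    unsigned old_abs = ((bit >> 5) & 0x07) * 32 + (bit & 0x1f);
                    /* new layout: u64 data[4] */
                    unsigned new_abs = ((bit >> 6) & 0x03) * 64 + (bit & 0x3f);
                    assert(old_abs == bit && new_abs == bit);
            }
            return 0;
    }
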
@@ -699,6 +764,32 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
 
 	iommu->features = ((u64)high << 32) | low;
 
+	if (iommu_feature(iommu, FEATURE_GT)) {
+		int glxval;
+		u32 pasids;
+		u64 shift;
+
+		shift   = iommu->features & FEATURE_PASID_MASK;
+		shift >>= FEATURE_PASID_SHIFT;
+		pasids  = (1 << shift);
+
+		amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+
+		glxval   = iommu->features & FEATURE_GLXVAL_MASK;
+		glxval >>= FEATURE_GLXVAL_SHIFT;
+
+		if (amd_iommu_max_glx_val == -1)
+			amd_iommu_max_glx_val = glxval;
+		else
+			amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+	}
+
+	if (iommu_feature(iommu, FEATURE_GT) &&
+	    iommu_feature(iommu, FEATURE_PPR)) {
+		iommu->is_iommu_v2   = true;
+		amd_iommu_v2_present = true;
+	}
+
 	if (!is_rd890_iommu(iommu->dev))
 		return;
 
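
The PASID field of the extended feature register is an exponent, so the supported PASID count is 1 << field; taking min() across all IOMMUs keeps amd_iommu_max_pasids at a value every unit in the system can handle, and the same min() logic bounds the GLX value. A sketch of the extraction follows, with an assumed field position; the authoritative FEATURE_PASID_MASK and FEATURE_PASID_SHIFT come from amd_iommu_types.h, which this diff does not show.

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed 5-bit PASID-width field at bits 36:32 -- illustrative,
     * not the authoritative register layout. */
    #define EX_PASID_SHIFT 32
    #define EX_PASID_MASK  (0x1fULL << EX_PASID_SHIFT)

    int main(void)
    {
            uint64_t features = 0x0fULL << EX_PASID_SHIFT; /* field value 15 */
            uint32_t pasids = 1u << ((features & EX_PASID_MASK) >> EX_PASID_SHIFT);
            printf("supported PASIDs: %u\n", pasids); /* 1 << 15 = 32768 */
            return 0;
    }
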
@@ -901,6 +992,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu)
 {
 	free_command_buffer(iommu);
 	free_event_buffer(iommu);
+	free_ppr_log(iommu);
 	iommu_unmap_mmio_space(iommu);
 }
 
@@ -964,6 +1056,12 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	init_iommu_from_acpi(iommu, h);
 	init_iommu_devices(iommu);
 
+	if (iommu_feature(iommu, FEATURE_PPR)) {
+		iommu->ppr_log = alloc_ppr_log(iommu);
+		if (!iommu->ppr_log)
+			return -ENOMEM;
+	}
+
 	if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
 		amd_iommu_np_cache = true;
 
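
The PPR log allocated in init_iommu_one() is a ring buffer: hardware appends peripheral page requests at the tail, software drains from the head and writes the head register back, which is also why iommu_enable_ppr_log() zeroes both pointers before enabling the feature. A rough consumer loop is sketched below; the entry and ring sizes are assumptions, and the real consumer belongs to the IOMMUv2 driver rather than this file.

    #include <stdint.h>
    #include <stdio.h>

    #define PPR_LOG_SIZE   8192  /* assumed total ring size in bytes */
    #define PPR_ENTRY_SIZE 16    /* assumed bytes per logged request */

    /* Drain entries between head and tail (byte offsets into the ring);
     * returns the new head value to write back to the head register. */
    static uint32_t drain_ppr_log(const uint8_t *log, uint32_t head, uint32_t tail)
    {
            while (head != tail) {
                    const uint8_t *entry = log + head;
                    printf("request entry, first byte 0x%02x\n", entry[0]);
                    head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
            }
            return head;
    }

    int main(void)
    {
            uint8_t log[PPR_LOG_SIZE] = { 0 };
            /* pretend hardware logged three requests starting at offset 0 */
            uint32_t new_head = drain_ppr_log(log, 0, 3 * PPR_ENTRY_SIZE);
            printf("new head: %u\n", new_head); /* 48 */
            return 0;
    }
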
@@ -1050,6 +1148,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 	iommu->int_enabled = true;
 	iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
 
+	if (iommu->ppr_log != NULL)
+		iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+
 	return 0;
 }
 
@@ -1209,6 +1310,9 @@ static void iommu_init_flags(struct amd_iommu *iommu)
 	 * make IOMMU memory accesses cache coherent
 	 */
 	iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+
+	/* Set IOTLB invalidation timeout to 1s */
+	iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
 }
 
 static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
@@ -1274,6 +1378,8 @@ static void enable_iommus(void)
 		iommu_set_device_table(iommu);
 		iommu_enable_command_buffer(iommu);
 		iommu_enable_event_buffer(iommu);
+		iommu_enable_ppr_log(iommu);
+		iommu_enable_gt(iommu);
 		iommu_set_exclusion_range(iommu);
 		iommu_init_msi(iommu);
 		iommu_enable(iommu);
@@ -1303,13 +1409,6 @@ static void amd_iommu_resume(void)
 
 	/* re-load the hardware */
 	enable_iommus();
-
-	/*
-	 * we have to flush after the IOMMUs are enabled because a
-	 * disabled IOMMU will never execute the commands we send
-	 */
-	for_each_iommu(iommu)
-		iommu_flush_all_caches(iommu);
 }
 
 static int amd_iommu_suspend(void)
@@ -1560,6 +1659,8 @@ static int __init parse_amd_iommu_options(char *str)
 			amd_iommu_unmap_flush = true;
 		if (strncmp(str, "off", 3) == 0)
 			amd_iommu_disabled = true;
+		if (strncmp(str, "force_isolation", 15) == 0)
+			amd_iommu_force_isolation = true;
 	}
 
 	return 1;
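
Usage note: with the new strncmp() branch in place, the behavior is requested at boot through the existing amd_iommu= parameter, e.g.

    amd_iommu=force_isolation

which only sets the amd_iommu_force_isolation flag; it is presumably consumed by the mapping code in amd_iommu.c, which this diff does not touch.
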
@@ -1572,3 +1673,9 @@ IOMMU_INIT_FINISH(amd_iommu_detect,
 		  gart_iommu_hole_init,
 		  0,
 		  0);
+
+bool amd_iommu_v2_supported(void)
+{
+	return amd_iommu_v2_present;
+}
+EXPORT_SYMBOL(amd_iommu_v2_supported);
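
A hypothetical consumer sketch, assuming the matching extern declaration lands in <linux/amd-iommu.h> (not shown in this diff): a module that depends on IOMMUv2 features would gate its initialization on the helper exported above.

    /*
     * Hypothetical consumer, not part of this patch: bail out early
     * when no IOMMU in the system offers both GT and PPR.
     */
    #include <linux/module.h>
    #include <linux/amd-iommu.h>

    static int __init v2_consumer_init(void)
    {
            if (!amd_iommu_v2_supported())
                    return -ENODEV; /* no IOMMU with GT and PPR present */

            /* ... set up PASID state, PPR fault handling, etc. ... */
            return 0;
    }

    static void __exit v2_consumer_exit(void)
    {
    }

    module_init(v2_consumer_init);
    module_exit(v2_consumer_exit);
    MODULE_LICENSE("GPL");
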