Diffstat (limited to 'drivers/iommu')
 -rw-r--r--  drivers/iommu/amd_iommu_init.c   | 224
 -rw-r--r--  drivers/iommu/amd_iommu_types.h  |   5
 2 files changed, 121 insertions, 108 deletions
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index de7a6cedcc45..c3d650dea240 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -715,90 +715,6 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
 }
 
 /*
- * This function reads some important data from the IOMMU PCI space and
- * initializes the driver data structure with it. It reads the hardware
- * capabilities and the first/last device entries
- */
-static void __init init_iommu_from_pci(struct amd_iommu *iommu)
-{
-        int cap_ptr = iommu->cap_ptr;
-        u32 range, misc, low, high;
-        int i, j;
-
-        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
-                              &iommu->cap);
-        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
-                              &range);
-        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
-                              &misc);
-
-        iommu->first_device = calc_devid(MMIO_GET_BUS(range),
-                                         MMIO_GET_FD(range));
-        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
-                                        MMIO_GET_LD(range));
-        iommu->evt_msi_num = MMIO_MSI_NUM(misc);
-
-        if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
-                amd_iommu_iotlb_sup = false;
-
-        /* read extended feature bits */
-        low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
-        high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
-
-        iommu->features = ((u64)high << 32) | low;
-
-        if (iommu_feature(iommu, FEATURE_GT)) {
-                int glxval;
-                u32 pasids;
-                u64 shift;
-
-                shift = iommu->features & FEATURE_PASID_MASK;
-                shift >>= FEATURE_PASID_SHIFT;
-                pasids = (1 << shift);
-
-                amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
-
-                glxval = iommu->features & FEATURE_GLXVAL_MASK;
-                glxval >>= FEATURE_GLXVAL_SHIFT;
-
-                if (amd_iommu_max_glx_val == -1)
-                        amd_iommu_max_glx_val = glxval;
-                else
-                        amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
-        }
-
-        if (iommu_feature(iommu, FEATURE_GT) &&
-            iommu_feature(iommu, FEATURE_PPR)) {
-                iommu->is_iommu_v2 = true;
-                amd_iommu_v2_present = true;
-        }
-
-        if (!is_rd890_iommu(iommu->dev))
-                return;
-
-        /*
-         * Some rd890 systems may not be fully reconfigured by the BIOS, so
-         * it's necessary for us to store this information so it can be
-         * reprogrammed on resume
-         */
-
-        pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
-                              &iommu->stored_addr_lo);
-        pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
-                              &iommu->stored_addr_hi);
-
-        /* Low bit locks writes to configuration space */
-        iommu->stored_addr_lo &= ~1;
-
-        for (i = 0; i < 6; i++)
-                for (j = 0; j < 0x12; j++)
-                        iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
-
-        for (i = 0; i < 0x83; i++)
-                iommu->stored_l2[i] = iommu_read_l2(iommu, i);
-}
-
-/*
  * Takes a pointer to an AMD IOMMU entry in the ACPI table and
  * initializes the hardware and our data structures with it.
  */
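Note (not part of the patch): the function removed above assembles the 64-bit extended feature register from two 32-bit MMIO reads and then tests individual feature bits, a pattern the new PCI phase below repeats. A minimal standalone sketch of that pattern; the FAKE_FEATURE_GT name and value are placeholders, not the driver's real constants:

#include <stdint.h>
#include <stdio.h>

#define FAKE_FEATURE_GT  (1ULL << 4)   /* placeholder bit, not the real FEATURE_GT */

/* Combine the low and high 32-bit halves the way the driver does. */
static uint64_t combine_features(uint32_t low, uint32_t high)
{
        return ((uint64_t)high << 32) | low;
}

static int has_feature(uint64_t features, uint64_t mask)
{
        return (features & mask) != 0;
}

int main(void)
{
        /* pretend these came from readl() on MMIO_EXT_FEATURES and MMIO_EXT_FEATURES + 4 */
        uint32_t low = 0x00000010, high = 0x00000000;
        uint64_t features = combine_features(low, high);

        printf("GT supported: %d\n", has_feature(features, FAKE_FEATURE_GT));
        return 0;
}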
@@ -1014,13 +930,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
         /*
          * Copy data from ACPI table entry to the iommu struct
          */
-        iommu->dev = pci_get_bus_and_slot(PCI_BUS(h->devid), h->devid & 0xff);
-        if (!iommu->dev)
-                return 1;
-
-        iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
-                                                PCI_DEVFN(0, 0));
-
+        iommu->devid = h->devid;
         iommu->cap_ptr = h->cap_ptr;
         iommu->pci_seg = h->pci_seg;
         iommu->mmio_phys = h->mmio_phys;
@@ -1038,20 +948,10 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 
         iommu->int_enabled = false;
 
-        init_iommu_from_pci(iommu);
         init_iommu_from_acpi(iommu, h);
         init_iommu_devices(iommu);
 
-        if (iommu_feature(iommu, FEATURE_PPR)) {
-                iommu->ppr_log = alloc_ppr_log(iommu);
-                if (!iommu->ppr_log)
-                        return -ENOMEM;
-        }
-
-        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
-                amd_iommu_np_cache = true;
-
-        return pci_enable_device(iommu->dev);
+        return 0;
 }
 
 /*
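Note (not part of the patch): with these two hunks, init_iommu_one() only records the 16-bit device id from the IVHD entry; the struct pci_dev lookup moves to the new PCI phase below, which splits the id back into bus and devfn. A hedged sketch of that packing; the EXAMPLE_* macro bodies are my assumption of the usual bus-in-the-high-byte layout, not quoted from the driver headers:

#include <stdio.h>

#define EXAMPLE_CALC_DEVID(bus, devfn)  (((bus) << 8) | (devfn))
#define EXAMPLE_PCI_BUS(devid)          (((devid) >> 8) & 0xff)
#define EXAMPLE_PCI_DEVFN_OF(devid)     ((devid) & 0xff)

int main(void)
{
        unsigned int bus = 0x00, devfn = 0x02 << 3;   /* device 02.0 */
        unsigned int devid = EXAMPLE_CALC_DEVID(bus, devfn);

        /* an iommu_init_pci()-style lookup would split the stored id back apart */
        printf("devid=0x%04x -> bus=0x%02x devfn=0x%02x\n",
               devid, EXAMPLE_PCI_BUS(devid), EXAMPLE_PCI_DEVFN_OF(devid));
        return 0;
}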
@@ -1100,6 +1000,121 @@ static int __init init_iommu_all(struct acpi_table_header *table)
         return 0;
 }
 
+static int iommu_init_pci(struct amd_iommu *iommu)
+{
+        int cap_ptr = iommu->cap_ptr;
+        u32 range, misc, low, high;
+
+        iommu->dev = pci_get_bus_and_slot(PCI_BUS(iommu->devid),
+                                          iommu->devid & 0xff);
+        if (!iommu->dev)
+                return -ENODEV;
+
+        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET,
+                              &iommu->cap);
+        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET,
+                              &range);
+        pci_read_config_dword(iommu->dev, cap_ptr + MMIO_MISC_OFFSET,
+                              &misc);
+
+        iommu->first_device = calc_devid(MMIO_GET_BUS(range),
+                                         MMIO_GET_FD(range));
+        iommu->last_device = calc_devid(MMIO_GET_BUS(range),
+                                        MMIO_GET_LD(range));
+
+        if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB)))
+                amd_iommu_iotlb_sup = false;
+
+        /* read extended feature bits */
+        low = readl(iommu->mmio_base + MMIO_EXT_FEATURES);
+        high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4);
+
+        iommu->features = ((u64)high << 32) | low;
+
+        if (iommu_feature(iommu, FEATURE_GT)) {
+                int glxval;
+                u32 pasids;
+                u64 shift;
+
+                shift = iommu->features & FEATURE_PASID_MASK;
+                shift >>= FEATURE_PASID_SHIFT;
+                pasids = (1 << shift);
+
+                amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+
+                glxval = iommu->features & FEATURE_GLXVAL_MASK;
+                glxval >>= FEATURE_GLXVAL_SHIFT;
+
+                if (amd_iommu_max_glx_val == -1)
+                        amd_iommu_max_glx_val = glxval;
+                else
+                        amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+        }
+
+        if (iommu_feature(iommu, FEATURE_GT) &&
+            iommu_feature(iommu, FEATURE_PPR)) {
+                iommu->is_iommu_v2 = true;
+                amd_iommu_v2_present = true;
+        }
+
+        if (iommu_feature(iommu, FEATURE_PPR)) {
+                iommu->ppr_log = alloc_ppr_log(iommu);
+                if (!iommu->ppr_log)
+                        return -ENOMEM;
+        }
+
+        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
+                amd_iommu_np_cache = true;
+
+        if (is_rd890_iommu(iommu->dev)) {
+                int i, j;
+
+                iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
+                                                        PCI_DEVFN(0, 0));
+
+                /*
+                 * Some rd890 systems may not be fully reconfigured by the
+                 * BIOS, so it's necessary for us to store this information so
+                 * it can be reprogrammed on resume
+                 */
+                pci_read_config_dword(iommu->dev, iommu->cap_ptr + 4,
+                                      &iommu->stored_addr_lo);
+                pci_read_config_dword(iommu->dev, iommu->cap_ptr + 8,
+                                      &iommu->stored_addr_hi);
+
+                /* Low bit locks writes to configuration space */
+                iommu->stored_addr_lo &= ~1;
+
+                for (i = 0; i < 6; i++)
+                        for (j = 0; j < 0x12; j++)
+                                iommu->stored_l1[i][j] = iommu_read_l1(iommu, i, j);
+
+                for (i = 0; i < 0x83; i++)
+                        iommu->stored_l2[i] = iommu_read_l2(iommu, i);
+        }
+
+        return pci_enable_device(iommu->dev);
+}
+
+static int amd_iommu_init_pci(void)
+{
+        struct amd_iommu *iommu;
+        int ret = 0;
+
+        for_each_iommu(iommu) {
+                ret = iommu_init_pci(iommu);
+                if (ret)
+                        break;
+        }
+
+        /* Make sure ACS will be enabled */
+        pci_request_acs();
+
+        ret = amd_iommu_init_devices();
+
+        return ret;
+}
+
 /****************************************************************************
  *
  * The following functions initialize the MSI interrupts for all IOMMUs
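Note (not part of the patch): iommu_init_pci() above derives the supported PASID count and GL level from bit fields of the extended feature register and clamps the driver-wide limits with min(). A small sketch of that extract-and-clamp pattern, using made-up mask/shift values rather than the real FEATURE_PASID_* and FEATURE_GLXVAL_* constants:

#include <stdint.h>
#include <stdio.h>

/* Placeholder field layout; the real masks and shifts live in amd_iommu_types.h. */
#define FAKE_PASID_MASK   (0x1fULL << 32)
#define FAKE_PASID_SHIFT  32

static uint32_t supported_pasids(uint64_t features)
{
        uint64_t width = (features & FAKE_PASID_MASK) >> FAKE_PASID_SHIFT;

        /* the field encodes a width, so the count is a power of two */
        return 1u << width;
}

int main(void)
{
        uint64_t features = (uint64_t)16 << FAKE_PASID_SHIFT;  /* pretend width = 16 */
        uint32_t max_pasids = 1u << 20;                        /* pretend global limit */
        uint32_t pasids = supported_pasids(features);

        if (pasids < max_pasids)
                max_pasids = pasids;    /* same effect as the min() in the patch */

        printf("max pasids clamped to %u\n", max_pasids);
        return 0;
}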
@@ -1563,7 +1578,7 @@ int __init amd_iommu_init_hardware(void)
         if (ret)
                 goto free;
 
-        ret = amd_iommu_init_devices();
+        ret = amd_iommu_init_pci();
         if (ret)
                 goto free;
 
@@ -1696,9 +1711,6 @@ int __init amd_iommu_detect(void)
         iommu_detected = 1;
         x86_init.iommu.iommu_init = amd_iommu_init;
 
-        /* Make sure ACS will be enabled */
-        pci_request_acs();
-
         return 0;
 }
 
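Note (not part of the patch): taken together, the hunks above move all PCI-dependent work (device lookup, capability and feature reads, PPR log allocation, the ACS request) out of the ACPI parse path and into one later phase driven from amd_iommu_init_hardware(). A rough, runnable sketch of the resulting order under that reading; only init_iommu_all() and amd_iommu_init_pci() are names taken from the diff, the MSI stub is a placeholder:

#include <stdio.h>

/* Stubs standing in for the real routines; only the call order matters here. */
static int init_iommu_all(void)
{
        puts("parse IVRS/IVHD entries, record devid, cap_ptr, mmio_phys");
        return 0;
}

static int amd_iommu_init_pci(void)
{
        puts("iommu_init_pci() per IOMMU, pci_request_acs(), amd_iommu_init_devices()");
        return 0;
}

static int setup_msi_interrupts(void)   /* placeholder name */
{
        puts("initialize MSI interrupts for all IOMMUs");
        return 0;
}

int main(void)
{
        if (init_iommu_all())
                return 1;
        if (amd_iommu_init_pci())
                return 1;
        if (setup_msi_interrupts())
                return 1;
        return 0;
}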
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 848fc8e37948..d0dab865a8b8 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -501,6 +501,9 @@ struct amd_iommu {
         /* IOMMUv2 */
         bool is_iommu_v2;
 
+        /* PCI device id of the IOMMU device */
+        u16 devid;
+
         /*
          * Capability pointer. There could be more than one IOMMU per PCI
          * device function if there are more than one AMD IOMMU capability
@@ -530,8 +533,6 @@ struct amd_iommu {
         u32 evt_buf_size;
         /* event buffer virtual address */
         u8 *evt_buf;
-        /* MSI number for event interrupt */
-        u16 evt_msi_num;
 
         /* Base of the PPR log, if present */
         u8 *ppr_log;