Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
 arch/x86/kernel/amd_iommu.c | 141 +++++++++++++++++++++++++++++++++++-------
 1 file changed, 123 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 8c93b7c7735e..dc19ed43b54e 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -41,6 +41,13 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 static LIST_HEAD(iommu_pd_list);
 static DEFINE_SPINLOCK(iommu_pd_list_lock);
 
+/*
+ * Domain for untranslated devices - only allocated
+ * if iommu=pt passed on kernel cmd line.
+ */
+static struct protection_domain *pt_domain;
+
+#ifdef CONFIG_IOMMU_API
 static struct iommu_ops amd_iommu_ops;
 
 /*
@@ -1130,32 +1137,48 @@ static struct protection_domain *domain_for_device(u16 devid)
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
  */
-static void attach_device(struct amd_iommu *iommu,
-			  struct protection_domain *domain,
-			  u16 devid)
+static void __attach_device(struct amd_iommu *iommu,
+			    struct protection_domain *domain,
+			    u16 devid)
 {
-	unsigned long flags;
-	u64 pte_root = virt_to_phys(domain->pt_root);
+	u64 pte_root;
 
-	domain->dev_cnt += 1;
+	/* lock domain */
+	spin_lock(&domain->lock);
+
+	pte_root = virt_to_phys(domain->pt_root);
 
 	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
 		    << DEV_ENTRY_MODE_SHIFT;
 	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
 
-	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
-	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
 	amd_iommu_dev_table[devid].data[2] = domain->id;
+	amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
+	amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
 
 	amd_iommu_pd_table[devid] = domain;
+
+	domain->dev_cnt += 1;
+
+	/* ready */
+	spin_unlock(&domain->lock);
+}
+
+static void attach_device(struct amd_iommu *iommu,
+			  struct protection_domain *domain,
+			  u16 devid)
+{
+	unsigned long flags;
+
+	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
+	__attach_device(iommu, domain, devid);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	/*
 	 * We might boot into a crash-kernel here. The crashed kernel
 	 * left the caches in the IOMMU dirty. So we have to flush
 	 * here to evict all dirty stuff.
 	 */
 	iommu_queue_inv_dev_entry(iommu, devid);
 	iommu_flush_tlb_pde(iommu, domain->id);
 }
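
Note on the hunk above: the device-table stores now write data[2], then data[1], then data[0]. The valid (V) and translation-valid (TV) bits live in the low word, so deferring data[0] means the hardware only ever sees the entry once the domain id and page-table root are already in place. A standalone sketch of that publish-last pattern (types and names here are illustrative, not the kernel's):

    #include <stdint.h>

    /* 256-bit device table entry, modeled as eight 32-bit words */
    struct dev_table_entry { uint32_t data[8]; };

    static void dte_publish(struct dev_table_entry *dte,
                            uint64_t pte_root, uint16_t domain_id)
    {
            dte->data[2] = domain_id;                  /* domain id first      */
            dte->data[1] = (uint32_t)(pte_root >> 32); /* upper root bits next */
            dte->data[0] = (uint32_t)pte_root;         /* V/TV bits go last    */
    }

The split into __attach_device()/attach_device() mirrors the existing detach path: the double-underscore variant does the work under domain->lock and relies on the caller to serialize device-table updates, leaving attach_device() as the amd_iommu_devtable_lock wrapper.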
@@ -1182,6 +1205,15 @@ static void __detach_device(struct protection_domain *domain, u16 devid)
 
 	/* ready */
 	spin_unlock(&domain->lock);
+
+	/*
+	 * If we run in passthrough mode the device must be assigned to the
+	 * passthrough domain if it is detached from any other domain
+	 */
+	if (iommu_pass_through) {
+		struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+		__attach_device(iommu, pt_domain, devid);
+	}
 }
 
 /*
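
The reattach added to __detach_device() preserves an invariant: with iommu=pt, a device is never left without a protection domain; detaching it from any other domain drops it back into pt_domain instead of leaving the entry dangling. A minimal userspace model of that invariant (every name here is illustrative, not kernel code):

    #include <assert.h>
    #include <stddef.h>

    struct domain { int id; };

    static struct domain pt_dom;            /* stand-in for pt_domain  */
    static struct domain *dev_domain[16];   /* stand-in for pd_table   */
    static int pass_through = 1;            /* iommu=pt on the cmdline */

    static void detach(int devid)
    {
            dev_domain[devid] = NULL;
            if (pass_through)
                    dev_domain[devid] = &pt_dom;    /* re-attach */
    }

    int main(void)
    {
            struct domain guest = { .id = 42 };

            dev_domain[3] = &guest;
            detach(3);
            assert(dev_domain[3] == &pt_dom);       /* never left NULL */
            return 0;
    }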
@@ -1227,6 +1259,8 @@ static int device_change_notifier(struct notifier_block *nb,
 	case BUS_NOTIFY_UNBOUND_DRIVER:
 		if (!domain)
 			goto out;
+		if (iommu_pass_through)
+			break;
 		detach_device(domain, devid);
 		break;
 	case BUS_NOTIFY_ADD_DEVICE:
@@ -2051,19 +2085,47 @@ static void cleanup_domain(struct protection_domain *domain)
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 }
 
-static int amd_iommu_domain_init(struct iommu_domain *dom)
+static void protection_domain_free(struct protection_domain *domain)
+{
+	if (!domain)
+		return;
+
+	if (domain->id)
+		domain_id_free(domain->id);
+
+	kfree(domain);
+}
+
+static struct protection_domain *protection_domain_alloc(void)
 {
 	struct protection_domain *domain;
 
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
-		return -ENOMEM;
+		return NULL;
 
 	spin_lock_init(&domain->lock);
-	domain->mode = PAGE_MODE_3_LEVEL;
 	domain->id = domain_id_alloc();
 	if (!domain->id)
+		goto out_err;
+
+	return domain;
+
+out_err:
+	kfree(domain);
+
+	return NULL;
+}
+
+static int amd_iommu_domain_init(struct iommu_domain *dom)
+{
+	struct protection_domain *domain;
+
+	domain = protection_domain_alloc();
+	if (!domain)
 		goto out_free;
+
+	domain->mode = PAGE_MODE_3_LEVEL;
 	domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!domain->pt_root)
 		goto out_free;
@@ -2073,7 +2135,7 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
 	return 0;
 
 out_free:
-	kfree(domain);
+	protection_domain_free(domain);
 
 	return -ENOMEM;
 }
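
With the allocation path factored into protection_domain_alloc() and protection_domain_free(), the free helper tolerates both a NULL pointer and a missing domain id, so amd_iommu_domain_init() can funnel every failure, including a failed pt_root page allocation, through the single out_free label. A standalone sketch of this NULL-safe constructor/destructor idiom (illustrative names, not the kernel API):

    #include <stdlib.h>

    struct pdom { int id; void *pt_root; };

    static void pdom_free(struct pdom *d)
    {
            if (!d)
                    return;         /* NULL-safe, like protection_domain_free() */
            free(d->pt_root);       /* free(NULL) is a no-op */
            free(d);
    }

    static struct pdom *pdom_alloc(void)
    {
            struct pdom *d = calloc(1, sizeof(*d));

            if (!d)
                    return NULL;
            d->pt_root = calloc(1, 4096);
            if (!d->pt_root) {
                    pdom_free(d);   /* safe on a half-built object */
                    return NULL;
            }
            return d;
    }

    int main(void)
    {
            pdom_free(pdom_alloc());
            return 0;
    }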
@@ -2254,3 +2316,46 @@ static struct iommu_ops amd_iommu_ops = {
 	.domain_has_cap = amd_iommu_domain_has_cap,
 };
 
+/*****************************************************************************
+ *
+ * The next functions do a basic initialization of IOMMU for pass through
+ * mode
+ *
+ * In passthrough mode the IOMMU is initialized and enabled but not used for
+ * DMA-API translation.
+ *
+ *****************************************************************************/
+
+int __init amd_iommu_init_passthrough(void)
+{
+	struct pci_dev *dev = NULL;
+	u16 devid, devid2;
+
+	/* allocate passthrough domain */
+	pt_domain = protection_domain_alloc();
+	if (!pt_domain)
+		return -ENOMEM;
+
+	pt_domain->mode |= PAGE_MODE_NONE;
+
+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+		struct amd_iommu *iommu;
+
+		devid = calc_devid(dev->bus->number, dev->devfn);
+		if (devid > amd_iommu_last_bdf)
+			continue;
+
+		devid2 = amd_iommu_alias_table[devid];
+
+		iommu = amd_iommu_rlookup_table[devid2];
+		if (!iommu)
+			continue;
+
+		__attach_device(iommu, pt_domain, devid);
+		__attach_device(iommu, pt_domain, devid2);
+	}
+
+	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
+
+	return 0;
+}
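
amd_iommu_init_passthrough() walks every PCI device with pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev), which drops the reference on the previous device and takes one on the next, resolves the possibly aliased request id through amd_iommu_alias_table, and attaches both the device id and its alias to pt_domain. The 16-bit device id is the packed bus/devfn pair; a small sketch of that packing, assuming calc_devid() is the usual (bus << 8) | devfn macro:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed equivalent of the driver's calc_devid() macro */
    static uint16_t calc_devid(uint8_t bus, uint8_t devfn)
    {
            return ((uint16_t)bus << 8) | devfn;    /* bus:dev.fn -> 16-bit BDF */
    }

    int main(void)
    {
            /* PCI address 00:02.0: devfn = (slot << 3) | fn */
            printf("devid = 0x%04x\n", calc_devid(0x00, (0x02 << 3) | 0x0));
            return 0;
    }

Since PAGE_MODE_NONE gives pt_domain no page tables, the devices attached here are presented to the hardware with translation disabled, which matches the iommu=pt semantic described in the comment block above.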
