about summary refs log tree commit diff stats
path: root/arch/x86/kernel/amd_iommu.c
diff options
context:
space:
mode:
author    Joerg Roedel <joerg.roedel@amd.com>  2009-08-26 09:26:30 -0400
committer Joerg Roedel <joerg.roedel@amd.com>  2009-09-03 10:15:42 -0400
commit0feae533ddebe02cda6ccce5cac7349b446776a8 (patch)
treed9c82d7ebec45d96194aeaf27acfbbd2015545ac /arch/x86/kernel/amd_iommu.c
parent2650815fb03fe2bf1e6701584087ba669dcf92cd (diff)
x86/amd-iommu: Add passthrough mode initialization functions
When iommu=pt is passed on the kernel command line, the devices should run untranslated. This requires the allocation of a special domain for that purpose. This patch implements the allocation and initialization path for iommu=pt. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--arch/x86/kernel/amd_iommu.c72
1 file changed, 64 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 0934348abfad..7987f20499ad 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -41,6 +41,12 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 static LIST_HEAD(iommu_pd_list);
 static DEFINE_SPINLOCK(iommu_pd_list_lock);

+/*
+ * Domain for untranslated devices - only allocated
+ * if iommu=pt passed on kernel cmd line.
+ */
+static struct protection_domain *pt_domain;
+
 #ifdef CONFIG_IOMMU_API
 static struct iommu_ops amd_iommu_ops;
 #endif
@@ -1067,9 +1073,9 @@ static struct protection_domain *domain_for_device(u16 devid)
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
  */
-static void attach_device(struct amd_iommu *iommu,
+static void __attach_device(struct amd_iommu *iommu,
 			  struct protection_domain *domain,
 			  u16 devid)
 {
 	unsigned long flags;
 	u64 pte_root = virt_to_phys(domain->pt_root);
@@ -1087,12 +1093,19 @@ static void attach_device(struct amd_iommu *iommu,
 
 	amd_iommu_pd_table[devid] = domain;
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+}
 
-	/*
-	 * We might boot into a crash-kernel here. The crashed kernel
-	 * left the caches in the IOMMU dirty. So we have to flush
-	 * here to evict all dirty stuff.
-	 */
+static void attach_device(struct amd_iommu *iommu,
+			  struct protection_domain *domain,
+			  u16 devid)
+{
+	__attach_device(iommu, domain, devid);
+
+	/*
+	 * We might boot into a crash-kernel here. The crashed kernel
+	 * left the caches in the IOMMU dirty. So we have to flush
+	 * here to evict all dirty stuff.
+	 */
 	iommu_queue_inv_dev_entry(iommu, devid);
 	iommu_flush_tlb_pde(iommu, domain->id);
 }
@@ -2219,3 +2232,46 @@ static struct iommu_ops amd_iommu_ops = {
 	.domain_has_cap = amd_iommu_domain_has_cap,
 };
 
2235/*****************************************************************************
2236 *
2237 * The next functions do a basic initialization of IOMMU for pass through
2238 * mode
2239 *
2240 * In passthrough mode the IOMMU is initialized and enabled but not used for
2241 * DMA-API translation.
2242 *
2243 *****************************************************************************/
2244
2245int __init amd_iommu_init_passthrough(void)
2246{
2247 struct pci_dev *dev = NULL;
2248 u16 devid, devid2;
2249
2250 /* allocate passthroug domain */
2251 pt_domain = protection_domain_alloc();
2252 if (!pt_domain)
2253 return -ENOMEM;
2254
2255 pt_domain->mode |= PAGE_MODE_NONE;
2256
2257 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
2258 struct amd_iommu *iommu;
2259
2260 devid = calc_devid(dev->bus->number, dev->devfn);
2261 if (devid > amd_iommu_last_bdf)
2262 continue;
2263
2264 devid2 = amd_iommu_alias_table[devid];
2265
2266 iommu = amd_iommu_rlookup_table[devid2];
2267 if (!iommu)
2268 continue;
2269
2270 __attach_device(iommu, pt_domain, devid);
2271 __attach_device(iommu, pt_domain, devid2);
2272 }
2273
2274 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
2275
2276 return 0;
2277}