author		Joerg Roedel <joerg.roedel@amd.com>	2009-08-26 09:26:30 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-09-03 10:15:42 -0400
commit		0feae533ddebe02cda6ccce5cac7349b446776a8 (patch)
tree		d9c82d7ebec45d96194aeaf27acfbbd2015545ac
parent		2650815fb03fe2bf1e6701584087ba669dcf92cd (diff)
x86/amd-iommu: Add passthrough mode initialization functions
When iommu=pt is passed on the kernel command line, the devices should run
untranslated. This requires the allocation of a special domain for that
purpose. This patch implements the allocation and initialization path for
iommu=pt.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
-rw-r--r--	arch/x86/include/asm/amd_iommu.h	 1
-rw-r--r--	arch/x86/include/asm/amd_iommu_types.h	 4
-rw-r--r--	arch/x86/kernel/amd_iommu.c		72
3 files changed, 69 insertions, 8 deletions
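
For context, a hedged sketch (not part of this patch) of how the new entry
point might be wired into the AMD IOMMU init path. The iommu_pass_through
flag is the existing x86 variable that iommu=pt sets in
arch/x86/kernel/pci-dma.c; the surrounding error handling is illustrative
only:

	/* Illustrative caller, e.g. in amd_iommu_init(): choose the
	 * passthrough setup over the dma_ops setup when iommu=pt was
	 * given. The "free" cleanup label is assumed, not from this patch.
	 */
	if (iommu_pass_through)
		ret = amd_iommu_init_passthrough();
	else
		ret = amd_iommu_init_dma_ops();
	if (ret)
		goto free;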
diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h
index bdf96f119f06..ac95995b7bad 100644
--- a/arch/x86/include/asm/amd_iommu.h
+++ b/arch/x86/include/asm/amd_iommu.h
@@ -25,6 +25,7 @@
 #ifdef CONFIG_AMD_IOMMU
 extern int amd_iommu_init(void);
 extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
 extern void amd_iommu_detect(void);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_flush_all_domains(void);
diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h
index 0c878caaa0a2..49f7453bff76 100644
--- a/arch/x86/include/asm/amd_iommu_types.h
+++ b/arch/x86/include/asm/amd_iommu_types.h
@@ -143,6 +143,7 @@
 #define EVT_BUFFER_SIZE		8192 /* 512 entries */
 #define EVT_LEN_MASK		(0x9ULL << 56)
 
+#define PAGE_MODE_NONE    0x00
 #define PAGE_MODE_1_LEVEL 0x01
 #define PAGE_MODE_2_LEVEL 0x02
 #define PAGE_MODE_3_LEVEL 0x03
@@ -194,6 +195,9 @@
 #define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
 #define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */
+#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
+					      translation */
+
 extern bool amd_iommu_dump;
 #define DUMP_printk(format, arg...)					\
 	do {								\
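
The new PD_PASSTHROUGH_MASK bit marks a domain that performs no page
translation. A hypothetical guard (not in this patch; existing code keeps
such bits in the protection domain's flags field) could then look like:

	/* Hypothetical check, not part of this patch: skip page-table
	 * manipulation for a domain that does not translate.
	 */
	if (domain->flags & PD_PASSTHROUGH_MASK)
		return;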
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 0934348abfad..7987f20499ad 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -41,6 +41,12 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 static LIST_HEAD(iommu_pd_list);
 static DEFINE_SPINLOCK(iommu_pd_list_lock);
 
+/*
+ * Domain for untranslated devices - only allocated
+ * if iommu=pt passed on kernel cmd line.
+ */
+static struct protection_domain *pt_domain;
+
 #ifdef CONFIG_IOMMU_API
 static struct iommu_ops amd_iommu_ops;
 #endif
@@ -1067,9 +1073,9 @@ static struct protection_domain *domain_for_device(u16 devid)
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
  */
-static void attach_device(struct amd_iommu *iommu,
-			  struct protection_domain *domain,
-			  u16 devid)
+static void __attach_device(struct amd_iommu *iommu,
+			    struct protection_domain *domain,
+			    u16 devid)
 {
 	unsigned long flags;
 	u64 pte_root = virt_to_phys(domain->pt_root);
@@ -1087,12 +1093,19 @@ static void attach_device(struct amd_iommu *iommu,
 
 	amd_iommu_pd_table[devid] = domain;
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+}
 
-	/*
-	 * We might boot into a crash-kernel here. The crashed kernel
-	 * left the caches in the IOMMU dirty. So we have to flush
-	 * here to evict all dirty stuff.
-	 */
+static void attach_device(struct amd_iommu *iommu,
+			  struct protection_domain *domain,
+			  u16 devid)
+{
+	__attach_device(iommu, domain, devid);
+
+	/*
+	 * We might boot into a crash-kernel here. The crashed kernel
+	 * left the caches in the IOMMU dirty. So we have to flush
+	 * here to evict all dirty stuff.
+	 */
 	iommu_queue_inv_dev_entry(iommu, devid);
 	iommu_flush_tlb_pde(iommu, domain->id);
 }
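
The split above leaves two entry points: __attach_device() performs only
the device-table update, while attach_device() also queues the
invalidations. Illustrative call sites (the first form is used by the
passthrough init further down in this patch):

	/* Early boot, IOMMU caches not yet relevant: table update only. */
	__attach_device(iommu, pt_domain, devid);

	/* Runtime attach: table update plus device-entry and TLB flushes. */
	attach_device(iommu, domain, devid);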
@@ -2219,3 +2232,46 @@ static struct iommu_ops amd_iommu_ops = {
 	.domain_has_cap = amd_iommu_domain_has_cap,
 };
 
+/*****************************************************************************
+ *
+ * The next functions do a basic initialization of the IOMMU for pass through
+ * mode
+ *
+ * In passthrough mode the IOMMU is initialized and enabled but not used for
+ * DMA-API translation.
+ *
+ *****************************************************************************/
+
+int __init amd_iommu_init_passthrough(void)
+{
+	struct pci_dev *dev = NULL;
+	u16 devid, devid2;
+
+	/* allocate passthrough domain */
+	pt_domain = protection_domain_alloc();
+	if (!pt_domain)
+		return -ENOMEM;
+
+	pt_domain->mode |= PAGE_MODE_NONE;
+
+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+		struct amd_iommu *iommu;
+
+		devid = calc_devid(dev->bus->number, dev->devfn);
+		if (devid > amd_iommu_last_bdf)
+			continue;
+
+		devid2 = amd_iommu_alias_table[devid];
+
+		iommu = amd_iommu_rlookup_table[devid2];
+		if (!iommu)
+			continue;
+
+		__attach_device(iommu, pt_domain, devid);
+		__attach_device(iommu, pt_domain, devid2);
+	}
+
+	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
+
+	return 0;
+}
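
With a caller wired up as sketched above, passthrough mode is requested
from the boot loader; an illustrative kernel command line (kernel image
and root device names assumed):

	linux /boot/vmlinuz-2.6.31 root=/dev/sda1 ro iommu=pt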