Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/iommu.h         4
-rw-r--r--  include/linux/dma_remapping.h  27
-rw-r--r--  include/linux/intel-iommu.h    39
3 files changed, 40 insertions(+), 30 deletions(-)
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 961e746da977..2daaffcda52f 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -7,9 +7,13 @@ extern struct dma_mapping_ops nommu_dma_ops;
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
 extern int dmar_disabled;
+extern int forbid_dac;
 
 extern unsigned long iommu_nr_pages(unsigned long addr, unsigned long len);
 
+/* 10 seconds */
+#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)
+
 #ifdef CONFIG_GART_IOMMU
 extern int gart_iommu_aperture;
 extern int gart_iommu_aperture_allowed;
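DMAR_OPERATION_TIMEOUT, now shared via asm/iommu.h, is measured in TSC cycles: tsc_khz is the TSC rate in cycles per millisecond, so tsc_khz * 10 * 1000 is exactly ten seconds' worth of cycles. A minimal sketch of the deadline check this value is meant for (illustrative helper, not part of the patch):

	/* Illustrative only: has the 10-second DMAR deadline expired?
	 * tsc_khz = cycles/ms, so tsc_khz * 10 * 1000 = cycles in 10 s. */
	static inline bool dmar_deadline_passed(cycles_t start)
	{
		return (get_cycles() - start) > DMAR_OPERATION_TIMEOUT;
	}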
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index bff5c65f81dc..952df39c989d 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -2,15 +2,14 @@
 #define _DMA_REMAPPING_H
 
 /*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
+ * VT-d hardware uses 4KiB page size regardless of host page size.
  */
-#define PAGE_SHIFT_4K		(12)
-#define PAGE_SIZE_4K		(1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K		(((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr)	(((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
+#define VTD_PAGE_SHIFT		(12)
+#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
 
-#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT_4K)
+#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
 #define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
 
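The rename is mechanical (PAGE_*_4K becomes VTD_PAGE_*), but note the one semantic change in this hunk: IOVA_PFN now shifts by the host PAGE_SHIFT instead of the fixed 4K shift, so IOVA page frame numbers follow the architecture's page size. A quick illustration of the macro arithmetic (hypothetical values, not from the patch):

	/* Hypothetical values showing what the VTD_* macros compute. */
	u64 addr = 0x12345;			/* not 4KiB aligned    */
	u64 base = addr & VTD_PAGE_MASK;	/* 0x12000: round down */
	u64 next = VTD_PAGE_ALIGN(addr);	/* 0x13000: round up   */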
@@ -25,7 +24,7 @@ struct root_entry {
 	u64	val;
 	u64	rsvd1;
 };
-#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
+#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
 static inline bool root_present(struct root_entry *root)
 {
 	return (root->val & 1);
@@ -36,7 +35,7 @@ static inline void set_root_present(struct root_entry *root)
 }
 static inline void set_root_value(struct root_entry *root, unsigned long value)
 {
-	root->val |= value & PAGE_MASK_4K;
+	root->val |= value & VTD_PAGE_MASK;
 }
 
 struct context_entry;
@@ -45,7 +44,7 @@ get_context_addr_from_root(struct root_entry *root)
 {
 	return (struct context_entry *)
 		(root_present(root)?phys_to_virt(
-		root->val & PAGE_MASK_4K):
+		root->val & VTD_PAGE_MASK) :
 		NULL);
 }
 
@@ -67,7 +66,7 @@ struct context_entry {
 #define context_present(c) ((c).lo & 1)
 #define context_fault_disable(c) (((c).lo >> 1) & 1)
 #define context_translation_type(c) (((c).lo >> 2) & 3)
-#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
+#define context_address_root(c) ((c).lo & VTD_PAGE_MASK)
 #define context_address_width(c) ((c).hi & 7)
 #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
 
@@ -81,7 +80,7 @@ struct context_entry {
 	} while (0)
 #define CONTEXT_TT_MULTI_LEVEL	0
 #define context_set_address_root(c, val) \
-	do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
+	do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0)
 #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
 #define context_set_domain_id(c, val) \
 	do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
@@ -107,9 +106,9 @@ struct dma_pte {
 #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
 #define dma_set_pte_prot(p, prot) \
 		do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
+#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
 #define dma_set_pte_addr(p, addr) do {\
-		(p).val |= ((addr) & PAGE_MASK_4K); } while (0)
+		(p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
 #define dma_pte_present(p) (((p).val & 3) != 0)
 
 struct intel_iommu;
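A dma_pte packs the 4KiB-aligned physical address in the upper bits and the read/write permission bits in bits 0-1, which is why dma_pte_present() only tests the low two bits. A hedged sketch of composing these accessors (dma_set_pte_readable follows the same pattern elsewhere in this header):

	/* Sketch: build a PTE, then decode it with the macros above. */
	struct dma_pte pte = { 0 };

	dma_set_pte_addr(pte, 0x40001000ULL);	/* 4KiB-aligned phys addr     */
	dma_set_pte_readable(pte);		/* sets bit 0 (DMA_PTE_READ)  */
	dma_set_pte_writable(pte);		/* sets bit 1 (DMA_PTE_WRITE) */

	/* dma_pte_addr(pte)    == 0x40001000  ((p).val & VTD_PAGE_MASK) */
	/* dma_pte_present(pte) != 0           (low two bits are set)    */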
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index afb0d2a5b7cd..3d017cfd245b 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -29,6 +29,7 @@
 #include <linux/io.h>
 #include <linux/dma_remapping.h>
 #include <asm/cacheflush.h>
+#include <asm/iommu.h>
 
 /*
  * Intel IOMMU register specification per version 1.0 public spec.
@@ -202,22 +203,21 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define dma_frcd_type(d) ((d >> 30) & 1)
 #define dma_frcd_fault_reason(c) (c & 0xff)
 #define dma_frcd_source_id(c) (c & 0xffff)
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
-
-#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
-
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-{\
-	cycles_t start_time = get_cycles();\
-	while (1) {\
-		sts = op (iommu->reg + offset);\
-		if (cond)\
-			break;\
+/* low 64 bit */
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)		\
+do {								\
+	cycles_t start_time = get_cycles();			\
+	while (1) {						\
+		sts = op(iommu->reg + offset);			\
+		if (cond)					\
+			break;					\
 		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
-			panic("DMAR hardware is malfunctioning\n");\
-		cpu_relax();\
-	}\
-}
+			panic("DMAR hardware is malfunctioning\n");	\
+		cpu_relax();					\
+	}							\
+} while (0)
 
 #define QI_LENGTH	256	/* queue length */
 
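Besides moving DMAR_OPERATION_TIMEOUT out to asm/iommu.h, this hunk rewrites IOMMU_WAIT_OP from a bare { } block into the do { } while (0) idiom. That matters for any multi-statement macro: with a bare block, the caller's trailing semicolon terminates an enclosing if, orphaning a following else. A sketch of the call shape the old form would break (the surrounding condition is hypothetical; the register and status names are the ones the driver uses):

	/* With the old bare-{ } macro this fails to compile: the ';'
	 * after the macro closes the if, leaving the else dangling. */
	if (need_wait)
		IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
			      (sts & DMA_GSTS_TES), sts);
	else
		sts = 0;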
@@ -244,7 +244,7 @@ enum {
 #define QI_IOTLB_DR(dr)		(((u64)dr) << 7)
 #define QI_IOTLB_DW(dw)		(((u64)dw) << 6)
 #define QI_IOTLB_GRAN(gran)	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
-#define QI_IOTLB_ADDR(addr)	(((u64)addr) & PAGE_MASK_4K)
+#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
 #define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
 #define QI_IOTLB_AM(am)		(((u8)am))
 
@@ -353,4 +353,11 @@ static inline int intel_iommu_found(void)
 }
 #endif /* CONFIG_DMAR */
 
+extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
+extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
+extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
+extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
+
 #endif
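The new externs publish the VT-d DMA primitives so callers outside the core driver can wire them into their dma_ops. A hedged sketch of the single-buffer pair, with a hypothetical device and buffer; note that intel_map_single() takes a physical address and that the final int is the DMA direction:

	/* Hypothetical caller; error handling trimmed. */
	dma_addr_t handle;

	handle = intel_map_single(&pdev->dev, virt_to_phys(buf), len,
				  DMA_TO_DEVICE);
	if (!handle)
		return -EIO;	/* assumed failure convention */

	/* ... device DMAs to/from 'handle' ... */

	intel_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);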