author		David Woodhouse <David.Woodhouse@intel.com>	2008-10-20 15:16:53 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2008-10-20 15:19:36 -0400
commit		b364776ad1208a71f0c53578c84619a395412a8d (patch)
tree		d6050e5db6298095324ccb8af7d477684485d52e /include/linux
parent		6da0b38f4433fb0f24615449d7966471b6e5eae0 (diff)
parent		6c8909b42fee1be67647bcd2518161a0fa8ca533 (diff)

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:
	drivers/pci/intel-iommu.c
Diffstat (limited to 'include/linux')

-rw-r--r--	include/linux/dma_remapping.h	27
-rw-r--r--	include/linux/intel-iommu.h	66

2 files changed, 64 insertions(+), 29 deletions(-)
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index bff5c65f81dc..952df39c989d 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -2,15 +2,14 @@
 #define _DMA_REMAPPING_H
 
 /*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
+ * VT-d hardware uses 4KiB page size regardless of host page size.
  */
-#define PAGE_SHIFT_4K		(12)
-#define PAGE_SIZE_4K		(1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K		(((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr)	(((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
+#define VTD_PAGE_SHIFT		(12)
+#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
 
-#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT_4K)
+#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
 #define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
 
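The VTD_PAGE_* names make explicit that these constants describe VT-d's fixed 4KiB translation granularity rather than the CPU's PAGE_SIZE, and IOVA_PFN now uses the architecture's own PAGE_SHIFT. As a quick sanity check of the mask/align arithmetic above, the following stands alone in userspace; it is illustrative only and not part of the patch, with uint64_t standing in for the kernel's u64:

	#include <stdint.h>
	#include <assert.h>

	#define VTD_PAGE_SHIFT		(12)
	#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
	#define VTD_PAGE_MASK		(((uint64_t)-1) << VTD_PAGE_SHIFT)
	#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)

	int main(void)
	{
		/* 0x1234 rounds up to the next 4KiB boundary... */
		assert(VTD_PAGE_ALIGN(0x1234ULL) == 0x2000ULL);
		/* ...and an already-aligned address is unchanged. */
		assert(VTD_PAGE_ALIGN(0x2000ULL) == 0x2000ULL);
		return 0;
	}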
@@ -25,7 +24,7 @@ struct root_entry {
 	u64	val;
 	u64	rsvd1;
 };
-#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
+#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
 static inline bool root_present(struct root_entry *root)
 {
 	return (root->val & 1);
@@ -36,7 +35,7 @@ static inline void set_root_present(struct root_entry *root)
 }
 static inline void set_root_value(struct root_entry *root, unsigned long value)
 {
-	root->val |= value & PAGE_MASK_4K;
+	root->val |= value & VTD_PAGE_MASK;
 }
 
 struct context_entry;
@@ -45,7 +44,7 @@ get_context_addr_from_root(struct root_entry *root)
 {
 	return (struct context_entry *)
 		(root_present(root)?phys_to_virt(
-		root->val & PAGE_MASK_4K):
+		root->val & VTD_PAGE_MASK) :
 		NULL);
 }
 
@@ -67,7 +66,7 @@ struct context_entry {
 #define context_present(c) ((c).lo & 1)
 #define context_fault_disable(c) (((c).lo >> 1) & 1)
 #define context_translation_type(c) (((c).lo >> 2) & 3)
-#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
+#define context_address_root(c) ((c).lo & VTD_PAGE_MASK)
 #define context_address_width(c) ((c).hi & 7)
 #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
 
@@ -81,7 +80,7 @@ struct context_entry {
 	} while (0)
 #define CONTEXT_TT_MULTI_LEVEL	0
 #define context_set_address_root(c, val) \
-	do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
+	do {(c).lo |= (val) & VTD_PAGE_MASK; } while (0)
 #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
 #define context_set_domain_id(c, val) \
 	do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
@@ -107,9 +106,9 @@ struct dma_pte {
 #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
 #define dma_set_pte_prot(p, prot) \
 		do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
+#define dma_pte_addr(p) ((p).val & VTD_PAGE_MASK)
 #define dma_set_pte_addr(p, addr) do {\
-		(p).val |= ((addr) & PAGE_MASK_4K); } while (0)
+		(p).val |= ((addr) & VTD_PAGE_MASK); } while (0)
 #define dma_pte_present(p) (((p).val & 3) != 0)
 
 struct intel_iommu;
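For context on how the accessors above compose, here is a hedged sketch of programming a single IOMMU page-table entry, loosely modelled on the page-mapping path in drivers/pci/intel-iommu.c. DMA_PTE_READ and DMA_PTE_WRITE are defined elsewhere in this header; page_phys is a placeholder for a caller-supplied physical address:

	struct dma_pte pte;

	pte.val = 0;
	/* Point the entry at a 4KiB-aligned physical page... */
	dma_set_pte_addr(pte, page_phys & VTD_PAGE_MASK);
	/* ...then grant read/write; protection occupies the low two bits. */
	dma_set_pte_prot(pte, DMA_PTE_READ | DMA_PTE_WRITE);
	/* The entry reads back as present once either prot bit is set. */
	BUG_ON(!dma_pte_present(pte));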
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 2e117f30a76c..3d017cfd245b 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -29,6 +29,7 @@
 #include <linux/io.h>
 #include <linux/dma_remapping.h>
 #include <asm/cacheflush.h>
+#include <asm/iommu.h>
 
 /*
  * Intel IOMMU register specification per version 1.0 public spec.
@@ -127,6 +128,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 
 
 /* IOTLB_REG */
+#define DMA_TLB_FLUSH_GRANU_OFFSET  60
 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
 #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
 #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
@@ -140,6 +142,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define DMA_TLB_MAX_SIZE (0x3f)
 
 /* INVALID_DESC */
+#define DMA_CCMD_INVL_GRANU_OFFSET  61
 #define DMA_ID_TLB_GLOBAL_FLUSH	(((u64)1) << 3)
 #define DMA_ID_TLB_DSI_FLUSH	(((u64)2) << 3)
 #define DMA_ID_TLB_PSI_FLUSH	(((u64)3) << 3)
@@ -200,22 +203,21 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define dma_frcd_type(d) ((d >> 30) & 1)
 #define dma_frcd_fault_reason(c) (c & 0xff)
 #define dma_frcd_source_id(c) (c & 0xffff)
-#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
-
-#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
-
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-{\
-	cycles_t start_time = get_cycles();\
-	while (1) {\
-		sts = op (iommu->reg + offset);\
-		if (cond)\
-			break;\
+/* low 64 bit */
+#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
+do {									\
+	cycles_t start_time = get_cycles();				\
+	while (1) {							\
+		sts = op(iommu->reg + offset);				\
+		if (cond)						\
+			break;						\
 		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
-			panic("DMAR hardware is malfunctioning\n");\
-		cpu_relax();\
-	}\
-}
+			panic("DMAR hardware is malfunctioning\n");	\
+		cpu_relax();						\
+	}								\
+} while (0)
 
 #define QI_LENGTH	256	/* queue length */
 
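The rewritten IOMMU_WAIT_OP is now a conventional do { ... } while (0) statement macro, and the x86-specific tsc_khz-based DMAR_OPERATION_TIMEOUT moves out of this header; with the new <asm/iommu.h> include, each architecture can supply its own definition. Callers look roughly like this hedged sketch, modelled on iommu_flush_write_buffer() in drivers/pci/intel-iommu.c (the register and bit names are the DMAR_*/DMA_* constants defined earlier in this header):

	u32 val;
	unsigned long flag;

	val = iommu->gcmd | DMA_GCMD_WBF;

	spin_lock_irqsave(&iommu->register_lock, flag);
	writel(val, iommu->reg + DMAR_GCMD_REG);

	/* Re-read the status register until the write-buffer-flush bit
	 * clears; panic if DMAR_OPERATION_TIMEOUT cycles elapse first. */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl,
		      (!(val & DMA_GSTS_WBFS)), val);

	spin_unlock_irqrestore(&iommu->register_lock, flag);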
@@ -238,6 +240,19 @@ enum {
 #define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
 #define QI_IWD_STATUS_WRITE	(((u64)1) << 5)
 
+#define QI_IOTLB_DID(did) 	(((u64)did) << 16)
+#define QI_IOTLB_DR(dr) 	(((u64)dr) << 7)
+#define QI_IOTLB_DW(dw) 	(((u64)dw) << 6)
+#define QI_IOTLB_GRAN(gran) 	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
+#define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
+#define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
+#define QI_IOTLB_AM(am)		(((u8)am))
+
+#define QI_CC_FM(fm)		(((u64)fm) << 48)
+#define QI_CC_SID(sid)		(((u64)sid) << 32)
+#define QI_CC_DID(did)		(((u64)did) << 16)
+#define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
+
 struct qi_desc {
 	u64 low, high;
 };
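The QI_IOTLB_GRAN() and QI_CC_GRAN() helpers take a register-format granularity constant (encoded at bit 60 of the IOTLB command and bit 61 of the context command, per the GRANU_OFFSET defines added above) and shift it down into bits 5:4 of a queued-invalidation descriptor, hence the right shift by GRANU_OFFSET minus 4. A hedged sketch of building and submitting a global context-cache invalidation, roughly what qi_flush_context() (declared below) does internally; QI_CC_TYPE is the descriptor-type code defined elsewhere in this header:

	struct qi_desc desc;

	/* Global granularity: DMA_CCMD_GLOBAL_INVL (bit 61) becomes
	 * descriptor bit 4 via QI_CC_GRAN(). */
	desc.low = QI_CC_FM(0) | QI_CC_SID(0) | QI_CC_DID(0)
			| QI_CC_GRAN(DMA_CCMD_GLOBAL_INVL) | QI_CC_TYPE;
	desc.high = 0;

	/* Queue the descriptor and wait for hardware to consume it. */
	qi_submit_sync(&desc, iommu);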
@@ -263,6 +278,13 @@ struct ir_table {
 };
 #endif
 
+struct iommu_flush {
+	int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		u64 type, int non_present_entry_flush);
+	int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
+		unsigned int size_order, u64 type, int non_present_entry_flush);
+};
+
 struct intel_iommu {
 	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
 	u64		cap;
@@ -282,6 +304,7 @@ struct intel_iommu {
 	unsigned char name[7];    /* Device Name */
 	struct msi_msg saved_msg;
 	struct sys_device sysdev;
+	struct iommu_flush flush;
 #endif
 	struct q_inval *qi;        /* Queued invalidation info */
 #ifdef CONFIG_INTR_REMAP
@@ -303,6 +326,12 @@ extern void free_iommu(struct intel_iommu *iommu);
 extern int dmar_enable_qi(struct intel_iommu *iommu);
 extern void qi_global_iec(struct intel_iommu *iommu);
 
+extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+			    u8 fm, u64 type, int non_present_entry_flush);
+extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+			  unsigned int size_order, u64 type,
+			  int non_present_entry_flush);
+
 extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 void intel_iommu_domain_exit(struct dmar_domain *domain);
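Together with the new struct iommu_flush above, these declarations let the driver choose a flush implementation per IOMMU at initialization: qi_flush_context()/qi_flush_iotlb() when queued invalidation is available, register-based flushing otherwise. A hedged sketch of the wiring, modelled on the setup in drivers/pci/intel-iommu.c (the register-based __iommu_flush_* helpers live there, not in this header; dmar_enable_qi() returns nonzero on failure):

	if (dmar_enable_qi(iommu)) {
		/* Queued invalidation could not be enabled; fall back
		 * to flushing through the command registers. */
		iommu->flush.flush_context = __iommu_flush_context;
		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
	} else {
		iommu->flush.flush_context = qi_flush_context;
		iommu->flush.flush_iotlb = qi_flush_iotlb;
	}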
@@ -324,4 +353,11 @@ static inline int intel_iommu_found(void)
 }
 #endif /* CONFIG_DMAR */
 
+extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
+extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
+extern dma_addr_t intel_map_single(struct device *, phys_addr_t, size_t, int);
+extern void intel_unmap_single(struct device *, dma_addr_t, size_t, int);
+extern int intel_map_sg(struct device *, struct scatterlist *, int, int);
+extern void intel_unmap_sg(struct device *, struct scatterlist *, int, int);
+
 #endif
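These last declarations expose the VT-d DMA mapping entry points implemented in drivers/pci/intel-iommu.c so that architecture code can install them as its DMA operations. A hedged sketch of the hookup as it looked in this era, assuming the x86 struct dma_mapping_ops of the time:

	static struct dma_mapping_ops intel_dma_ops = {
		.alloc_coherent	= intel_alloc_coherent,
		.free_coherent	= intel_free_coherent,
		.map_single	= intel_map_single,
		.unmap_single	= intel_unmap_single,
		.map_sg		= intel_map_sg,
		.unmap_sg	= intel_unmap_sg,
	};

	dma_ops = &intel_dma_ops;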