Diffstat (limited to 'drivers/pci/intel-iommu.h')
 -rw-r--r--  drivers/pci/intel-iommu.h | 233
 1 file changed, 98 insertions(+), 135 deletions(-)
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
index afc0ad96122e..2142c01e0143 100644
--- a/drivers/pci/intel-iommu.h
+++ b/drivers/pci/intel-iommu.h
@@ -27,19 +27,8 @@
 #include <linux/sysdev.h>
 #include "iova.h"
 #include <linux/io.h>
-
-/*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
- */
-#define PAGE_SHIFT_4K (12)
-#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
-
-#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K)
-#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
-#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
+#include <asm/cacheflush.h>
+#include "dma_remapping.h"
 
 /*
  * Intel IOMMU register specification per version 1.0 public spec.
@@ -63,6 +52,11 @@
 #define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */
 #define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */
 #define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */
+#define DMAR_IQH_REG 0x80 /* Invalidation queue head register */
+#define DMAR_IQT_REG 0x88 /* Invalidation queue tail register */
+#define DMAR_IQA_REG 0x90 /* Invalidation queue addr register */
+#define DMAR_ICS_REG 0x98 /* Invalidation complete status register */
+#define DMAR_IRTA_REG 0xb8 /* Interrupt remapping table addr register */
 
 #define OFFSET_STRIDE (9)
 /*
@@ -126,6 +120,10 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define ecap_max_iotlb_offset(e) \
 	(ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16)
 #define ecap_coherent(e) ((e) & 0x1)
+#define ecap_qis(e) ((e) & 0x2)
+#define ecap_eim_support(e) ((e >> 4) & 0x1)
+#define ecap_ir_support(e) ((e >> 3) & 0x1)
+#define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
 
 
 /* IOTLB_REG */
@@ -141,6 +139,17 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
 #define DMA_TLB_MAX_SIZE (0x3f)
 
+/* INVALID_DESC */
+#define DMA_ID_TLB_GLOBAL_FLUSH (((u64)1) << 3)
+#define DMA_ID_TLB_DSI_FLUSH (((u64)2) << 3)
+#define DMA_ID_TLB_PSI_FLUSH (((u64)3) << 3)
+#define DMA_ID_TLB_READ_DRAIN (((u64)1) << 7)
+#define DMA_ID_TLB_WRITE_DRAIN (((u64)1) << 6)
+#define DMA_ID_TLB_DID(id) (((u64)((id & 0xffff) << 16)))
+#define DMA_ID_TLB_IH_NONLEAF (((u64)1) << 6)
+#define DMA_ID_TLB_ADDR(addr) (addr)
+#define DMA_ID_TLB_ADDR_MASK(mask) (mask)
+
 /* PMEN_REG */
 #define DMA_PMEN_EPM (((u32)1)<<31)
 #define DMA_PMEN_PRS (((u32)1)<<0)
@@ -151,6 +160,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define DMA_GCMD_SFL (((u32)1) << 29)
 #define DMA_GCMD_EAFL (((u32)1) << 28)
 #define DMA_GCMD_WBF (((u32)1) << 27)
+#define DMA_GCMD_QIE (((u32)1) << 26)
+#define DMA_GCMD_SIRTP (((u32)1) << 24)
+#define DMA_GCMD_IRE (((u32) 1) << 25)
 
 /* GSTS_REG */
 #define DMA_GSTS_TES (((u32)1) << 31)
@@ -158,6 +170,9 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define DMA_GSTS_FLS (((u32)1) << 29)
 #define DMA_GSTS_AFLS (((u32)1) << 28)
 #define DMA_GSTS_WBFS (((u32)1) << 27)
+#define DMA_GSTS_QIES (((u32)1) << 26)
+#define DMA_GSTS_IRTPS (((u32)1) << 24)
+#define DMA_GSTS_IRES (((u32)1) << 25)
 
 /* CCMD_REG */
 #define DMA_CCMD_ICC (((u64)1) << 63)
@@ -187,158 +202,106 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define dma_frcd_source_id(c) (c & 0xffff)
 #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
 
-/*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
-struct root_entry {
-	u64	val;
-	u64	rsvd1;
-};
-#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
-static inline bool root_present(struct root_entry *root)
-{
-	return (root->val & 1);
-}
-static inline void set_root_present(struct root_entry *root)
-{
-	root->val |= 1;
-}
-static inline void set_root_value(struct root_entry *root, unsigned long value)
-{
-	root->val |= value & PAGE_MASK_4K;
+#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
+
+#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
+{\
+	cycles_t start_time = get_cycles();\
+	while (1) {\
+		sts = op (iommu->reg + offset);\
+		if (cond)\
+			break;\
+		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
+			panic("DMAR hardware is malfunctioning\n");\
+		cpu_relax();\
+	}\
 }
 
-struct context_entry;
-static inline struct context_entry *
-get_context_addr_from_root(struct root_entry *root)
-{
-	return (struct context_entry *)
-		(root_present(root)?phys_to_virt(
-		root->val & PAGE_MASK_4K):
-		NULL);
-}
-
-/*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: aval
- * 8-23: domain id
- */
-struct context_entry {
-	u64 lo;
-	u64 hi;
-};
-#define context_present(c) ((c).lo & 1)
-#define context_fault_disable(c) (((c).lo >> 1) & 1)
-#define context_translation_type(c) (((c).lo >> 2) & 3)
-#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
-#define context_address_width(c) ((c).hi & 7)
-#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
-
-#define context_set_present(c) do {(c).lo |= 1;} while (0)
-#define context_set_fault_enable(c) \
-	do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
-#define context_set_translation_type(c, val) \
-	do { \
-		(c).lo &= (((u64)-1) << 4) | 3; \
-		(c).lo |= ((val) & 3) << 2; \
-	} while (0)
-#define CONTEXT_TT_MULTI_LEVEL 0
-#define context_set_address_root(c, val) \
-	do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
-#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
-#define context_set_domain_id(c, val) \
-	do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
-#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
+#define QI_LENGTH 256 /* queue length */
 
-/*
- * 0: readable
- * 1: writable
- * 2-6: reserved
- * 7: super page
- * 8-11: available
- * 12-63: Host physcial address
- */
-struct dma_pte {
-	u64 val;
+enum {
+	QI_FREE,
+	QI_IN_USE,
+	QI_DONE
 };
-#define dma_clear_pte(p) do {(p).val = 0;} while (0)
-
-#define DMA_PTE_READ (1)
-#define DMA_PTE_WRITE (2)
 
-#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
-#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
-#define dma_set_pte_prot(p, prot) \
-	do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
-#define dma_set_pte_addr(p, addr) do {\
-	(p).val |= ((addr) & PAGE_MASK_4K); } while (0)
-#define dma_pte_present(p) (((p).val & 3) != 0)
+#define QI_CC_TYPE 0x1
+#define QI_IOTLB_TYPE 0x2
+#define QI_DIOTLB_TYPE 0x3
+#define QI_IEC_TYPE 0x4
+#define QI_IWD_TYPE 0x5
 
-struct intel_iommu;
+#define QI_IEC_SELECTIVE (((u64)1) << 4)
+#define QI_IEC_IIDEX(idx) (((u64)(idx & 0xffff) << 32))
+#define QI_IEC_IM(m) (((u64)(m & 0x1f) << 27))
 
-struct dmar_domain {
-	int id; /* domain id */
-	struct intel_iommu *iommu; /* back pointer to owning iommu */
+#define QI_IWD_STATUS_DATA(d) (((u64)d) << 32)
+#define QI_IWD_STATUS_WRITE (((u64)1) << 5)
 
-	struct list_head devices; /* all devices' list */
-	struct iova_domain iovad; /* iova's that belong to this domain */
+struct qi_desc {
+	u64 low, high;
+};
 
-	struct dma_pte *pgd; /* virtual address */
-	spinlock_t mapping_lock; /* page table lock */
-	int gaw; /* max guest address width */
+struct q_inval {
+	spinlock_t q_lock;
+	struct qi_desc *desc; /* invalidation queue */
+	int *desc_status; /* desc status */
+	int free_head; /* first free entry */
+	int free_tail; /* last free entry */
+	int free_cnt;
+};
 
-	/* adjusted guest address width, 0 is level 2 30-bit */
-	int agaw;
+#ifdef CONFIG_INTR_REMAP
+/* 1MB - maximum possible interrupt remapping table size */
+#define INTR_REMAP_PAGE_ORDER 8
+#define INTR_REMAP_TABLE_REG_SIZE 0xf
 
-#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
-	int flags;
-};
+#define INTR_REMAP_TABLE_ENTRIES 65536
 
-/* PCI domain-device relationship */
-struct device_domain_info {
-	struct list_head link; /* link to domain siblings */
-	struct list_head global; /* link to global list */
-	u8 bus; /* PCI bus numer */
-	u8 devfn; /* PCI devfn number */
-	struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
-	struct dmar_domain *domain; /* pointer to domain */
+struct ir_table {
+	struct irte *base;
 };
-
-extern int init_dmars(void);
+#endif
 
 struct intel_iommu {
 	void __iomem *reg; /* Pointer to hardware regs, virtual addr */
 	u64 cap;
 	u64 ecap;
-	unsigned long *domain_ids; /* bitmap of domains */
-	struct dmar_domain **domains; /* ptr to domains */
 	int seg;
 	u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
-	spinlock_t lock; /* protect context, domain ids */
 	spinlock_t register_lock; /* protect register handling */
+	int seq_id; /* sequence id of the iommu */
+
+#ifdef CONFIG_DMAR
+	unsigned long *domain_ids; /* bitmap of domains */
+	struct dmar_domain **domains; /* ptr to domains */
+	spinlock_t lock; /* protect context, domain ids */
 	struct root_entry *root_entry; /* virtual address */
 
 	unsigned int irq;
 	unsigned char name[7]; /* Device Name */
 	struct msi_msg saved_msg;
 	struct sys_device sysdev;
+#endif
+	struct q_inval *qi; /* Queued invalidation info */
+#ifdef CONFIG_INTR_REMAP
+	struct ir_table *ir_table; /* Interrupt remapping info */
+#endif
 };
 
-#ifndef CONFIG_DMAR_GFX_WA
-static inline void iommu_prepare_gfx_mapping(void)
+static inline void __iommu_flush_cache(
+	struct intel_iommu *iommu, void *addr, int size)
 {
-	return;
+	if (!ecap_coherent(iommu->ecap))
+		clflush_cache_range(addr, size);
 }
-#endif /* !CONFIG_DMAR_GFX_WA */
 
+extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+
+extern int alloc_iommu(struct dmar_drhd_unit *drhd);
+extern void free_iommu(struct intel_iommu *iommu);
+extern int dmar_enable_qi(struct intel_iommu *iommu);
+extern void qi_global_iec(struct intel_iommu *iommu);
+
+extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 #endif
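
For reference, the queued-invalidation interface this patch declares is driven by composing a 128-bit struct qi_desc from the QI_* type and field macros above and handing it to qi_submit_sync(), which returns only once the hardware has consumed the descriptor. A minimal caller-side sketch follows; the example_* wrapper is hypothetical, and only the types, macros, and externs come from this header:

/* Sketch: globally invalidate the interrupt entry cache through the
 * invalidation queue. Leaving QI_IEC_SELECTIVE clear in desc.low
 * requests a global flush rather than a per-index one. */
static void example_global_iec_flush(struct intel_iommu *iommu)
{
	struct qi_desc desc;

	desc.low = QI_IEC_TYPE;	/* descriptor type: IEC invalidation */
	desc.high = 0;		/* unused for a global IEC flush */

	qi_submit_sync(&desc, iommu);	/* blocks until hardware completes it */
}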