diff options
Diffstat (limited to 'drivers/pci/intel-iommu.h')
-rw-r--r-- | drivers/pci/intel-iommu.h | 163 |
1 file changed, 12 insertions, 151 deletions
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h index afc0ad96122..9e5e98c76c0 100644 --- a/drivers/pci/intel-iommu.h +++ b/drivers/pci/intel-iommu.h | |||
@@ -27,19 +27,7 @@ | |||
27 | #include <linux/sysdev.h> | 27 | #include <linux/sysdev.h> |
28 | #include "iova.h" | 28 | #include "iova.h" |
29 | #include <linux/io.h> | 29 | #include <linux/io.h> |
30 | 30 | #include "dma_remapping.h" | |
31 | /* | ||
32 | * We need a fixed PAGE_SIZE of 4K irrespective of | ||
33 | * arch PAGE_SIZE for IOMMU page tables. | ||
34 | */ | ||
35 | #define PAGE_SHIFT_4K (12) | ||
36 | #define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K) | ||
37 | #define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K) | ||
38 | #define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K) | ||
39 | |||
40 | #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K) | ||
41 | #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) | ||
42 | #define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) | ||
43 | 31 | ||
44 | /* | 32 | /* |
45 | * Intel IOMMU register specification per version 1.0 public spec. | 33 | * Intel IOMMU register specification per version 1.0 public spec. |
@@ -187,158 +175,31 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
187 | #define dma_frcd_source_id(c) (c & 0xffff) | 175 | #define dma_frcd_source_id(c) (c & 0xffff) |
188 | #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */ | 176 | #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */ |
189 | 177 | ||
190 | /* | ||
191 | * 0: Present | ||
192 | * 1-11: Reserved | ||
193 | * 12-63: Context Ptr (12 - (haw-1)) | ||
194 | * 64-127: Reserved | ||
195 | */ | ||
196 | struct root_entry { | ||
197 | u64 val; | ||
198 | u64 rsvd1; | ||
199 | }; | ||
200 | #define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry)) | ||
201 | static inline bool root_present(struct root_entry *root) | ||
202 | { | ||
203 | return (root->val & 1); | ||
204 | } | ||
205 | static inline void set_root_present(struct root_entry *root) | ||
206 | { | ||
207 | root->val |= 1; | ||
208 | } | ||
209 | static inline void set_root_value(struct root_entry *root, unsigned long value) | ||
210 | { | ||
211 | root->val |= value & PAGE_MASK_4K; | ||
212 | } | ||
213 | |||
214 | struct context_entry; | ||
215 | static inline struct context_entry * | ||
216 | get_context_addr_from_root(struct root_entry *root) | ||
217 | { | ||
218 | return (struct context_entry *) | ||
219 | (root_present(root)?phys_to_virt( | ||
220 | root->val & PAGE_MASK_4K): | ||
221 | NULL); | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * low 64 bits: | ||
226 | * 0: present | ||
227 | * 1: fault processing disable | ||
228 | * 2-3: translation type | ||
229 | * 12-63: address space root | ||
230 | * high 64 bits: | ||
231 | * 0-2: address width | ||
232 | * 3-6: aval | ||
233 | * 8-23: domain id | ||
234 | */ | ||
235 | struct context_entry { | ||
236 | u64 lo; | ||
237 | u64 hi; | ||
238 | }; | ||
239 | #define context_present(c) ((c).lo & 1) | ||
240 | #define context_fault_disable(c) (((c).lo >> 1) & 1) | ||
241 | #define context_translation_type(c) (((c).lo >> 2) & 3) | ||
242 | #define context_address_root(c) ((c).lo & PAGE_MASK_4K) | ||
243 | #define context_address_width(c) ((c).hi & 7) | ||
244 | #define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) | ||
245 | |||
246 | #define context_set_present(c) do {(c).lo |= 1;} while (0) | ||
247 | #define context_set_fault_enable(c) \ | ||
248 | do {(c).lo &= (((u64)-1) << 2) | 1;} while (0) | ||
249 | #define context_set_translation_type(c, val) \ | ||
250 | do { \ | ||
251 | (c).lo &= (((u64)-1) << 4) | 3; \ | ||
252 | (c).lo |= ((val) & 3) << 2; \ | ||
253 | } while (0) | ||
254 | #define CONTEXT_TT_MULTI_LEVEL 0 | ||
255 | #define context_set_address_root(c, val) \ | ||
256 | do {(c).lo |= (val) & PAGE_MASK_4K;} while (0) | ||
257 | #define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) | ||
258 | #define context_set_domain_id(c, val) \ | ||
259 | do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) | ||
260 | #define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0) | ||
261 | |||
262 | /* | ||
263 | * 0: readable | ||
264 | * 1: writable | ||
265 | * 2-6: reserved | ||
266 | * 7: super page | ||
267 | * 8-11: available | ||
268 | * 12-63: Host physical address | ||
269 | */ | ||
270 | struct dma_pte { | ||
271 | u64 val; | ||
272 | }; | ||
273 | #define dma_clear_pte(p) do {(p).val = 0;} while (0) | ||
274 | |||
275 | #define DMA_PTE_READ (1) | ||
276 | #define DMA_PTE_WRITE (2) | ||
277 | |||
278 | #define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0) | ||
279 | #define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) | ||
280 | #define dma_set_pte_prot(p, prot) \ | ||
281 | do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) | ||
282 | #define dma_pte_addr(p) ((p).val & PAGE_MASK_4K) | ||
283 | #define dma_set_pte_addr(p, addr) do {\ | ||
284 | (p).val |= ((addr) & PAGE_MASK_4K); } while (0) | ||
285 | #define dma_pte_present(p) (((p).val & 3) != 0) | ||
286 | |||
287 | struct intel_iommu; | ||
288 | |||
289 | struct dmar_domain { | ||
290 | int id; /* domain id */ | ||
291 | struct intel_iommu *iommu; /* back pointer to owning iommu */ | ||
292 | |||
293 | struct list_head devices; /* all devices' list */ | ||
294 | struct iova_domain iovad; /* iova's that belong to this domain */ | ||
295 | |||
296 | struct dma_pte *pgd; /* virtual address */ | ||
297 | spinlock_t mapping_lock; /* page table lock */ | ||
298 | int gaw; /* max guest address width */ | ||
299 | |||
300 | /* adjusted guest address width, 0 is level 2 30-bit */ | ||
301 | int agaw; | ||
302 | |||
303 | #define DOMAIN_FLAG_MULTIPLE_DEVICES 1 | ||
304 | int flags; | ||
305 | }; | ||
306 | |||
307 | /* PCI domain-device relationship */ | ||
308 | struct device_domain_info { | ||
309 | struct list_head link; /* link to domain siblings */ | ||
310 | struct list_head global; /* link to global list */ | ||
311 | u8 bus; /* PCI bus number */ | ||
312 | u8 devfn; /* PCI devfn number */ | ||
313 | struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ | ||
314 | struct dmar_domain *domain; /* pointer to domain */ | ||
315 | }; | ||
316 | |||
317 | extern int init_dmars(void); | ||
318 | |||
319 | struct intel_iommu { | 178 | struct intel_iommu { |
320 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ | 179 | void __iomem *reg; /* Pointer to hardware regs, virtual addr */ |
321 | u64 cap; | 180 | u64 cap; |
322 | u64 ecap; | 181 | u64 ecap; |
323 | unsigned long *domain_ids; /* bitmap of domains */ | ||
324 | struct dmar_domain **domains; /* ptr to domains */ | ||
325 | int seg; | 182 | int seg; |
326 | u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ | 183 | u32 gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */ |
327 | spinlock_t lock; /* protect context, domain ids */ | ||
328 | spinlock_t register_lock; /* protect register handling */ | 184 | spinlock_t register_lock; /* protect register handling */ |
185 | |||
186 | #ifdef CONFIG_DMAR | ||
187 | unsigned long *domain_ids; /* bitmap of domains */ | ||
188 | struct dmar_domain **domains; /* ptr to domains */ | ||
189 | spinlock_t lock; /* protect context, domain ids */ | ||
329 | struct root_entry *root_entry; /* virtual address */ | 190 | struct root_entry *root_entry; /* virtual address */ |
330 | 191 | ||
331 | unsigned int irq; | 192 | unsigned int irq; |
332 | unsigned char name[7]; /* Device Name */ | 193 | unsigned char name[7]; /* Device Name */ |
333 | struct msi_msg saved_msg; | 194 | struct msi_msg saved_msg; |
334 | struct sys_device sysdev; | 195 | struct sys_device sysdev; |
196 | #endif | ||
335 | }; | 197 | }; |
336 | 198 | ||
337 | #ifndef CONFIG_DMAR_GFX_WA | 199 | extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev); |
338 | static inline void iommu_prepare_gfx_mapping(void) | 200 | |
339 | { | 201 | extern struct intel_iommu *alloc_iommu(struct intel_iommu *iommu, |
340 | return; | 202 | struct dmar_drhd_unit *drhd); |
341 | } | 203 | extern void free_iommu(struct intel_iommu *iommu); |
342 | #endif /* !CONFIG_DMAR_GFX_WA */ | ||
343 | 204 | ||
344 | #endif | 205 | #endif |