Diffstat (limited to 'include')
-rw-r--r--   include/linux/device.h          10
-rw-r--r--   include/linux/dma_remapping.h   58
-rw-r--r--   include/linux/intel-iommu.h    108
-rw-r--r--   include/linux/iommu.h           18
4 files changed, 122 insertions, 72 deletions
diff --git a/include/linux/device.h b/include/linux/device.h
index 1b25c7a43f4c..6cb4640b6160 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1058,6 +1058,16 @@ static inline struct device *kobj_to_dev(struct kobject *kobj)
 	return container_of(kobj, struct device, kobj);
 }
 
+/**
+ * device_iommu_mapped - Returns true when the device DMA is translated
+ *			 by an IOMMU
+ * @dev: Device to perform the check on
+ */
+static inline bool device_iommu_mapped(struct device *dev)
+{
+	return (dev->iommu_group != NULL);
+}
+
 /* Get the wakeup routines, which depend on struct device */
 #include <linux/pm_wakeup.h>
 
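The new helper gives drivers a supported way to ask whether their DMA is translated by an IOMMU, rather than poking at dev->iommu_group directly. A minimal usage sketch (the caller below is hypothetical, not part of this patch):

	/* Hypothetical driver setup step, assuming this patch is applied. */
	static int foo_setup_dma(struct device *dev)
	{
		if (!device_iommu_mapped(dev)) {
			dev_warn(dev, "DMA is not translated by an IOMMU\n");
			return -ENODEV;
		}

		/* Safe to rely on IOMMU-provided translation/isolation here. */
		return 0;
	}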
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
deleted file mode 100644
index 21b3e7d33d68..000000000000
--- a/include/linux/dma_remapping.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DMA_REMAPPING_H
-#define _DMA_REMAPPING_H
-
-/*
- * VT-d hardware uses 4KiB page size regardless of host page size.
- */
-#define VTD_PAGE_SHIFT		(12)
-#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
-#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
-#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
-
-#define VTD_STRIDE_SHIFT	(9)
-#define VTD_STRIDE_MASK		(((u64)-1) << VTD_STRIDE_SHIFT)
-
-#define DMA_PTE_READ (1)
-#define DMA_PTE_WRITE (2)
-#define DMA_PTE_LARGE_PAGE (1 << 7)
-#define DMA_PTE_SNP (1 << 11)
-
-#define CONTEXT_TT_MULTI_LEVEL	0
-#define CONTEXT_TT_DEV_IOTLB	1
-#define CONTEXT_TT_PASS_THROUGH	2
-/* Extended context entry types */
-#define CONTEXT_TT_PT_PASID	4
-#define CONTEXT_TT_PT_PASID_DEV_IOTLB	5
-#define CONTEXT_TT_MASK (7ULL << 2)
-
-#define CONTEXT_DINVE		(1ULL << 8)
-#define CONTEXT_PRS		(1ULL << 9)
-#define CONTEXT_PASIDE		(1ULL << 11)
-
-struct intel_iommu;
-struct dmar_domain;
-struct root_entry;
-
-
-#ifdef CONFIG_INTEL_IOMMU
-extern int iommu_calculate_agaw(struct intel_iommu *iommu);
-extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
-extern int dmar_disabled;
-extern int intel_iommu_enabled;
-extern int intel_iommu_tboot_noforce;
-#else
-static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
-{
-	return 0;
-}
-static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
-{
-	return 0;
-}
-#define dmar_disabled	(1)
-#define intel_iommu_enabled (0)
-#endif
-
-
-#endif
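Most of dma_remapping.h moves into intel-iommu.h in the next file of this diff; the extended-context-entry definitions are dropped and CONTEXT_PASIDE is redefined there, while the CONFIG_INTEL_IOMMU declarations reappear at the bottom of that header. As a quick, illustrative sanity check of the page-alignment arithmetic (the helper below is hypothetical, not part of the patch):

	#include <linux/build_bug.h>

	/*
	 * VTD_PAGE_ALIGN() rounds an address up to the next 4KiB VT-d page
	 * boundary, independent of the host PAGE_SIZE; already-aligned
	 * addresses are left unchanged.
	 */
	static inline void vtd_page_align_example(void)
	{
		BUILD_BUG_ON(VTD_PAGE_ALIGN(0x12345ULL) != 0x13000ULL);
		BUILD_BUG_ON(VTD_PAGE_ALIGN(0x12000ULL) != 0x12000ULL);
	}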
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index b0ae25837361..0605f3bf6e79 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -26,7 +26,6 @@
 #include <linux/iova.h>
 #include <linux/io.h>
 #include <linux/idr.h>
-#include <linux/dma_remapping.h>
 #include <linux/mmu_notifier.h>
 #include <linux/list.h>
 #include <linux/iommu.h>
@@ -37,9 +36,29 @@
 #include <asm/iommu.h>
 
 /*
- * Intel IOMMU register specification per version 1.0 public spec.
+ * VT-d hardware uses 4KiB page size regardless of host page size.
  */
+#define VTD_PAGE_SHIFT		(12)
+#define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
+#define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
+#define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
+
+#define VTD_STRIDE_SHIFT	(9)
+#define VTD_STRIDE_MASK		(((u64)-1) << VTD_STRIDE_SHIFT)
+
+#define DMA_PTE_READ (1)
+#define DMA_PTE_WRITE (2)
+#define DMA_PTE_LARGE_PAGE (1 << 7)
+#define DMA_PTE_SNP (1 << 11)
 
+#define CONTEXT_TT_MULTI_LEVEL	0
+#define CONTEXT_TT_DEV_IOTLB	1
+#define CONTEXT_TT_PASS_THROUGH	2
+#define CONTEXT_PASIDE		BIT_ULL(3)
+
+/*
+ * Intel IOMMU register specification per version 1.0 public spec.
+ */
 #define DMAR_VER_REG	0x0	/* Arch version supported by this IOMMU */
 #define DMAR_CAP_REG	0x8	/* Hardware supported capabilities */
 #define DMAR_ECAP_REG	0x10	/* Extended capabilities supported */
@@ -151,6 +170,10 @@
  * Extended Capability Register
  */
 
+#define ecap_smpwc(e)		(((e) >> 48) & 0x1)
+#define ecap_flts(e)		(((e) >> 47) & 0x1)
+#define ecap_slts(e)		(((e) >> 46) & 0x1)
+#define ecap_smts(e)		(((e) >> 43) & 0x1)
 #define ecap_dit(e)		((e >> 41) & 0x1)
 #define ecap_pasid(e)		((e >> 40) & 0x1)
 #define ecap_pss(e)		((e >> 35) & 0x1f)
@@ -229,6 +252,7 @@
 
 /* DMA_RTADDR_REG */
 #define DMA_RTADDR_RTT (((u64)1) << 11)
+#define DMA_RTADDR_SMT (((u64)1) << 10)
 
 /* CCMD_REG */
 #define DMA_CCMD_ICC (((u64)1) << 63)
@@ -374,13 +398,18 @@ enum {
 #define QI_GRAN_NONG_PASID	2
 #define QI_GRAN_PSI_PASID	3
 
+#define qi_shift(iommu)		(DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
+
 struct qi_desc {
-	u64 low, high;
+	u64 qw0;
+	u64 qw1;
+	u64 qw2;
+	u64 qw3;
 };
 
 struct q_inval {
 	raw_spinlock_t	q_lock;
-	struct qi_desc	*desc;		/* invalidation queue */
+	void		*desc;		/* invalidation queue */
 	int		*desc_status;	/* desc status */
 	int		free_head;	/* first free entry */
 	int		free_tail;	/* last free entry */
@@ -512,15 +541,8 @@ struct intel_iommu {
 	struct iommu_flush flush;
 #endif
 #ifdef CONFIG_INTEL_IOMMU_SVM
-	/* These are large and need to be contiguous, so we allocate just
-	 * one for now. We'll maybe want to rethink that if we truly give
-	 * devices away to userspace processes (e.g. for DPDK) and don't
-	 * want to trust that userspace will use *only* the PASID it was
-	 * told to. But while it's all driver-arbitrated, we're fine. */
-	struct pasid_state_entry *pasid_state_table;
 	struct page_req_dsc *prq;
 	unsigned char prq_name[16];	/* Name for PRQ interrupt */
-	u32 pasid_max;
 #endif
 	struct q_inval *qi;		/* Queued invalidation info */
 	u32 *iommu_state;	/* Store iommu states between suspend and resume.*/
@@ -563,6 +585,49 @@ static inline void __iommu_flush_cache(
 	clflush_cache_range(addr, size);
 }
 
+/*
+ * 0: readable
+ * 1: writable
+ * 2-6: reserved
+ * 7: super page
+ * 8-10: available
+ * 11: snoop behavior
+ * 12-63: Host physcial address
+ */
+struct dma_pte {
+	u64 val;
+};
+
+static inline void dma_clear_pte(struct dma_pte *pte)
+{
+	pte->val = 0;
+}
+
+static inline u64 dma_pte_addr(struct dma_pte *pte)
+{
+#ifdef CONFIG_64BIT
+	return pte->val & VTD_PAGE_MASK;
+#else
+	/* Must have a full atomic 64-bit read */
+	return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
+#endif
+}
+
+static inline bool dma_pte_present(struct dma_pte *pte)
+{
+	return (pte->val & 3) != 0;
+}
+
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+	return (pte->val & DMA_PTE_LARGE_PAGE);
+}
+
+static inline int first_pte_in_page(struct dma_pte *pte)
+{
+	return !((unsigned long)pte & ~VTD_PAGE_MASK);
+}
+
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
 
@@ -587,10 +652,10 @@ void free_pgtable_page(void *vaddr);
 struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
 int for_each_device_domain(int (*fn)(struct device_domain_info *info,
				     void *data), void *data);
+void iommu_flush_write_buffer(struct intel_iommu *iommu);
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
 int intel_svm_init(struct intel_iommu *iommu);
-int intel_svm_exit(struct intel_iommu *iommu);
 extern int intel_svm_enable_prq(struct intel_iommu *iommu);
 extern int intel_svm_finish_prq(struct intel_iommu *iommu);
 
@@ -632,4 +697,23 @@ bool context_present(struct context_entry *context);
 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
					 u8 devfn, int alloc);
 
+#ifdef CONFIG_INTEL_IOMMU
+extern int iommu_calculate_agaw(struct intel_iommu *iommu);
+extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
+extern int dmar_disabled;
+extern int intel_iommu_enabled;
+extern int intel_iommu_tboot_noforce;
+#else
+static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+	return 0;
+}
+static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
+{
+	return 0;
+}
+#define dmar_disabled	(1)
+#define intel_iommu_enabled (0)
+#endif
+
 #endif
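With scalable mode support (ecap_smts) an invalidation descriptor grows from two quadwords to four, which is why struct qi_desc gains qw0-qw3 and q_inval.desc becomes a plain void * whose stride is decided at runtime. A hedged sketch of how a caller could locate descriptor 'index' in the queue, assuming DMAR_IQ_SHIFT is 4 as defined elsewhere in this header (the helper name is hypothetical, not part of this patch):

	/* 16-byte descriptors in legacy mode, 32-byte when ecap_smts is set. */
	static inline void *qi_desc_ptr(struct intel_iommu *iommu, int index)
	{
		struct q_inval *qi = iommu->qi;

		return qi->desc + (index << qi_shift(iommu));
	}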
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index a1d28f42cb77..e90da6b6f3d1 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -168,8 +168,8 @@ struct iommu_resv_region {
  * @map: map a physically contiguous memory region to an iommu domain
  * @unmap: unmap a physically contiguous memory region from an iommu domain
  * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
- * @tlb_range_add: Add a given iova range to the flush queue for this domain
- * @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
+ * @iotlb_range_add: Add a given iova range to the flush queue for this domain
+ * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
  *            queue
  * @iova_to_phys: translate iova to physical address
  * @add_device: add device to iommu grouping
@@ -398,6 +398,20 @@ void iommu_fwspec_free(struct device *dev);
 int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids);
 const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode);
 
+static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
+{
+	return dev->iommu_fwspec;
+}
+
+static inline void dev_iommu_fwspec_set(struct device *dev,
+					struct iommu_fwspec *fwspec)
+{
+	dev->iommu_fwspec = fwspec;
+}
+
+int iommu_probe_device(struct device *dev);
+void iommu_release_device(struct device *dev);
+
 #else /* CONFIG_IOMMU_API */
 
 struct iommu_ops {};
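The new dev_iommu_fwspec_get()/dev_iommu_fwspec_set() accessors decouple callers from where the fwspec pointer lives inside struct device. A sketch of how an IOMMU driver's add_device path might use the getter (the driver name and ops symbol are hypothetical, not part of this patch):

	static int foo_iommu_add_device(struct device *dev)
	{
		struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

		/* Not described to this driver by firmware, so not our device. */
		if (!fwspec || fwspec->ops != &foo_iommu_ops)
			return -ENODEV;

		/* ... look up the device by fwspec->ids[] and attach it ... */
		return 0;
	}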
