aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/iommu/intel-iommu.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-01-10 14:08:21 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-01-10 14:08:21 -0500
commit1c8106528aa6bf16b3f457de80df1cf7462a49a4 (patch)
tree4aed009c4a36195fd14c9f8d70fe2723a49583da /drivers/iommu/intel-iommu.c
parent1a464cbb3d483f2f195b614cffa4aa1b910a0440 (diff)
parentf93ea733878733f3e98475bc3e2ccf789bebcfb8 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (53 commits) iommu/amd: Set IOTLB invalidation timeout iommu/amd: Init stats for iommu=pt iommu/amd: Remove unnecessary cache flushes in amd_iommu_resume iommu/amd: Add invalidate-context call-back iommu/amd: Add amd_iommu_device_info() function iommu/amd: Adapt IOMMU driver to PCI register name changes iommu/amd: Add invalid_ppr callback iommu/amd: Implement notifiers for IOMMUv2 iommu/amd: Implement IO page-fault handler iommu/amd: Add routines to bind/unbind a pasid iommu/amd: Implement device acquisition code for IOMMUv2 iommu/amd: Add driver stub for AMD IOMMUv2 support iommu/amd: Add stat counter for IOMMUv2 events iommu/amd: Add device errata handling iommu/amd: Add function to get IOMMUv2 domain for pdev iommu/amd: Implement function to send PPR completions iommu/amd: Implement functions to manage GCR3 table iommu/amd: Implement IOMMUv2 TLB flushing routines iommu/amd: Add support for IOMMUv2 domain mode iommu/amd: Add amd_iommu_domain_direct_map function ...
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--drivers/iommu/intel-iommu.c79
1 file changed, 72 insertions, 7 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 31053a951c34..c9c6053198d4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -79,6 +79,24 @@
79#define LEVEL_STRIDE (9) 79#define LEVEL_STRIDE (9)
80#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1) 80#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
81 81
82/*
83 * This bitmap is used to advertise the page sizes our hardware support
84 * to the IOMMU core, which will then use this information to split
85 * physically contiguous memory regions it is mapping into page sizes
86 * that we support.
87 *
88 * Traditionally the IOMMU core just handed us the mappings directly,
89 * after making sure the size is an order of a 4KiB page and that the
90 * mapping has natural alignment.
91 *
92 * To retain this behavior, we currently advertise that we support
93 * all page sizes that are an order of 4KiB.
94 *
95 * If at some point we'd like to utilize the IOMMU core's new behavior,
96 * we could change this to advertise the real page sizes we support.
97 */
98#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
99
82static inline int agaw_to_level(int agaw) 100static inline int agaw_to_level(int agaw)
83{ 101{
84 return agaw + 2; 102 return agaw + 2;
@@ -3979,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
3979 3997
3980static int intel_iommu_map(struct iommu_domain *domain, 3998static int intel_iommu_map(struct iommu_domain *domain,
3981 unsigned long iova, phys_addr_t hpa, 3999 unsigned long iova, phys_addr_t hpa,
3982 int gfp_order, int iommu_prot) 4000 size_t size, int iommu_prot)
3983{ 4001{
3984 struct dmar_domain *dmar_domain = domain->priv; 4002 struct dmar_domain *dmar_domain = domain->priv;
3985 u64 max_addr; 4003 u64 max_addr;
3986 int prot = 0; 4004 int prot = 0;
3987 size_t size;
3988 int ret; 4005 int ret;
3989 4006
3990 if (iommu_prot & IOMMU_READ) 4007 if (iommu_prot & IOMMU_READ)
@@ -3994,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
3994 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) 4011 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3995 prot |= DMA_PTE_SNP; 4012 prot |= DMA_PTE_SNP;
3996 4013
3997 size = PAGE_SIZE << gfp_order;
3998 max_addr = iova + size; 4014 max_addr = iova + size;
3999 if (dmar_domain->max_addr < max_addr) { 4015 if (dmar_domain->max_addr < max_addr) {
4000 u64 end; 4016 u64 end;
@@ -4017,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
4017 return ret; 4033 return ret;
4018} 4034}
4019 4035
4020static int intel_iommu_unmap(struct iommu_domain *domain, 4036static size_t intel_iommu_unmap(struct iommu_domain *domain,
4021 unsigned long iova, int gfp_order) 4037 unsigned long iova, size_t size)
4022{ 4038{
4023 struct dmar_domain *dmar_domain = domain->priv; 4039 struct dmar_domain *dmar_domain = domain->priv;
4024 size_t size = PAGE_SIZE << gfp_order;
4025 int order; 4040 int order;
4026 4041
4027 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, 4042 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
4030 if (dmar_domain->max_addr == iova + size) 4045 if (dmar_domain->max_addr == iova + size)
4031 dmar_domain->max_addr = iova; 4046 dmar_domain->max_addr = iova;
4032 4047
4033 return order; 4048 return PAGE_SIZE << order;
4034} 4049}
4035 4050
4036static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, 4051static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4060,6 +4075,54 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
4060 return 0; 4075 return 0;
4061} 4076}
4062 4077
4078/*
4079 * Group numbers are arbitrary. Device with the same group number
4080 * indicate the iommu cannot differentiate between them. To avoid
4081 * tracking used groups we just use the seg|bus|devfn of the lowest
4082 * level we're able to differentiate devices
4083 */
4084static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
4085{
4086 struct pci_dev *pdev = to_pci_dev(dev);
4087 struct pci_dev *bridge;
4088 union {
4089 struct {
4090 u8 devfn;
4091 u8 bus;
4092 u16 segment;
4093 } pci;
4094 u32 group;
4095 } id;
4096
4097 if (iommu_no_mapping(dev))
4098 return -ENODEV;
4099
4100 id.pci.segment = pci_domain_nr(pdev->bus);
4101 id.pci.bus = pdev->bus->number;
4102 id.pci.devfn = pdev->devfn;
4103
4104 if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
4105 return -ENODEV;
4106
4107 bridge = pci_find_upstream_pcie_bridge(pdev);
4108 if (bridge) {
4109 if (pci_is_pcie(bridge)) {
4110 id.pci.bus = bridge->subordinate->number;
4111 id.pci.devfn = 0;
4112 } else {
4113 id.pci.bus = bridge->bus->number;
4114 id.pci.devfn = bridge->devfn;
4115 }
4116 }
4117
4118 if (!pdev->is_virtfn && iommu_group_mf)
4119 id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
4120
4121 *groupid = id.group;
4122
4123 return 0;
4124}
4125
4063static struct iommu_ops intel_iommu_ops = { 4126static struct iommu_ops intel_iommu_ops = {
4064 .domain_init = intel_iommu_domain_init, 4127 .domain_init = intel_iommu_domain_init,
4065 .domain_destroy = intel_iommu_domain_destroy, 4128 .domain_destroy = intel_iommu_domain_destroy,
@@ -4069,6 +4132,8 @@ static struct iommu_ops intel_iommu_ops = {
4069 .unmap = intel_iommu_unmap, 4132 .unmap = intel_iommu_unmap,
4070 .iova_to_phys = intel_iommu_iova_to_phys, 4133 .iova_to_phys = intel_iommu_iova_to_phys,
4071 .domain_has_cap = intel_iommu_domain_has_cap, 4134 .domain_has_cap = intel_iommu_domain_has_cap,
4135 .device_group = intel_iommu_device_group,
4136 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
4072}; 4137};
4073 4138
4074static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) 4139static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)