aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/iommu/intel-iommu.c
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2011-12-14 06:41:31 -0500
committerJoerg Roedel <joerg.roedel@amd.com>2011-12-14 06:52:09 -0500
commita06ec394c9318e2ee9209ca3c106d3fa6fbfeb00 (patch)
treef94f71b5a542b42ecea60e6f903e3f19e5b2eada /drivers/iommu/intel-iommu.c
parent175d6146738b3d04e1adcaa4a971a3b2b0dbd8af (diff)
parent6c274d1cd5b3aa0834e9f0c3f58038f42278ff8c (diff)
Merge branch 'iommu/page-sizes' into x86/amd
Conflicts: drivers/iommu/amd_iommu.c
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--drivers/iommu/intel-iommu.c30
1 file changed, 23 insertions, 7 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a004c3945c67..c181883c2f9a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -78,6 +78,24 @@
78#define LEVEL_STRIDE (9) 78#define LEVEL_STRIDE (9)
79#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1) 79#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
80 80
81/*
82 * This bitmap is used to advertise the page sizes our hardware supports
83 * to the IOMMU core, which will then use this information to split
84 * physically contiguous memory regions it is mapping into page sizes
85 * that we support.
86 *
87 * Traditionally the IOMMU core just handed us the mappings directly,
88 * after making sure the size is an order of a 4KiB page and that the
89 * mapping has natural alignment.
90 *
91 * To retain this behavior, we currently advertise that we support
92 * all page sizes that are an order of 4KiB.
93 *
94 * If at some point we'd like to utilize the IOMMU core's new behavior,
95 * we could change this to advertise the real page sizes we support.
96 */
97#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
98
81static inline int agaw_to_level(int agaw) 99static inline int agaw_to_level(int agaw)
82{ 100{
83 return agaw + 2; 101 return agaw + 2;
@@ -3979,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
3979 3997
3980static int intel_iommu_map(struct iommu_domain *domain, 3998static int intel_iommu_map(struct iommu_domain *domain,
3981 unsigned long iova, phys_addr_t hpa, 3999 unsigned long iova, phys_addr_t hpa,
3982 int gfp_order, int iommu_prot) 4000 size_t size, int iommu_prot)
3983{ 4001{
3984 struct dmar_domain *dmar_domain = domain->priv; 4002 struct dmar_domain *dmar_domain = domain->priv;
3985 u64 max_addr; 4003 u64 max_addr;
3986 int prot = 0; 4004 int prot = 0;
3987 size_t size;
3988 int ret; 4005 int ret;
3989 4006
3990 if (iommu_prot & IOMMU_READ) 4007 if (iommu_prot & IOMMU_READ)
@@ -3994,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
3994 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping) 4011 if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
3995 prot |= DMA_PTE_SNP; 4012 prot |= DMA_PTE_SNP;
3996 4013
3997 size = PAGE_SIZE << gfp_order;
3998 max_addr = iova + size; 4014 max_addr = iova + size;
3999 if (dmar_domain->max_addr < max_addr) { 4015 if (dmar_domain->max_addr < max_addr) {
4000 u64 end; 4016 u64 end;
@@ -4017,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
4017 return ret; 4033 return ret;
4018} 4034}
4019 4035
4020static int intel_iommu_unmap(struct iommu_domain *domain, 4036static size_t intel_iommu_unmap(struct iommu_domain *domain,
4021 unsigned long iova, int gfp_order) 4037 unsigned long iova, size_t size)
4022{ 4038{
4023 struct dmar_domain *dmar_domain = domain->priv; 4039 struct dmar_domain *dmar_domain = domain->priv;
4024 size_t size = PAGE_SIZE << gfp_order;
4025 int order; 4040 int order;
4026 4041
4027 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, 4042 order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
4030 if (dmar_domain->max_addr == iova + size) 4045 if (dmar_domain->max_addr == iova + size)
4031 dmar_domain->max_addr = iova; 4046 dmar_domain->max_addr = iova;
4032 4047
4033 return order; 4048 return PAGE_SIZE << order;
4034} 4049}
4035 4050
4036static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, 4051static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4069,6 +4084,7 @@ static struct iommu_ops intel_iommu_ops = {
4069 .unmap = intel_iommu_unmap, 4084 .unmap = intel_iommu_unmap,
4070 .iova_to_phys = intel_iommu_iova_to_phys, 4085 .iova_to_phys = intel_iommu_iova_to_phys,
4071 .domain_has_cap = intel_iommu_domain_has_cap, 4086 .domain_has_cap = intel_iommu_domain_has_cap,
4087 .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
4072}; 4088};
4073 4089
4074static void __devinit quirk_iommu_rwbf(struct pci_dev *dev) 4090static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)