Diffstat (limited to 'drivers/iommu/amd_iommu.c')
 drivers/iommu/amd_iommu.c | 32 ++++++++++++++++++++++++--------
 1 file changed, 24 insertions(+), 8 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2a46b1d7a601..a7cbcd46af9e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -44,6 +44,24 @@
 
 #define LOOP_TIMEOUT	100000
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define AMD_IOMMU_PGSIZES	(~0xFFFUL)
+
 static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 
 /* A list of preallocated protection domains */
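
Annotation (not part of the patch): each set bit k in a pgsize_bitmap advertises support for a page size of 1UL << k bytes, so ~0xFFFUL, which sets every bit from 12 upward, claims every power-of-two size that is a multiple of 4KiB. A minimal user-space sketch that decodes the constant; the 1KiB..2MiB display range is an arbitrary choice for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long pgsizes = ~0xFFFUL;	/* AMD_IOMMU_PGSIZES */

	/* bit k set => page size (1UL << k) is advertised as supported */
	for (int k = 10; k <= 21; k++)
		printf("%#9lx: %s\n", 1UL << k,
		       pgsizes & (1UL << k) ? "supported" : "not supported");
	return 0;
}

Bits 10 and 11 (1KiB, 2KiB) print as unsupported; everything from bit 12 (4KiB) upward prints as supported.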
@@ -3093,9 +3111,8 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 }
 
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
-			 phys_addr_t paddr, int gfp_order, int iommu_prot)
+			 phys_addr_t paddr, size_t page_size, int iommu_prot)
 {
-	unsigned long page_size = 0x1000UL << gfp_order;
 	struct protection_domain *domain = dom->priv;
 	int prot = 0;
 	int ret;
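
Annotation: the signature change above replaces the old order-based convention (gfp_order, counted in powers of two of 4KiB) with an explicit byte count, so the driver no longer derives page_size itself. The two conventions relate as in this user-space sketch, where order_to_size is a hypothetical stand-in for the deleted line:

#include <stdio.h>

/* hypothetical stand-in for what the old amd_iommu_map computed */
static unsigned long order_to_size(int gfp_order)
{
	return 0x1000UL << gfp_order;
}

int main(void)
{
	/* order 0 is a 4KiB page, order 9 a 2MiB large page */
	printf("order 0 -> %#lx\n", order_to_size(0));
	printf("order 9 -> %#lx\n", order_to_size(9));
	return 0;
}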
@@ -3115,24 +3132,22 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	return ret;
 }
 
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
-			   int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+			      size_t page_size)
 {
 	struct protection_domain *domain = dom->priv;
-	unsigned long page_size, unmap_size;
+	size_t unmap_size;
 
 	if (domain->mode == PAGE_MODE_NONE)
 		return -EINVAL;
 
-	page_size  = 0x1000UL << gfp_order;
-
 	mutex_lock(&domain->api_lock);
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
 	mutex_unlock(&domain->api_lock);
 
 	domain_flush_tlb_pde(domain);
 
-	return get_order(unmap_size);
+	return unmap_size;
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
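
Annotation: amd_iommu_unmap now reports the number of bytes actually unmapped instead of folding it through get_order(), which can only express power-of-two multiples of PAGE_SIZE. A sketch of the difference, using a user-space reimplementation of get_order() for 4KiB pages (illustrative, not the kernel's):

#include <stdio.h>

/* user-space stand-in for the kernel's get_order() with 4KiB pages */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> 12;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	unsigned long unmap_size = 0x200000;	/* one 2MiB large page */

	/* old convention: the byte count was collapsed into an order... */
	printf("old return value: order %d\n", get_order(unmap_size));
	/* ...new convention: it travels back to the caller unmodified */
	printf("new return value: %#lx bytes\n", unmap_size);
	return 0;
}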
@@ -3182,6 +3197,7 @@ static struct iommu_ops amd_iommu_ops = {
 	.unmap = amd_iommu_unmap,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.domain_has_cap = amd_iommu_domain_has_cap,
+	.pgsize_bitmap	= AMD_IOMMU_PGSIZES,
 };
 
 /*****************************************************************************
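
Annotation: with .pgsize_bitmap wired into iommu_ops, the IOMMU core can split a physically contiguous region into chunks the hardware accepts. A hedged sketch of one way a core could consult such a bitmap; pick_pgsize is hypothetical, and a real implementation must also honor IOVA and physical-address alignment, which this ignores:

#include <stdio.h>

#define AMD_IOMMU_PGSIZES	(~0xFFFUL)

/* hypothetical helper: largest advertised page size not exceeding size */
static unsigned long pick_pgsize(unsigned long bitmap, unsigned long size)
{
	for (int k = 8 * sizeof(unsigned long) - 1; k >= 0; k--)
		if ((bitmap & (1UL << k)) && (1UL << k) <= size)
			return 1UL << k;
	return 0;
}

int main(void)
{
	unsigned long sizes[] = { 0x1000, 0x3000, 0x200000 };

	for (int i = 0; i < 3; i++)
		printf("request %#7lx -> map a %#7lx chunk first\n",
		       sizes[i], pick_pgsize(AMD_IOMMU_PGSIZES, sizes[i]));
	return 0;
}

A 0x3000 request, for example, would be mapped as a 0x2000 chunk followed by a 0x1000 chunk rather than rejected, which is exactly the splitting the comment above AMD_IOMMU_PGSIZES describes.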