-rw-r--r--	drivers/iommu/intel-iommu.c	 9
-rw-r--r--	drivers/iommu/iova.c		12
-rw-r--r--	include/linux/iova.h		35
3 files changed, 49 insertions, 7 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 86f9e82b015b..ae4c1a854e57 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1635,7 +1635,8 @@ static int dmar_init_reserved_ranges(void)
 	struct iova *iova;
 	int i;
 
-	init_iova_domain(&reserved_iova_list, IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
+			DMA_32BIT_PFN);
 
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 			&reserved_rbtree_key);
@@ -1693,7 +1694,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	int adjust_width, agaw;
 	unsigned long sagaw;
 
-	init_iova_domain(&domain->iovad, IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
+			DMA_32BIT_PFN);
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */
@@ -4316,7 +4318,8 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 {
 	int adjust_width;
 
-	init_iova_domain(&domain->iovad, IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
+			DMA_32BIT_PFN);
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index a3dbba8caa19..9dd8208312c2 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -55,12 +55,20 @@ void free_iova_mem(struct iova *iova)
 }
 
 void
-init_iova_domain(struct iova_domain *iovad, unsigned long start_pfn,
-	unsigned long pfn_32bit)
+init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+	unsigned long start_pfn, unsigned long pfn_32bit)
 {
+	/*
+	 * IOVA granularity will normally be equal to the smallest
+	 * supported IOMMU page size; both *must* be capable of
+	 * representing individual CPU pages exactly.
+	 */
+	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
+
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
 	iovad->cached32_node = NULL;
+	iovad->granule = granule;
 	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = pfn_32bit;
 }
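
As a caller-side sketch only (the granule, start pfn and 32-bit pfn values below are placeholders, not taken from this patch), a driver whose IOMMU handles 4KiB pages would now pass the granule explicitly when setting up its domain:

	struct iova_domain iovad;
	unsigned long start_pfn = 1;			/* placeholder lower bound */
	unsigned long dma_32bit_pfn = 1UL << (32 - 12);	/* placeholder 32-bit limit, in 4KiB pfns */

	/*
	 * The granule must be a power of two no larger than PAGE_SIZE,
	 * or the BUG_ON() above fires; SZ_4K (from <linux/sizes.h>)
	 * satisfies both on any configuration with 4KiB or larger pages.
	 */
	init_iova_domain(&iovad, SZ_4K, start_pfn, dma_32bit_pfn);
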
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 591b19626b46..3920a19d8194 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -28,6 +28,7 @@ struct iova_domain {
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
 	struct rb_node	*cached32_node; /* Save last alloced node */
+	unsigned long	granule;	/* pfn granularity for this domain */
 	unsigned long	start_pfn;	/* Lower limit for this domain */
 	unsigned long	dma_32bit_pfn;
 };
@@ -37,6 +38,36 @@ static inline unsigned long iova_size(struct iova *iova)
 	return iova->pfn_hi - iova->pfn_lo + 1;
 }
 
+static inline unsigned long iova_shift(struct iova_domain *iovad)
+{
+	return __ffs(iovad->granule);
+}
+
+static inline unsigned long iova_mask(struct iova_domain *iovad)
+{
+	return iovad->granule - 1;
+}
+
+static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
+{
+	return iova & iova_mask(iovad);
+}
+
+static inline size_t iova_align(struct iova_domain *iovad, size_t size)
+{
+	return ALIGN(size, iovad->granule);
+}
+
+static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
+{
+	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
+}
+
+static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
+{
+	return iova >> iova_shift(iovad);
+}
+
 int iommu_iova_cache_init(void);
 void iommu_iova_cache_destroy(void);
 
@@ -50,8 +81,8 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
 	unsigned long pfn_hi);
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
-void init_iova_domain(struct iova_domain *iovad, unsigned long start_pfn,
-	unsigned long pfn_32bit);
+void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+	unsigned long start_pfn, unsigned long pfn_32bit);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
 void put_iova_domain(struct iova_domain *iovad);
 struct iova *split_and_remove_iova(struct iova_domain *iovad,
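
Taken together, the new helpers let callers work in bytes and bus addresses while the allocator keeps counting granule-sized pfns. A rough usage sketch, assuming a domain initialised with a 4KiB granule as above and eliding error handling (the sizes and variable names are illustrative only):

	size_t len = 6000;		/* bytes; not a multiple of the granule */
	unsigned long npages = iova_align(&iovad, len) >> iova_shift(&iovad);	/* rounds up to 2 pages */
	struct iova *new_iova, *found;
	dma_addr_t dev_addr;

	new_iova = alloc_iova(&iovad, npages, iovad.dma_32bit_pfn, true);
	if (new_iova) {
		dev_addr = iova_dma_addr(&iovad, new_iova);	/* pfn_lo << iova_shift() */
		/* a bus address inside the allocation maps back to the same node */
		found = find_iova(&iovad, iova_pfn(&iovad, dev_addr));
	}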