author | Robin Murphy <robin.murphy@arm.com> | 2015-01-12 12:51:15 -0500
---|---|---
committer | Joerg Roedel <jroedel@suse.de> | 2015-01-19 08:55:22 -0500
commit | 1b72250076dde4276acecf3a7da722b185703e78 |
tree | 86711d7e91dfb6ad914c23a18e8a12f5f30169ee /drivers/iommu |
parent | 85b4545629663486b7f71047ce3b54fa0ad3eb28 |
iommu: Make IOVA domain low limit flexible
To share the IOVA allocator with other architectures, it needs to
accommodate more general aperture restrictions; move the lower limit
from a compile-time constant to a runtime domain property to allow
IOVA domains with different requirements to co-exist.
Also reword the slightly unclear description of alloc_iova since we're
touching it anyway.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/intel-iommu.c | 9
-rw-r--r-- | drivers/iommu/iova.c | 10
2 files changed, 12 insertions, 7 deletions
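Before the diffs, a rough sketch of the new calling convention (not part of this patch): existing VT-d call sites keep their historical lower bound by passing the new IOVA_START_PFN constant, while a driver whose usable IOVA window does not start at PFN 1 can now supply its own start PFN. Apart from init_iova_domain(), the names below are hypothetical.

```c
/*
 * Sketch only, assuming the post-patch signature
 * init_iova_domain(iovad, start_pfn, pfn_32bit); "example_iovad",
 * "example_init_iova" and "aperture_base" are made-up names.
 */
#include <linux/iova.h>
#include <linux/dma-mapping.h>

static struct iova_domain example_iovad;

static void example_init_iova(phys_addr_t aperture_base)
{
	/* The lower limit is now a per-domain runtime value, not a compile-time constant. */
	init_iova_domain(&example_iovad,
			 aperture_base >> PAGE_SHIFT,		/* start_pfn: aperture-specific lower limit */
			 DMA_BIT_MASK(32) >> PAGE_SHIFT);	/* pfn_32bit: 32-bit DMA boundary, as before */
}
```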
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index e758d8ed8fb5..86f9e82b015b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -71,6 +71,9 @@
 					__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
 #define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
 
+/* IO virtual address start page frame number */
+#define IOVA_START_PFN		(1)
+
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
 #define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))
@@ -1632,7 +1635,7 @@ static int dmar_init_reserved_ranges(void)
 	struct iova *iova;
 	int i;
 
-	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
+	init_iova_domain(&reserved_iova_list, IOVA_START_PFN, DMA_32BIT_PFN);
 
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 		&reserved_rbtree_key);
@@ -1690,7 +1693,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	int adjust_width, agaw;
 	unsigned long sagaw;
 
-	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, IOVA_START_PFN, DMA_32BIT_PFN);
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */
@@ -4313,7 +4316,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 {
 	int adjust_width;
 
-	init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, IOVA_START_PFN, DMA_32BIT_PFN);
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 520b8c8ae0c4..a3dbba8caa19 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -55,11 +55,13 @@ void free_iova_mem(struct iova *iova)
 }
 
 void
-init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
+init_iova_domain(struct iova_domain *iovad, unsigned long start_pfn,
+	unsigned long pfn_32bit)
 {
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
 	iovad->cached32_node = NULL;
+	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = pfn_32bit;
 }
 
@@ -162,7 +164,7 @@ move_left:
 	if (!curr) {
 		if (size_aligned)
 			pad_size = iova_get_pad_size(size, limit_pfn);
-		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
 			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 			return -ENOMEM;
 		}
@@ -237,8 +239,8 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
  * @size: - size of page frames to allocate
  * @limit_pfn: - max limit address
  * @size_aligned: - set if size_aligned address range is required
- * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
+ * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
+ * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
  * flag is set then the allocated address iova->pfn_lo will be naturally
  * aligned on roundup_power_of_two(size).
  */
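The reworded check in __alloc_and_insert_iova_range() can be exercised in isolation with a small stand-alone model (plain C, not kernel code): a request only fits if it lies between the domain's start_pfn and the caller's limit_pfn, so two domains with different lower limits give different answers for the same request.

```c
/* Stand-alone model of the new lower-bound check; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct iovad_model {
	unsigned long start_pfn;	/* runtime lower limit, previously the constant IOVA_START_PFN (1) */
};

/* Mirrors: if ((iovad->start_pfn + size + pad_size) > limit_pfn) return -ENOMEM; */
static bool request_fits(const struct iovad_model *iovad, unsigned long size,
			 unsigned long pad_size, unsigned long limit_pfn)
{
	return (iovad->start_pfn + size + pad_size) <= limit_pfn;
}

int main(void)
{
	struct iovad_model legacy = { .start_pfn = 1 };		/* old hard-coded behaviour */
	struct iovad_model high = { .start_pfn = 0x100000 };	/* hypothetical aperture base */

	/* Same request, same limit: fits in the legacy domain (1), not in the high one (0). */
	printf("legacy: %d, high: %d\n",
	       request_fits(&legacy, 0x1000, 0, 0x10000),
	       request_fits(&high, 0x1000, 0, 0x10000));
	return 0;
}
```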