aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/iommu/iova.c
diff options
context:
space:
mode:
authorRobin Murphy <robin.murphy@arm.com>2015-01-12 12:51:15 -0500
committerJoerg Roedel <jroedel@suse.de>2015-01-19 08:55:22 -0500
commit1b72250076dde4276acecf3a7da722b185703e78 (patch)
tree86711d7e91dfb6ad914c23a18e8a12f5f30169ee /drivers/iommu/iova.c
parent85b4545629663486b7f71047ce3b54fa0ad3eb28 (diff)
iommu: Make IOVA domain low limit flexible
To share the IOVA allocator with other architectures, it needs to accommodate more general aperture restrictions; move the lower limit from a compile-time constant to a runtime domain property to allow IOVA domains with different requirements to co-exist. Also reword the slightly unclear description of alloc_iova since we're touching it anyway. Signed-off-by: Robin Murphy <robin.murphy@arm.com> Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/iova.c')
-rw-r--r--  drivers/iommu/iova.c | 10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 520b8c8ae0c4..a3dbba8caa19 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -55,11 +55,13 @@ void free_iova_mem(struct iova *iova)
55} 55}
56 56
57void 57void
58init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) 58init_iova_domain(struct iova_domain *iovad, unsigned long start_pfn,
59 unsigned long pfn_32bit)
59{ 60{
60 spin_lock_init(&iovad->iova_rbtree_lock); 61 spin_lock_init(&iovad->iova_rbtree_lock);
61 iovad->rbroot = RB_ROOT; 62 iovad->rbroot = RB_ROOT;
62 iovad->cached32_node = NULL; 63 iovad->cached32_node = NULL;
64 iovad->start_pfn = start_pfn;
63 iovad->dma_32bit_pfn = pfn_32bit; 65 iovad->dma_32bit_pfn = pfn_32bit;
64} 66}
65 67
@@ -162,7 +164,7 @@ move_left:
162 if (!curr) { 164 if (!curr) {
163 if (size_aligned) 165 if (size_aligned)
164 pad_size = iova_get_pad_size(size, limit_pfn); 166 pad_size = iova_get_pad_size(size, limit_pfn);
165 if ((IOVA_START_PFN + size + pad_size) > limit_pfn) { 167 if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
166 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 168 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
167 return -ENOMEM; 169 return -ENOMEM;
168 } 170 }
@@ -237,8 +239,8 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
237 * @size: - size of page frames to allocate 239 * @size: - size of page frames to allocate
238 * @limit_pfn: - max limit address 240 * @limit_pfn: - max limit address
239 * @size_aligned: - set if size_aligned address range is required 241 * @size_aligned: - set if size_aligned address range is required
240 * This function allocates an iova in the range limit_pfn to IOVA_START_PFN 242 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
241 * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned 243 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
242 * flag is set then the allocated address iova->pfn_lo will be naturally 244 * flag is set then the allocated address iova->pfn_lo will be naturally
243 * aligned on roundup_power_of_two(size). 245 * aligned on roundup_power_of_two(size).
244 */ 246 */