Diffstat (limited to 'drivers/iommu/iova.c')
 -rw-r--r--  drivers/iommu/iova.c  |  53
 1 file changed, 49 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f6b17e6af2fb..9dd8208312c2 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -18,13 +18,58 @@
  */
 
 #include <linux/iova.h>
+#include <linux/slab.h>
+
+static struct kmem_cache *iommu_iova_cache;
+
+int iommu_iova_cache_init(void)
+{
+	int ret = 0;
+
+	iommu_iova_cache = kmem_cache_create("iommu_iova",
+					 sizeof(struct iova),
+					 0,
+					 SLAB_HWCACHE_ALIGN,
+					 NULL);
+	if (!iommu_iova_cache) {
+		pr_err("Couldn't create iova cache\n");
+		ret = -ENOMEM;
+	}
+
+	return ret;
+}
+
+void iommu_iova_cache_destroy(void)
+{
+	kmem_cache_destroy(iommu_iova_cache);
+}
+
+struct iova *alloc_iova_mem(void)
+{
+	return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
+}
+
+void free_iova_mem(struct iova *iova)
+{
+	kmem_cache_free(iommu_iova_cache, iova);
+}
 
 void
-init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
+init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+	unsigned long start_pfn, unsigned long pfn_32bit)
 {
+	/*
+	 * IOVA granularity will normally be equal to the smallest
+	 * supported IOMMU page size; both *must* be capable of
+	 * representing individual CPU pages exactly.
+	 */
+	BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
+
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
 	iovad->cached32_node = NULL;
+	iovad->granule = granule;
+	iovad->start_pfn = start_pfn;
 	iovad->dma_32bit_pfn = pfn_32bit;
 }
 
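For context, a minimal sketch (not part of the patch) of how a caller might combine the consolidated cache helpers with the new init_iova_domain() signature above. The function name, granule and PFN values are illustrative assumptions, not code from any in-tree driver:

#include <linux/iova.h>
#include <linux/sizes.h>
#include <linux/dma-mapping.h>

/* Hypothetical per-driver IOVA domain used only for this example. */
static struct iova_domain example_iovad;

static int example_iommu_init(void)
{
	int ret;

	/* Create the shared 'iommu_iova' kmem_cache before any allocations. */
	ret = iommu_iova_cache_init();
	if (ret)
		return ret;

	/*
	 * Assumed policy for the example: a 4K IOVA granule, allocations
	 * starting at PFN 1 (so PFN 0 stays reserved), and the 32-bit DMA
	 * boundary recorded for the cached rbtree lookups.
	 */
	init_iova_domain(&example_iovad, SZ_4K, 1,
			 DMA_BIT_MASK(32) >> PAGE_SHIFT);
	return 0;
}

Passing the granule at initialisation lets each domain track the smallest IOMMU page size it supports, while the BUG_ON() in the hunk above guarantees that granule can still represent individual CPU pages exactly.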
@@ -127,7 +172,7 @@ move_left:
 	if (!curr) {
 		if (size_aligned)
 			pad_size = iova_get_pad_size(size, limit_pfn);
-		if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+		if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
 			spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 			return -ENOMEM;
 		}
@@ -202,8 +247,8 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
  * @size: - size of page frames to allocate
  * @limit_pfn: - max limit address
  * @size_aligned: - set if size_aligned address range is required
- * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
+ * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
+ * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
  * flag is set then the allocated address iova->pfn_lo will be naturally
  * aligned on roundup_power_of_two(size).
  */
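A second minimal sketch of the allocation path this comment describes, using the existing alloc_iova(), find_iova() and __free_iova() entry points; the wrapper names and request sizes are assumptions for illustration only:

#include <linux/iova.h>
#include <linux/log2.h>

/* Allocate 'nrpages' IOVA pages below the 32-bit boundary (sketch only). */
static dma_addr_t example_alloc_range(struct iova_domain *iovad,
				      unsigned long nrpages)
{
	struct iova *new_iova;

	/*
	 * The search runs top-down from limit_pfn toward iovad->start_pfn;
	 * with size_aligned set, pfn_lo ends up aligned to
	 * roundup_power_of_two(nrpages).
	 */
	new_iova = alloc_iova(iovad, nrpages, iovad->dma_32bit_pfn, true);
	if (!new_iova)
		return 0;

	/* IOVA PFNs are in units of the domain granule set at init time. */
	return (dma_addr_t)new_iova->pfn_lo << ilog2(iovad->granule);
}

static void example_free_range(struct iova_domain *iovad, dma_addr_t addr)
{
	struct iova *iova = find_iova(iovad, addr >> ilog2(iovad->granule));

	if (iova)
		__free_iova(iovad, iova);
}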