diff options
author | Sakari Ailus <sakari.ailus@linux.intel.com> | 2015-07-13 07:31:28 -0400 |
---|---|---|
committer | David Woodhouse <David.Woodhouse@intel.com> | 2015-07-28 10:47:58 -0400 |
commit | ae1ff3d623905947158fd3394854c23026337810 (patch) | |
tree | f4dbf8564370339f17021f74bfe37043e23f7872 /drivers/iommu | |
parent | 8f6429c7cb59f28433253575cc8e3262eed63592 (diff) |
iommu: iova: Move iova cache management to the iova library
This is necessary to separate intel-iommu from the iova library.
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/intel-iommu.c | 6 | ||||
-rw-r--r-- | drivers/iommu/iova.c | 83 |
2 files changed, 52 insertions(+), 37 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 92101597cede..2d5cf39e1053 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -3743,7 +3743,7 @@ static inline int iommu_devinfo_cache_init(void) | |||
3743 | static int __init iommu_init_mempool(void) | 3743 | static int __init iommu_init_mempool(void) |
3744 | { | 3744 | { |
3745 | int ret; | 3745 | int ret; |
3746 | ret = iommu_iova_cache_init(); | 3746 | ret = iova_cache_get(); |
3747 | if (ret) | 3747 | if (ret) |
3748 | return ret; | 3748 | return ret; |
3749 | 3749 | ||
@@ -3757,7 +3757,7 @@ static int __init iommu_init_mempool(void) | |||
3757 | 3757 | ||
3758 | kmem_cache_destroy(iommu_domain_cache); | 3758 | kmem_cache_destroy(iommu_domain_cache); |
3759 | domain_error: | 3759 | domain_error: |
3760 | iommu_iova_cache_destroy(); | 3760 | iova_cache_put(); |
3761 | 3761 | ||
3762 | return -ENOMEM; | 3762 | return -ENOMEM; |
3763 | } | 3763 | } |
@@ -3766,7 +3766,7 @@ static void __init iommu_exit_mempool(void) | |||
3766 | { | 3766 | { |
3767 | kmem_cache_destroy(iommu_devinfo_cache); | 3767 | kmem_cache_destroy(iommu_devinfo_cache); |
3768 | kmem_cache_destroy(iommu_domain_cache); | 3768 | kmem_cache_destroy(iommu_domain_cache); |
3769 | iommu_iova_cache_destroy(); | 3769 | iova_cache_put(); |
3770 | } | 3770 | } |
3771 | 3771 | ||
3772 | static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) | 3772 | static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) |
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 29f2efcf668e..ed95f7a0fad3 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c | |||
@@ -20,40 +20,6 @@ | |||
20 | #include <linux/iova.h> | 20 | #include <linux/iova.h> |
21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
22 | 22 | ||
23 | static struct kmem_cache *iommu_iova_cache; | ||
24 | |||
25 | int iommu_iova_cache_init(void) | ||
26 | { | ||
27 | int ret = 0; | ||
28 | |||
29 | iommu_iova_cache = kmem_cache_create("iommu_iova", | ||
30 | sizeof(struct iova), | ||
31 | 0, | ||
32 | SLAB_HWCACHE_ALIGN, | ||
33 | NULL); | ||
34 | if (!iommu_iova_cache) { | ||
35 | pr_err("Couldn't create iova cache\n"); | ||
36 | ret = -ENOMEM; | ||
37 | } | ||
38 | |||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | void iommu_iova_cache_destroy(void) | ||
43 | { | ||
44 | kmem_cache_destroy(iommu_iova_cache); | ||
45 | } | ||
46 | |||
47 | struct iova *alloc_iova_mem(void) | ||
48 | { | ||
49 | return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); | ||
50 | } | ||
51 | |||
52 | void free_iova_mem(struct iova *iova) | ||
53 | { | ||
54 | kmem_cache_free(iommu_iova_cache, iova); | ||
55 | } | ||
56 | |||
57 | void | 23 | void |
58 | init_iova_domain(struct iova_domain *iovad, unsigned long granule, | 24 | init_iova_domain(struct iova_domain *iovad, unsigned long granule, |
59 | unsigned long start_pfn, unsigned long pfn_32bit) | 25 | unsigned long start_pfn, unsigned long pfn_32bit) |
@@ -237,6 +203,55 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) | |||
237 | rb_insert_color(&iova->node, root); | 203 | rb_insert_color(&iova->node, root); |
238 | } | 204 | } |
239 | 205 | ||
206 | static struct kmem_cache *iova_cache; | ||
207 | static unsigned int iova_cache_users; | ||
208 | static DEFINE_MUTEX(iova_cache_mutex); | ||
209 | |||
210 | struct iova *alloc_iova_mem(void) | ||
211 | { | ||
212 | return kmem_cache_alloc(iova_cache, GFP_ATOMIC); | ||
213 | } | ||
214 | EXPORT_SYMBOL(alloc_iova_mem); | ||
215 | |||
216 | void free_iova_mem(struct iova *iova) | ||
217 | { | ||
218 | kmem_cache_free(iova_cache, iova); | ||
219 | } | ||
220 | EXPORT_SYMBOL(free_iova_mem); | ||
221 | |||
222 | int iova_cache_get(void) | ||
223 | { | ||
224 | mutex_lock(&iova_cache_mutex); | ||
225 | if (!iova_cache_users) { | ||
226 | iova_cache = kmem_cache_create( | ||
227 | "iommu_iova", sizeof(struct iova), 0, | ||
228 | SLAB_HWCACHE_ALIGN, NULL); | ||
229 | if (!iova_cache) { | ||
230 | mutex_unlock(&iova_cache_mutex); | ||
231 | printk(KERN_ERR "Couldn't create iova cache\n"); | ||
232 | return -ENOMEM; | ||
233 | } | ||
234 | } | ||
235 | |||
236 | iova_cache_users++; | ||
237 | mutex_unlock(&iova_cache_mutex); | ||
238 | |||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | void iova_cache_put(void) | ||
243 | { | ||
244 | mutex_lock(&iova_cache_mutex); | ||
245 | if (WARN_ON(!iova_cache_users)) { | ||
246 | mutex_unlock(&iova_cache_mutex); | ||
247 | return; | ||
248 | } | ||
249 | iova_cache_users--; | ||
250 | if (!iova_cache_users) | ||
251 | kmem_cache_destroy(iova_cache); | ||
252 | mutex_unlock(&iova_cache_mutex); | ||
253 | } | ||
254 | |||
240 | /** | 255 | /** |
241 | * alloc_iova - allocates an iova | 256 | * alloc_iova - allocates an iova |
242 | * @iovad: - iova domain in question | 257 | * @iovad: - iova domain in question |