diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-10-02 07:59:29 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-10-02 07:59:29 -0400 |
commit | 8c25ab8b5a04a7c559aa8fd4cabe5fc4463b8ada (patch) | |
tree | 9cdc7d9cb6576fa757277cd2e3ad8231191442b9 /drivers/iommu | |
parent | bde17b90dd9712cb61a7ab0c1ccd0f7f6aa57957 (diff) | |
parent | 15bbdec3931e617231c12b0920e497e87ec8c2c6 (diff) |
Merge git://git.infradead.org/intel-iommu
Pull IOVA fixes from David Woodhouse:
"The main fix here is the first one, fixing the over-allocation of
size-aligned requests. The other patches simply make the existing
IOVA code available to users other than the Intel VT-d driver, with no
functional change.
I concede the latter really *should* have been submitted during the
merge window, but since it's basically risk-free and people are
waiting to build on top of it and it's my fault I didn't get it in, I
(and they) would be grateful if you'd take it"
* git://git.infradead.org/intel-iommu:
iommu: Make the iova library a module
iommu: iova: Export symbols
iommu: iova: Move iova cache management to the iova library
iommu/iova: Avoid over-allocating when size-aligned
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/Kconfig | 2 | ||||
-rw-r--r-- | drivers/iommu/intel-iommu.c | 8 | ||||
-rw-r--r-- | drivers/iommu/iova.c | 120 |
3 files changed, 75 insertions, 55 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 4664c2a96c67..d9da766719c8 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -43,7 +43,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST | |||
43 | endmenu | 43 | endmenu |
44 | 44 | ||
45 | config IOMMU_IOVA | 45 | config IOMMU_IOVA |
46 | bool | 46 | tristate |
47 | 47 | ||
48 | config OF_IOMMU | 48 | config OF_IOMMU |
49 | def_bool y | 49 | def_bool y |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2d7349a3ee14..041bc1810a86 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -3215,6 +3215,8 @@ static struct iova *intel_alloc_iova(struct device *dev, | |||
3215 | 3215 | ||
3216 | /* Restrict dma_mask to the width that the iommu can handle */ | 3216 | /* Restrict dma_mask to the width that the iommu can handle */ |
3217 | dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); | 3217 | dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask); |
3218 | /* Ensure we reserve the whole size-aligned region */ | ||
3219 | nrpages = __roundup_pow_of_two(nrpages); | ||
3218 | 3220 | ||
3219 | if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { | 3221 | if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { |
3220 | /* | 3222 | /* |
@@ -3711,7 +3713,7 @@ static inline int iommu_devinfo_cache_init(void) | |||
3711 | static int __init iommu_init_mempool(void) | 3713 | static int __init iommu_init_mempool(void) |
3712 | { | 3714 | { |
3713 | int ret; | 3715 | int ret; |
3714 | ret = iommu_iova_cache_init(); | 3716 | ret = iova_cache_get(); |
3715 | if (ret) | 3717 | if (ret) |
3716 | return ret; | 3718 | return ret; |
3717 | 3719 | ||
@@ -3725,7 +3727,7 @@ static int __init iommu_init_mempool(void) | |||
3725 | 3727 | ||
3726 | kmem_cache_destroy(iommu_domain_cache); | 3728 | kmem_cache_destroy(iommu_domain_cache); |
3727 | domain_error: | 3729 | domain_error: |
3728 | iommu_iova_cache_destroy(); | 3730 | iova_cache_put(); |
3729 | 3731 | ||
3730 | return -ENOMEM; | 3732 | return -ENOMEM; |
3731 | } | 3733 | } |
@@ -3734,7 +3736,7 @@ static void __init iommu_exit_mempool(void) | |||
3734 | { | 3736 | { |
3735 | kmem_cache_destroy(iommu_devinfo_cache); | 3737 | kmem_cache_destroy(iommu_devinfo_cache); |
3736 | kmem_cache_destroy(iommu_domain_cache); | 3738 | kmem_cache_destroy(iommu_domain_cache); |
3737 | iommu_iova_cache_destroy(); | 3739 | iova_cache_put(); |
3738 | } | 3740 | } |
3739 | 3741 | ||
3740 | static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) | 3742 | static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) |
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index b7c3d923f3e1..fa0adef32bd6 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c | |||
@@ -18,42 +18,9 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/iova.h> | 20 | #include <linux/iova.h> |
21 | #include <linux/module.h> | ||
21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
22 | 23 | ||
23 | static struct kmem_cache *iommu_iova_cache; | ||
24 | |||
25 | int iommu_iova_cache_init(void) | ||
26 | { | ||
27 | int ret = 0; | ||
28 | |||
29 | iommu_iova_cache = kmem_cache_create("iommu_iova", | ||
30 | sizeof(struct iova), | ||
31 | 0, | ||
32 | SLAB_HWCACHE_ALIGN, | ||
33 | NULL); | ||
34 | if (!iommu_iova_cache) { | ||
35 | pr_err("Couldn't create iova cache\n"); | ||
36 | ret = -ENOMEM; | ||
37 | } | ||
38 | |||
39 | return ret; | ||
40 | } | ||
41 | |||
42 | void iommu_iova_cache_destroy(void) | ||
43 | { | ||
44 | kmem_cache_destroy(iommu_iova_cache); | ||
45 | } | ||
46 | |||
47 | struct iova *alloc_iova_mem(void) | ||
48 | { | ||
49 | return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC); | ||
50 | } | ||
51 | |||
52 | void free_iova_mem(struct iova *iova) | ||
53 | { | ||
54 | kmem_cache_free(iommu_iova_cache, iova); | ||
55 | } | ||
56 | |||
57 | void | 24 | void |
58 | init_iova_domain(struct iova_domain *iovad, unsigned long granule, | 25 | init_iova_domain(struct iova_domain *iovad, unsigned long granule, |
59 | unsigned long start_pfn, unsigned long pfn_32bit) | 26 | unsigned long start_pfn, unsigned long pfn_32bit) |
@@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, | |||
72 | iovad->start_pfn = start_pfn; | 39 | iovad->start_pfn = start_pfn; |
73 | iovad->dma_32bit_pfn = pfn_32bit; | 40 | iovad->dma_32bit_pfn = pfn_32bit; |
74 | } | 41 | } |
42 | EXPORT_SYMBOL_GPL(init_iova_domain); | ||
75 | 43 | ||
76 | static struct rb_node * | 44 | static struct rb_node * |
77 | __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) | 45 | __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) |
@@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) | |||
120 | } | 88 | } |
121 | } | 89 | } |
122 | 90 | ||
123 | /* Computes the padding size required, to make the | 91 | /* |
124 | * the start address naturally aligned on its size | 92 | * Computes the padding size required, to make the start address |
93 | * naturally aligned on the power-of-two order of its size | ||
125 | */ | 94 | */ |
126 | static int | 95 | static unsigned int |
127 | iova_get_pad_size(int size, unsigned int limit_pfn) | 96 | iova_get_pad_size(unsigned int size, unsigned int limit_pfn) |
128 | { | 97 | { |
129 | unsigned int pad_size = 0; | 98 | return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1); |
130 | unsigned int order = ilog2(size); | ||
131 | |||
132 | if (order) | ||
133 | pad_size = (limit_pfn + 1) % (1 << order); | ||
134 | |||
135 | return pad_size; | ||
136 | } | 99 | } |
137 | 100 | ||
138 | static int __alloc_and_insert_iova_range(struct iova_domain *iovad, | 101 | static int __alloc_and_insert_iova_range(struct iova_domain *iovad, |
@@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova) | |||
242 | rb_insert_color(&iova->node, root); | 205 | rb_insert_color(&iova->node, root); |
243 | } | 206 | } |
244 | 207 | ||
208 | static struct kmem_cache *iova_cache; | ||
209 | static unsigned int iova_cache_users; | ||
210 | static DEFINE_MUTEX(iova_cache_mutex); | ||
211 | |||
212 | struct iova *alloc_iova_mem(void) | ||
213 | { | ||
214 | return kmem_cache_alloc(iova_cache, GFP_ATOMIC); | ||
215 | } | ||
216 | EXPORT_SYMBOL(alloc_iova_mem); | ||
217 | |||
218 | void free_iova_mem(struct iova *iova) | ||
219 | { | ||
220 | kmem_cache_free(iova_cache, iova); | ||
221 | } | ||
222 | EXPORT_SYMBOL(free_iova_mem); | ||
223 | |||
224 | int iova_cache_get(void) | ||
225 | { | ||
226 | mutex_lock(&iova_cache_mutex); | ||
227 | if (!iova_cache_users) { | ||
228 | iova_cache = kmem_cache_create( | ||
229 | "iommu_iova", sizeof(struct iova), 0, | ||
230 | SLAB_HWCACHE_ALIGN, NULL); | ||
231 | if (!iova_cache) { | ||
232 | mutex_unlock(&iova_cache_mutex); | ||
233 | printk(KERN_ERR "Couldn't create iova cache\n"); | ||
234 | return -ENOMEM; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | iova_cache_users++; | ||
239 | mutex_unlock(&iova_cache_mutex); | ||
240 | |||
241 | return 0; | ||
242 | } | ||
243 | EXPORT_SYMBOL_GPL(iova_cache_get); | ||
244 | |||
245 | void iova_cache_put(void) | ||
246 | { | ||
247 | mutex_lock(&iova_cache_mutex); | ||
248 | if (WARN_ON(!iova_cache_users)) { | ||
249 | mutex_unlock(&iova_cache_mutex); | ||
250 | return; | ||
251 | } | ||
252 | iova_cache_users--; | ||
253 | if (!iova_cache_users) | ||
254 | kmem_cache_destroy(iova_cache); | ||
255 | mutex_unlock(&iova_cache_mutex); | ||
256 | } | ||
257 | EXPORT_SYMBOL_GPL(iova_cache_put); | ||
258 | |||
245 | /** | 259 | /** |
246 | * alloc_iova - allocates an iova | 260 | * alloc_iova - allocates an iova |
247 | * @iovad: - iova domain in question | 261 | * @iovad: - iova domain in question |
@@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, | |||
265 | if (!new_iova) | 279 | if (!new_iova) |
266 | return NULL; | 280 | return NULL; |
267 | 281 | ||
268 | /* If size aligned is set then round the size to | ||
269 | * to next power of two. | ||
270 | */ | ||
271 | if (size_aligned) | ||
272 | size = __roundup_pow_of_two(size); | ||
273 | |||
274 | ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, | 282 | ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, |
275 | new_iova, size_aligned); | 283 | new_iova, size_aligned); |
276 | 284 | ||
@@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, | |||
281 | 289 | ||
282 | return new_iova; | 290 | return new_iova; |
283 | } | 291 | } |
292 | EXPORT_SYMBOL_GPL(alloc_iova); | ||
284 | 293 | ||
285 | /** | 294 | /** |
286 | * find_iova - finds an iova for a given pfn | 295 |
@@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) | |||
321 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 330 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
322 | return NULL; | 331 | return NULL; |
323 | } | 332 | } |
333 | EXPORT_SYMBOL_GPL(find_iova); | ||
324 | 334 | ||
325 | /** | 335 | /** |
326 | * __free_iova - frees the given iova | 336 | * __free_iova - frees the given iova |
@@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova) | |||
339 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 349 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
340 | free_iova_mem(iova); | 350 | free_iova_mem(iova); |
341 | } | 351 | } |
352 | EXPORT_SYMBOL_GPL(__free_iova); | ||
342 | 353 | ||
343 | /** | 354 | /** |
344 | * free_iova - finds and frees the iova for a given pfn | 355 | * free_iova - finds and frees the iova for a given pfn |
@@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn) | |||
356 | __free_iova(iovad, iova); | 367 | __free_iova(iovad, iova); |
357 | 368 | ||
358 | } | 369 | } |
370 | EXPORT_SYMBOL_GPL(free_iova); | ||
359 | 371 | ||
360 | /** | 372 | /** |
361 | * put_iova_domain - destroys the iova domain | 373 |
@@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad) | |||
378 | } | 390 | } |
379 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 391 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
380 | } | 392 | } |
393 | EXPORT_SYMBOL_GPL(put_iova_domain); | ||
381 | 394 | ||
382 | static int | 395 | static int |
383 | __is_range_overlap(struct rb_node *node, | 396 | __is_range_overlap(struct rb_node *node, |
@@ -467,6 +480,7 @@ finish: | |||
467 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); | 480 | spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); |
468 | return iova; | 481 | return iova; |
469 | } | 482 | } |
483 | EXPORT_SYMBOL_GPL(reserve_iova); | ||
470 | 484 | ||
471 | /** | 485 | /** |
472 | * copy_reserved_iova - copies the reserved between domains | 486 | * copy_reserved_iova - copies the reserved between domains |
@@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) | |||
493 | } | 507 | } |
494 | spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); | 508 | spin_unlock_irqrestore(&from->iova_rbtree_lock, flags); |
495 | } | 509 | } |
510 | EXPORT_SYMBOL_GPL(copy_reserved_iova); | ||
496 | 511 | ||
497 | struct iova * | 512 | struct iova * |
498 | split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, | 513 | split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, |
@@ -534,3 +549,6 @@ error: | |||
534 | free_iova_mem(prev); | 549 | free_iova_mem(prev); |
535 | return NULL; | 550 | return NULL; |
536 | } | 551 | } |
552 | |||
553 | MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>"); | ||
554 | MODULE_LICENSE("GPL"); | ||