path: root/drivers/iommu/iova.c
author     Linus Torvalds <torvalds@linux-foundation.org>    2015-02-12 12:16:56 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2015-02-12 12:16:56 -0500
commit     a26be149facb22d30cd92cadb26f651d6fe802c9 (patch)
tree       c467bd0cae818ef793e9a694b4cd2bdc88d9ff6b    /drivers/iommu/iova.c
parent     cdd305454ebd181fa35b648c0921fe7df27d6f3b (diff)
parent     a20cc76b9efae10c20123049df361adcd7f0e0b3 (diff)
Merge tag 'iommu-updates-v3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
 "This time with:

   - Generic page-table framework for ARM IOMMUs using the LPAE
     page-table format, ARM-SMMU and Renesas IPMMU make use of it
     already.

   - Break out the IO virtual address allocator from the Intel IOMMU so
     that it can be used by other DMA-API implementations too. The
     first user will be the ARM64 common DMA-API implementation for
     IOMMUs

   - Device tree support for Renesas IPMMU

   - Various fixes and cleanups all over the place"

* tag 'iommu-updates-v3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (36 commits)
  iommu/amd: Convert non-returned local variable to boolean when relevant
  iommu: Update my email address
  iommu/amd: Use wait_event in put_pasid_state_wait
  iommu/amd: Fix amd_iommu_free_device()
  iommu/arm-smmu: Avoid build warning
  iommu/fsl: Various cleanups
  iommu/fsl: Use %pa to print phys_addr_t
  iommu/omap: Print phys_addr_t using %pa
  iommu: Make more drivers depend on COMPILE_TEST
  iommu/ipmmu-vmsa: Fix IOMMU lookup when multiple IOMMUs are registered
  iommu: Disable on !MMU builds
  iommu/fsl: Remove unused fsl_of_pamu_ids[]
  iommu/fsl: Fix section mismatch
  iommu/ipmmu-vmsa: Use the ARM LPAE page table allocator
  iommu: Fix trace_map() to report original iova and original size
  iommu/arm-smmu: add support for iova_to_phys through ATS1PR
  iopoll: Introduce memory-mapped IO polling macros
  iommu/arm-smmu: don't touch the secure STLBIALL register
  iommu/arm-smmu: make use of generic LPAE allocator
  iommu: io-pgtable-arm: add non-secure quirk
  ...
Diffstat (limited to 'drivers/iommu/iova.c')
-rw-r--r--    drivers/iommu/iova.c    53
1 file changed, 49 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f6b17e6af2fb..9dd8208312c2 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -18,13 +18,58 @@
  */

 #include <linux/iova.h>
+#include <linux/slab.h>
+
+static struct kmem_cache *iommu_iova_cache;
+
+int iommu_iova_cache_init(void)
+{
+        int ret = 0;
+
+        iommu_iova_cache = kmem_cache_create("iommu_iova",
+                                         sizeof(struct iova),
+                                         0,
+                                         SLAB_HWCACHE_ALIGN,
+                                         NULL);
+        if (!iommu_iova_cache) {
+                pr_err("Couldn't create iova cache\n");
+                ret = -ENOMEM;
+        }
+
+        return ret;
+}
+
+void iommu_iova_cache_destroy(void)
+{
+        kmem_cache_destroy(iommu_iova_cache);
+}
+
+struct iova *alloc_iova_mem(void)
+{
+        return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
+}
+
+void free_iova_mem(struct iova *iova)
+{
+        kmem_cache_free(iommu_iova_cache, iova);
+}

 void
-init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
+init_iova_domain(struct iova_domain *iovad, unsigned long granule,
+        unsigned long start_pfn, unsigned long pfn_32bit)
 {
+        /*
+         * IOVA granularity will normally be equal to the smallest
+         * supported IOMMU page size; both *must* be capable of
+         * representing individual CPU pages exactly.
+         */
+        BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
+
         spin_lock_init(&iovad->iova_rbtree_lock);
         iovad->rbroot = RB_ROOT;
         iovad->cached32_node = NULL;
+        iovad->granule = granule;
+        iovad->start_pfn = start_pfn;
         iovad->dma_32bit_pfn = pfn_32bit;
 }

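As a rough sketch of how a DMA-API backend might use the reworked interface after this merge (not part of the patch: example_iova_setup() is a hypothetical name, the granule and pfn values are made up, and the declarations are assumed to come from linux/iova.h and linux/dma-mapping.h):

#include <linux/iova.h>          /* init_iova_domain(), iommu_iova_cache_init() */
#include <linux/dma-mapping.h>   /* DMA_BIT_MASK() */

/* Illustration only: the function name is hypothetical, the iova calls are from the patch. */
static int example_iova_setup(struct iova_domain *iovad)
{
        int ret;

        /* Create the global 'struct iova' slab cache backing alloc_iova_mem()/free_iova_mem(). */
        ret = iommu_iova_cache_init();
        if (ret)
                return ret;

        /*
         * Assumed values: a granule of one CPU page (any power of two no
         * larger than PAGE_SIZE passes the BUG_ON above), start_pfn = 1 so
         * that iova 0 stays unused, and the 32-bit boundary as a pfn.
         */
        init_iova_domain(iovad, PAGE_SIZE, 1, DMA_BIT_MASK(32) >> PAGE_SHIFT);
        return 0;
}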
@@ -127,7 +172,7 @@ move_left:
         if (!curr) {
                 if (size_aligned)
                         pad_size = iova_get_pad_size(size, limit_pfn);
-                if ((IOVA_START_PFN + size + pad_size) > limit_pfn) {
+                if ((iovad->start_pfn + size + pad_size) > limit_pfn) {
                         spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
                         return -ENOMEM;
                 }
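Worked with made-up numbers purely for illustration (not from the patch): in a domain with start_pfn = 1 and limit_pfn = 0xfffff, a size-aligned request of 16 pfns with a pad of 3 still fits, since 1 + 16 + 3 = 20 is well below 0xfffff; only when the tree walk runs out of nodes and start_pfn + size + pad_size exceeds limit_pfn does the function return -ENOMEM, so a non-zero start_pfn shrinks the usable window at the bottom of the domain rather than changing the top-down search.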
@@ -202,8 +247,8 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
  * @size: - size of page frames to allocate
  * @limit_pfn: - max limit address
  * @size_aligned: - set if size_aligned address range is required
- * This function allocates an iova in the range limit_pfn to IOVA_START_PFN
- * looking from limit_pfn instead from IOVA_START_PFN. If the size_aligned
+ * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
+ * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
  * flag is set then the allocated address iova->pfn_lo will be naturally
  * aligned on roundup_power_of_two(size).
  */
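A hedged usage sketch of the behaviour the updated comment describes (not part of the patch; the size of 8 pfns is arbitrary and iovad is assumed to point at a domain already set up with init_iova_domain()):

        struct iova *new_iova;

        /* Ask for 8 pfns below the domain's 32-bit boundary, naturally aligned. */
        new_iova = alloc_iova(iovad, 8, iovad->dma_32bit_pfn, true);
        if (!new_iova)
                return -ENOMEM; /* nothing free between iovad->start_pfn and limit_pfn */

        /* With size_aligned set, new_iova->pfn_lo is a multiple of 8 here. */
        /* ... map pages into [pfn_lo, pfn_hi] ... */

        /* Release the range again once the mapping is torn down. */
        __free_iova(iovad, new_iova);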