author     Robin Murphy <robin.murphy@arm.com>      2016-09-12 12:13:59 -0400
committer  Will Deacon <will.deacon@arm.com>        2016-09-16 04:34:22 -0400
commit     44bb7e243bd4b4e5c79de2452cd9762582f58925
tree       bf0d949ec860769d523d512dc26b20c9ce26e945 /drivers/iommu/dma-iommu.c
parent     455eb7d34ad11b09490f70c33973f9f3e31c4df6
iommu/dma: Add support for mapping MSIs
When an MSI doorbell is located downstream of an IOMMU, attaching
devices to a DMA ops domain and switching on translation leads to a rude
shock when their attempt to write to the physical address returned by
the irqchip driver faults (or worse, writes into some already-mapped
buffer) and no interrupt is forthcoming.
Address this by adding a hook for relevant irqchip drivers to call from
their compose_msi_msg() callback, to swizzle the physical address with
an appropriately-mapped IOVA for any device attached to one of our DMA
ops domains.
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
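
For context, a minimal sketch of how a downstream irqchip driver might invoke the new hook from its irq_compose_msi_msg() callback. The callback name, doorbell address, and data value below are made up for illustration; only iommu_dma_map_msi_msg() comes from this patch, and it simply returns if the device is not attached to an iommu-dma domain.

	/* Hypothetical irqchip callback (illustration only, not part of this patch). */
	#include <linux/kernel.h>
	#include <linux/irq.h>
	#include <linux/msi.h>
	#include <linux/dma-iommu.h>

	#define EXAMPLE_DOORBELL_PA	0x08020040UL	/* made-up MSI doorbell address */

	static void example_irq_compose_msi_msg(struct irq_data *data,
						struct msi_msg *msg)
	{
		phys_addr_t doorbell = EXAMPLE_DOORBELL_PA;

		/* Compose the message with the doorbell's physical address as usual... */
		msg->address_hi = upper_32_bits(doorbell);
		msg->address_lo = lower_32_bits(doorbell);
		msg->data = data->hwirq;

		/*
		 * ...then let the IOMMU DMA layer swizzle it into the IOVA at which
		 * the doorbell page has been mapped for this device, if applicable.
		 */
		iommu_dma_map_msi_msg(data->irq, msg);
	}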
Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r--  drivers/iommu/dma-iommu.c | 136
1 file changed, 121 insertions(+), 15 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 00c8a08d56e7..4329d18080cf 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -25,10 +25,28 @@
 #include <linux/huge_mm.h>
 #include <linux/iommu.h>
 #include <linux/iova.h>
+#include <linux/irq.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
 
+struct iommu_dma_msi_page {
+	struct list_head	list;
+	dma_addr_t		iova;
+	phys_addr_t		phys;
+};
+
+struct iommu_dma_cookie {
+	struct iova_domain	iovad;
+	struct list_head	msi_page_list;
+	spinlock_t		msi_lock;
+};
+
+static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
+{
+	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+}
+
 int iommu_dma_init(void)
 {
 	return iova_cache_get();
@@ -43,15 +61,19 @@ int iommu_dma_init(void)
  */
 int iommu_get_dma_cookie(struct iommu_domain *domain)
 {
-	struct iova_domain *iovad;
+	struct iommu_dma_cookie *cookie;
 
 	if (domain->iova_cookie)
 		return -EEXIST;
 
-	iovad = kzalloc(sizeof(*iovad), GFP_KERNEL);
-	domain->iova_cookie = iovad;
+	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+	if (!cookie)
+		return -ENOMEM;
 
-	return iovad ? 0 : -ENOMEM;
+	spin_lock_init(&cookie->msi_lock);
+	INIT_LIST_HEAD(&cookie->msi_page_list);
+	domain->iova_cookie = cookie;
+	return 0;
 }
 EXPORT_SYMBOL(iommu_get_dma_cookie);
 
@@ -63,14 +85,20 @@ EXPORT_SYMBOL(iommu_get_dma_cookie);
  */
 void iommu_put_dma_cookie(struct iommu_domain *domain)
 {
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iommu_dma_msi_page *msi, *tmp;
 
-	if (!iovad)
+	if (!cookie)
 		return;
 
-	if (iovad->granule)
-		put_iova_domain(iovad);
-	kfree(iovad);
+	if (cookie->iovad.granule)
+		put_iova_domain(&cookie->iovad);
+
+	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
+		list_del(&msi->list);
+		kfree(msi);
+	}
+	kfree(cookie);
 	domain->iova_cookie = NULL;
 }
 EXPORT_SYMBOL(iommu_put_dma_cookie);
@@ -88,7 +116,7 @@ EXPORT_SYMBOL(iommu_put_dma_cookie);
  */
 int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, u64 size)
 {
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	unsigned long order, base_pfn, end_pfn;
 
 	if (!iovad)
@@ -155,7 +183,7 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
 static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
 		dma_addr_t dma_limit)
 {
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	unsigned long shift = iova_shift(iovad);
 	unsigned long length = iova_align(iovad, size) >> shift;
 
@@ -171,7 +199,7 @@ static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
 /* The IOVA allocator knows what we mapped, so just unmap whatever that was */
 static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
 {
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	unsigned long shift = iova_shift(iovad);
 	unsigned long pfn = dma_addr >> shift;
 	struct iova *iova = find_iova(iovad, pfn);
@@ -294,7 +322,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
 		void (*flush_page)(struct device *, const void *, phys_addr_t))
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	struct iova *iova;
 	struct page **pages;
 	struct sg_table sgt;
@@ -386,7 +414,7 @@ dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 {
 	dma_addr_t dma_addr;
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	phys_addr_t phys = page_to_phys(page) + offset;
 	size_t iova_off = iova_offset(iovad, phys);
 	size_t len = iova_align(iovad, size + iova_off);
@@ -495,7 +523,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		int nents, int prot)
 {
 	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-	struct iova_domain *iovad = domain->iova_cookie;
+	struct iova_domain *iovad = cookie_iovad(domain);
 	struct iova *iova;
 	struct scatterlist *s, *prev = NULL;
 	dma_addr_t dma_addr;
@@ -587,3 +615,81 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 	return dma_addr == DMA_ERROR_CODE;
 }
+
+static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
+		phys_addr_t msi_addr, struct iommu_domain *domain)
+{
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	struct iommu_dma_msi_page *msi_page;
+	struct iova_domain *iovad = &cookie->iovad;
+	struct iova *iova;
+	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+	msi_addr &= ~(phys_addr_t)iova_mask(iovad);
+	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
+		if (msi_page->phys == msi_addr)
+			return msi_page;
+
+	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
+	if (!msi_page)
+		return NULL;
+
+	iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
+	if (!iova)
+		goto out_free_page;
+
+	msi_page->phys = msi_addr;
+	msi_page->iova = iova_dma_addr(iovad, iova);
+	if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
+		goto out_free_iova;
+
+	INIT_LIST_HEAD(&msi_page->list);
+	list_add(&msi_page->list, &cookie->msi_page_list);
+	return msi_page;
+
+out_free_iova:
+	__free_iova(iovad, iova);
+out_free_page:
+	kfree(msi_page);
+	return NULL;
+}
+
+void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
+{
+	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
+	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+	struct iommu_dma_cookie *cookie;
+	struct iommu_dma_msi_page *msi_page;
+	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
+	unsigned long flags;
+
+	if (!domain || !domain->iova_cookie)
+		return;
+
+	cookie = domain->iova_cookie;
+
+	/*
+	 * We disable IRQs to rule out a possible inversion against
+	 * irq_desc_lock if, say, someone tries to retarget the affinity
+	 * of an MSI from within an IPI handler.
+	 */
+	spin_lock_irqsave(&cookie->msi_lock, flags);
+	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
+	spin_unlock_irqrestore(&cookie->msi_lock, flags);
+
+	if (WARN_ON(!msi_page)) {
+		/*
+		 * We're called from a void callback, so the best we can do is
+		 * 'fail' by filling the message with obviously bogus values.
+		 * Since we got this far due to an IOMMU being present, it's
+		 * not like the existing address would have worked anyway...
+		 */
+		msg->address_hi = ~0U;
+		msg->address_lo = ~0U;
+		msg->data = ~0U;
+	} else {
+		msg->address_hi = upper_32_bits(msi_page->iova);
+		msg->address_lo &= iova_mask(&cookie->iovad);
+		msg->address_lo += lower_32_bits(msi_page->iova);
+	}
+}
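
As a worked illustration of the address swizzle in the else branch of iommu_dma_map_msi_msg() above: the masked low bits of address_lo keep the doorbell's offset within its IOVA granule, while the page address is replaced by the mapped IOVA. This is a standalone userspace sketch with made-up addresses, assuming a 4KiB granule; it is not kernel code.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t granule    = 0x1000;		/* assumed 4KiB IOVA page size */
		uint64_t iova_mask  = granule - 1;
		uint64_t msi_iova   = 0xfffff000;	/* made-up IOVA of the remapped doorbell page */
		uint32_t address_hi = 0x00000080;	/* made-up doorbell PA 0x80_08020040 */
		uint32_t address_lo = 0x08020040;

		/* Mirrors the else branch: keep the in-page offset, swap in the IOVA page. */
		address_hi = (uint32_t)(msi_iova >> 32);
		address_lo &= (uint32_t)iova_mask;
		address_lo += (uint32_t)msi_iova;

		/* Prints address_hi=0x00000000 address_lo=0xfffff040 */
		printf("address_hi=0x%08x address_lo=0x%08x\n", address_hi, address_lo);
		return 0;
	}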