diff options
-rw-r--r--  drivers/iommu/dma-iommu.c | 119
-rw-r--r--  include/linux/dma-iommu.h |   6
2 files changed, 102 insertions(+), 23 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 2db0d641cf45..de41ead6542a 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
37 | phys_addr_t phys; | 37 | phys_addr_t phys; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | enum iommu_dma_cookie_type { | ||
41 | IOMMU_DMA_IOVA_COOKIE, | ||
42 | IOMMU_DMA_MSI_COOKIE, | ||
43 | }; | ||
44 | |||
40 | struct iommu_dma_cookie { | 45 | struct iommu_dma_cookie { |
41 | struct iova_domain iovad; | 46 | enum iommu_dma_cookie_type type; |
42 | struct list_head msi_page_list; | 47 | union { |
43 | spinlock_t msi_lock; | 48 | /* Full allocator for IOMMU_DMA_IOVA_COOKIE */ |
49 | struct iova_domain iovad; | ||
50 | /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */ | ||
51 | dma_addr_t msi_iova; | ||
52 | }; | ||
53 | struct list_head msi_page_list; | ||
54 | spinlock_t msi_lock; | ||
44 | }; | 55 | }; |
45 | 56 | ||
57 | static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) | ||
58 | { | ||
59 | if (cookie->type == IOMMU_DMA_IOVA_COOKIE) | ||
60 | return cookie->iovad.granule; | ||
61 | return PAGE_SIZE; | ||
62 | } | ||
63 | |||
46 | static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain) | 64 | static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain) |
47 | { | 65 | { |
48 | return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad; | 66 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
67 | |||
68 | if (cookie->type == IOMMU_DMA_IOVA_COOKIE) | ||
69 | return &cookie->iovad; | ||
70 | return NULL; | ||
71 | } | ||
72 | |||
73 | static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type) | ||
74 | { | ||
75 | struct iommu_dma_cookie *cookie; | ||
76 | |||
77 | cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); | ||
78 | if (cookie) { | ||
79 | spin_lock_init(&cookie->msi_lock); | ||
80 | INIT_LIST_HEAD(&cookie->msi_page_list); | ||
81 | cookie->type = type; | ||
82 | } | ||
83 | return cookie; | ||
49 | } | 84 | } |
50 | 85 | ||
51 | int iommu_dma_init(void) | 86 | int iommu_dma_init(void) |
@@ -62,25 +97,53 @@ int iommu_dma_init(void)
62 | */ | 97 | */ |
63 | int iommu_get_dma_cookie(struct iommu_domain *domain) | 98 | int iommu_get_dma_cookie(struct iommu_domain *domain) |
64 | { | 99 | { |
100 | if (domain->iova_cookie) | ||
101 | return -EEXIST; | ||
102 | |||
103 | domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE); | ||
104 | if (!domain->iova_cookie) | ||
105 | return -ENOMEM; | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | EXPORT_SYMBOL(iommu_get_dma_cookie); | ||
110 | |||
111 | /** | ||
112 | * iommu_get_msi_cookie - Acquire just MSI remapping resources | ||
113 | * @domain: IOMMU domain to prepare | ||
114 | * @base: Start address of IOVA region for MSI mappings | ||
115 | * | ||
116 | * Users who manage their own IOVA allocation and do not want DMA API support, | ||
117 | * but would still like to take advantage of automatic MSI remapping, can use | ||
118 | * this to initialise their own domain appropriately. Users should reserve a | ||
119 | * contiguous IOVA region, starting at @base, large enough to accommodate the | ||
120 | * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address | ||
121 | * used by the devices attached to @domain. | ||
122 | */ | ||
123 | int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) | ||
124 | { | ||
65 | struct iommu_dma_cookie *cookie; | 125 | struct iommu_dma_cookie *cookie; |
66 | 126 | ||
127 | if (domain->type != IOMMU_DOMAIN_UNMANAGED) | ||
128 | return -EINVAL; | ||
129 | |||
67 | if (domain->iova_cookie) | 130 | if (domain->iova_cookie) |
68 | return -EEXIST; | 131 | return -EEXIST; |
69 | 132 | ||
70 | cookie = kzalloc(sizeof(*cookie), GFP_KERNEL); | 133 | cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE); |
71 | if (!cookie) | 134 | if (!cookie) |
72 | return -ENOMEM; | 135 | return -ENOMEM; |
73 | 136 | ||
74 | spin_lock_init(&cookie->msi_lock); | 137 | cookie->msi_iova = base; |
75 | INIT_LIST_HEAD(&cookie->msi_page_list); | ||
76 | domain->iova_cookie = cookie; | 138 | domain->iova_cookie = cookie; |
77 | return 0; | 139 | return 0; |
78 | } | 140 | } |
79 | EXPORT_SYMBOL(iommu_get_dma_cookie); | 141 | EXPORT_SYMBOL(iommu_get_msi_cookie); |
80 | 142 | ||
81 | /** | 143 | /** |
82 | * iommu_put_dma_cookie - Release a domain's DMA mapping resources | 144 | * iommu_put_dma_cookie - Release a domain's DMA mapping resources |
83 | * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() | 145 | * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or |
146 | * iommu_get_msi_cookie() | ||
84 | * | 147 | * |
85 | * IOMMU drivers should normally call this from their domain_free callback. | 148 | * IOMMU drivers should normally call this from their domain_free callback. |
86 | */ | 149 | */ |
@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
92 | if (!cookie) | 155 | if (!cookie) |
93 | return; | 156 | return; |
94 | 157 | ||
95 | if (cookie->iovad.granule) | 158 | if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) |
96 | put_iova_domain(&cookie->iovad); | 159 | put_iova_domain(&cookie->iovad); |
97 | 160 | ||
98 | list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { | 161 | list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) { |
@@ -137,11 +200,12 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
137 | int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, | 200 | int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, |
138 | u64 size, struct device *dev) | 201 | u64 size, struct device *dev) |
139 | { | 202 | { |
140 | struct iova_domain *iovad = cookie_iovad(domain); | 203 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
204 | struct iova_domain *iovad = &cookie->iovad; | ||
141 | unsigned long order, base_pfn, end_pfn; | 205 | unsigned long order, base_pfn, end_pfn; |
142 | 206 | ||
143 | if (!iovad) | 207 | if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE) |
144 | return -ENODEV; | 208 | return -EINVAL; |
145 | 209 | ||
146 | /* Use the smallest supported page size for IOVA granularity */ | 210 | /* Use the smallest supported page size for IOVA granularity */ |
147 | order = __ffs(domain->pgsize_bitmap); | 211 | order = __ffs(domain->pgsize_bitmap); |
@@ -662,11 +726,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
662 | { | 726 | { |
663 | struct iommu_dma_cookie *cookie = domain->iova_cookie; | 727 | struct iommu_dma_cookie *cookie = domain->iova_cookie; |
664 | struct iommu_dma_msi_page *msi_page; | 728 | struct iommu_dma_msi_page *msi_page; |
665 | struct iova_domain *iovad = &cookie->iovad; | 729 | struct iova_domain *iovad = cookie_iovad(domain); |
666 | struct iova *iova; | 730 | struct iova *iova; |
667 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; | 731 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
732 | size_t size = cookie_msi_granule(cookie); | ||
668 | 733 | ||
669 | msi_addr &= ~(phys_addr_t)iova_mask(iovad); | 734 | msi_addr &= ~(phys_addr_t)(size - 1); |
670 | list_for_each_entry(msi_page, &cookie->msi_page_list, list) | 735 | list_for_each_entry(msi_page, &cookie->msi_page_list, list) |
671 | if (msi_page->phys == msi_addr) | 736 | if (msi_page->phys == msi_addr) |
672 | return msi_page; | 737 | return msi_page; |
@@ -675,13 +740,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
675 | if (!msi_page) | 740 | if (!msi_page) |
676 | return NULL; | 741 | return NULL; |
677 | 742 | ||
678 | iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev)); | ||
679 | if (!iova) | ||
680 | goto out_free_page; | ||
681 | |||
682 | msi_page->phys = msi_addr; | 743 | msi_page->phys = msi_addr; |
683 | msi_page->iova = iova_dma_addr(iovad, iova); | 744 | if (iovad) { |
684 | if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot)) | 745 | iova = __alloc_iova(domain, size, dma_get_mask(dev)); |
746 | if (!iova) | ||
747 | goto out_free_page; | ||
748 | msi_page->iova = iova_dma_addr(iovad, iova); | ||
749 | } else { | ||
750 | msi_page->iova = cookie->msi_iova; | ||
751 | cookie->msi_iova += size; | ||
752 | } | ||
753 | |||
754 | if (iommu_map(domain, msi_page->iova, msi_addr, size, prot)) | ||
685 | goto out_free_iova; | 755 | goto out_free_iova; |
686 | 756 | ||
687 | INIT_LIST_HEAD(&msi_page->list); | 757 | INIT_LIST_HEAD(&msi_page->list); |
@@ -689,7 +759,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
689 | return msi_page; | 759 | return msi_page; |
690 | 760 | ||
691 | out_free_iova: | 761 | out_free_iova: |
692 | __free_iova(iovad, iova); | 762 | if (iovad) |
763 | __free_iova(iovad, iova); | ||
764 | else | ||
765 | cookie->msi_iova -= size; | ||
693 | out_free_page: | 766 | out_free_page: |
694 | kfree(msi_page); | 767 | kfree(msi_page); |
695 | return NULL; | 768 | return NULL; |
@@ -730,7 +803,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
730 | msg->data = ~0U; | 803 | msg->data = ~0U; |
731 | } else { | 804 | } else { |
732 | msg->address_hi = upper_32_bits(msi_page->iova); | 805 | msg->address_hi = upper_32_bits(msi_page->iova); |
733 | msg->address_lo &= iova_mask(&cookie->iovad); | 806 | msg->address_lo &= cookie_msi_granule(cookie) - 1; |
734 | msg->address_lo += lower_32_bits(msi_page->iova); | 807 | msg->address_lo += lower_32_bits(msi_page->iova); |
735 | } | 808 | } |
736 | } | 809 | } |
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 7f7e9a7e3839..28df844a23b6 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -27,6 +27,7 @@ int iommu_dma_init(void);
27 | 27 | ||
28 | /* Domain management interface for IOMMU drivers */ | 28 | /* Domain management interface for IOMMU drivers */ |
29 | int iommu_get_dma_cookie(struct iommu_domain *domain); | 29 | int iommu_get_dma_cookie(struct iommu_domain *domain); |
30 | int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); | ||
30 | void iommu_put_dma_cookie(struct iommu_domain *domain); | 31 | void iommu_put_dma_cookie(struct iommu_domain *domain); |
31 | 32 | ||
32 | /* Setup call for arch DMA mapping code */ | 33 | /* Setup call for arch DMA mapping code */ |
@@ -86,6 +87,11 @@ static inline int iommu_get_dma_cookie(struct iommu_domain *domain)
86 | return -ENODEV; | 87 | return -ENODEV; |
87 | } | 88 | } |
88 | 89 | ||
90 | static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) | ||
91 | { | ||
92 | return -ENODEV; | ||
93 | } | ||
94 | |||
89 | static inline void iommu_put_dma_cookie(struct iommu_domain *domain) | 95 | static inline void iommu_put_dma_cookie(struct iommu_domain *domain) |
90 | { | 96 | { |
91 | } | 97 | } |