author | Hiroshi Doyu <hdoyu@nvidia.com> | 2012-08-28 01:13:04 -0400
---|---|---
committer | Marek Szyprowski <m.szyprowski@samsung.com> | 2012-08-28 15:01:07 -0400
commit | 479ed93a4b98eef03fd8260f7ddc00019221c450 (patch) |
tree | eb4c1d6cb33edd15c4b892e42c2e3f58b227525c /arch |
parent | 665bad7bb911d392000fa69bc6b599c0df992504 (diff) |
ARM: dma-mapping: IOMMU allocates pages from atomic_pool with GFP_ATOMIC
Make use of the same atomic pool that the regular DMA path uses, and skip
the kernel page mapping, which can involve sleepable operations when
allocating kernel page tables.
Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
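For context, here is a minimal sketch of the kind of caller this patch addresses: a driver behind an ARM IOMMU allocating coherent DMA memory from atomic context. The device, handler name, and buffer size (`my_irq_handler`, `BUF_SIZE`) are illustrative and not part of this patch; only `dma_alloc_coherent()` with `GFP_ATOMIC` is the relevant interface.

```c
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>

#define BUF_SIZE 4096	/* illustrative size, one page */

/*
 * Hypothetical IRQ handler for a device behind an ARM IOMMU.
 * This runs in atomic context, so the allocation must not sleep:
 * passing GFP_ATOMIC makes arm_iommu_alloc_attrs() take the new
 * __iommu_alloc_atomic() path and draw from the atomic pool.
 */
static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct device *dev = dev_id;
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, BUF_SIZE, &dma_handle, GFP_ATOMIC);
	if (!cpu_addr)
		return IRQ_NONE;

	/*
	 * ... hand dma_handle to the device; the buffer is freed
	 * later via dma_free_coherent(), where the matching
	 * __in_atomic_pool() check routes it back to the pool ...
	 */

	return IRQ_HANDLED;
}
```

Before this patch, the IOMMU allocation path always went through `__iommu_alloc_buffer()` plus a vmap-style kernel mapping, which can sleep while kernel page tables are allocated; that is what made such `GFP_ATOMIC` callers unsafe.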
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 36
1 file changed, 36 insertions, 0 deletions
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 54b158df5afa..051204fc4617 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1161,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 	return NULL;
 }
 
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+				  dma_addr_t *handle)
+{
+	struct page *page;
+	void *addr;
+
+	addr = __alloc_from_pool(size, &page);
+	if (!addr)
+		return NULL;
+
+	*handle = __iommu_create_mapping(dev, &page, size);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_mapping;
+
+	return addr;
+
+err_mapping:
+	__free_from_pool(addr, size);
+	return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+				dma_addr_t handle, size_t size)
+{
+	__iommu_remove_mapping(dev, handle, size);
+	__free_from_pool(page_address(pages[0]), size);
+}
+
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
@@ -1171,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	*handle = DMA_ERROR_CODE;
 	size = PAGE_ALIGN(size);
 
+	if (gfp & GFP_ATOMIC)
+		return __iommu_alloc_atomic(dev, size, handle);
+
 	pages = __iommu_alloc_buffer(dev, size, gfp);
 	if (!pages)
 		return NULL;
@@ -1237,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
+	if (__in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, pages, handle, size);
+		return;
+	}
+
 	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
 		unmap_kernel_range((unsigned long)cpu_addr, size);
 		vunmap(cpu_addr);
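On the free side, the caller only hands back `cpu_addr`, so `arm_iommu_free_attrs()` needs a cheap way to tell pool allocations from vmap'ed ones; `__in_atomic_pool()` answers that with an address-range check against the pool. A minimal sketch of that kind of check follows; the struct and field names are illustrative, not the kernel's actual layout.

```c
#include <stdbool.h>
#include <stddef.h>

/*
 * Sketch of the range check __in_atomic_pool() performs: the atomic
 * pool occupies one contiguous kernel virtual region, so a buffer
 * came from it iff [start, start + bytes) lies entirely inside
 * [vaddr, vaddr + size). Names here are illustrative only.
 */
struct atomic_pool_sketch {
	char	*vaddr;	/* start of the pool's kernel mapping */
	size_t	 size;	/* total pool size in bytes */
};

static bool in_pool_sketch(const struct atomic_pool_sketch *pool,
			   const void *start, size_t bytes)
{
	const char *s = start;
	const char *pool_start = pool->vaddr;
	const char *pool_end = pool->vaddr + pool->size;

	return s >= pool_start && s + bytes <= pool_end;
}
```

The placement of the new check in `arm_iommu_free_attrs()` matters: it runs, and returns early, before the `DMA_ATTR_NO_KERNEL_MAPPING` unmap path, since pool memory keeps its permanent kernel mapping and must not be passed to `unmap_kernel_range()` or `vunmap()`.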