author	Christoph Hellwig <hch@lst.de>	2019-06-03 02:43:51 -0400
committer	Christoph Hellwig <hch@lst.de>	2019-06-03 10:00:08 -0400
commit	c30700db9eaabb35e0b123301df35a6846e6b6b4 (patch)
tree	96e4e6018c0b6e92289aee5093082c6322ab7abf /kernel
parent	67f30ad19c4b329dbe47e1563b2017203bd02e34 (diff)
dma-direct: provide generic support for uncached kernel segments
A few architectures support uncached kernel segments. In that case we get an uncached mapping for a given physical address by using an offset in the uncached segment. Implement support for this scheme in the generic dma-direct code instead of duplicating it in arch hooks.

Signed-off-by: Christoph Hellwig <hch@lst.de>
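For context (not part of this patch): on an architecture with a fixed-offset uncached alias of the linear map, the two hooks this patch calls into could look roughly like the sketch below. UNCAC_BASE is an assumed arch-specific constant here (MIPS uses a similar scheme); __pa()/__va() are the usual phys/virt helpers.

/*
 * Hypothetical arch implementation, assuming physical memory is
 * aliased uncached at a constant UNCAC_BASE offset.
 */
void *uncached_kernel_address(void *addr)
{
	/* cached linear-map address -> uncached alias */
	return (void *)(__pa(addr) + UNCAC_BASE);
}

void *cached_kernel_address(void *addr)
{
	/* uncached alias -> cached linear-map address */
	return __va((unsigned long)addr - UNCAC_BASE);
}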
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/dma/direct.c | 17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 0816c1e8b05a..b67f0aa08aa3 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -158,6 +158,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	}
 	memset(ret, 0, size);
+
+	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+		arch_dma_prep_coherent(page, size);
+		ret = uncached_kernel_address(ret);
+	}
+
 	return ret;
 }
 
@@ -173,13 +180,18 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 
 	if (force_dma_unencrypted())
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+
+	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_NON_CONSISTENT))
+		cpu_addr = cached_kernel_address(cpu_addr);
 	__dma_direct_free_pages(dev, size, virt_to_page(cpu_addr));
 }
 
 void *dma_direct_alloc(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev))
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !dev_is_dma_coherent(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 	return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
 }
@@ -187,7 +199,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 void dma_direct_free(struct device *dev, size_t size,
 		void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-	if (!dev_is_dma_coherent(dev))
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+	    !dev_is_dma_coherent(dev))
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 	else
 		dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
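
As a driver-side illustration (again not from the patch): on an architecture that selects CONFIG_ARCH_HAS_UNCACHED_SEGMENT, a coherent allocation for a non-coherent device now flows through dma_direct_alloc_pages() above and hands the caller an uncached-segment address. A minimal sketch, with "mydev" and the 4 KiB size as assumptions:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int example_coherent_alloc(struct device *mydev)
{
	dma_addr_t dma_handle;
	void *buf;

	/* Ends up in dma_direct_alloc_pages(); the returned pointer
	 * is an uncached-segment alias, so CPU stores are visible to
	 * the device without explicit cache maintenance. */
	buf = dma_alloc_coherent(mydev, SZ_4K, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* dma_direct_free_pages() converts back with
	 * cached_kernel_address() before freeing the pages. */
	dma_free_coherent(mydev, SZ_4K, buf, dma_handle);
	return 0;
}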