author:    Simon Baatz <gmbnomis@gmail.com>  2013-06-10 16:10:12 -0400
committer: Russell King <rmk+kernel@arm.linux.org.uk>  2013-06-17 05:30:52 -0400
commit:    1bc39742aab09248169ef9d3727c9def3528b3f3 (patch)
tree:      50179be222c15d5e06f77928331d63843f6fc738 /arch
parent:    049be07053ebbf0ee8543caea23ae7bdf0765bb2 (diff)
ARM: 7755/1: handle user space mapped pages in flush_kernel_dcache_page
Commit f8b63c1 made flush_kernel_dcache_page a no-op assuming that
the pages it needs to handle are kernel mapped only.  However, for
example when doing direct I/O, pages with user space mappings may
occur.

Thus, continue to do lazy flushing if there are no user space
mappings.  Otherwise, flush the kernel cache lines directly.

Signed-off-by: Simon Baatz <gmbnomis@gmail.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: <stable@vger.kernel.org> # 3.2+
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
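For context, the intended caller pattern for flush_kernel_dcache_page()
(as documented in Documentation/cachetlb.txt) is to write to a page
through its kernel mapping and then flush before unmapping.  A minimal
sketch of such a caller follows; the helper copy_to_page() and its
arguments are illustrative assumptions, not code from this commit:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/*
	 * Illustrative sketch only: fill a page through the kernel
	 * mapping, then flush so every mapping sees the new data.
	 * With direct I/O, "page" may come from get_user_pages() and
	 * thus have user space mappings -- exactly the case the old
	 * no-op implementation missed.
	 */
	static void copy_to_page(struct page *page, const void *src,
				 size_t len)
	{
		void *vaddr = kmap(page);

		memcpy(vaddr, src, len);
		flush_kernel_dcache_page(page);	/* flush dirty kernel alias */
		kunmap(page);
	}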
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/cacheflush.h  |  4 +---
-rw-r--r--  arch/arm/mm/flush.c                | 33 +++++++++++++++++++++++++++++++++
2 files changed, 34 insertions(+), 3 deletions(-)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index bff71388e72a..17d0ae8672fa 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);
 
 #define flush_dcache_mmap_lock(mapping) \
 	spin_lock_irq(&(mapping)->tree_lock)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 0d473cce501c..32aa5861119f 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -301,6 +301,39 @@ void flush_dcache_page(struct page *page)
 EXPORT_SYMBOL(flush_dcache_page);
 
 /*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation. Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+		struct address_space *mapping;
+
+		mapping = page_mapping(page);
+
+		if (!mapping || mapping_mapped(mapping)) {
+			void *addr;
+
+			addr = page_address(page);
+			/*
+			 * kmap_atomic() doesn't set the page virtual
+			 * address for highmem pages, and
+			 * kunmap_atomic() takes care of cache
+			 * flushing already.
+			 */
+			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+		}
+	}
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+/*
  * Flush an anonymous page so that users of get_user_pages()
  * can safely access the data. The expected sequence is:
  *