From cbbe2f68f678a90bebeb30b8a7fcd8aed0614879 Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Sat, 15 Aug 2009 09:30:39 +0900
Subject: sh: rename pg-mmu.c -> cache.c, enable generically.

This builds in the newly created cache.c (renamed from pg-mmu.c) for both
MMU and NOMMU configurations. The kmap_coherent() stubs and the alias
information recorded by each CPU family take care of doing the right
thing while enabling the code to be commonly shared.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
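Note: the NOMMU half of this arrangement is not visible in the diff
itself. On NOMMU parts boot_cpu_data.dcache.n_aliases is zero, so every
kmap_coherent() use in cache.c sits behind an n_aliases (or
pages_do_alias()) guard that short-circuits first; the stubs only need
to exist to satisfy the linker. A minimal sketch of the assumed shape
(the real stubs live in the NOMMU TLB code, not in this patch):

#include <linux/bug.h>
#include <linux/mm.h>

/* NOMMU stand-ins for the MMU kmap_coherent() pair. They are never
 * reached while dcache.n_aliases == 0, so trapping is acceptable. */
void *kmap_coherent(struct page *page, unsigned long addr)
{
	BUG();
	return NULL;
}

void kunmap_coherent(void)
{
	BUG();
}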
 arch/sh/include/asm/cacheflush.h |   2 -
 arch/sh/mm/Makefile_32           |  16 ++--
 arch/sh/mm/cache.c               | 129 +++++++++++++++++++++++++++++++++++++++
 arch/sh/mm/pg-mmu.c              | 129 ---------------------------------------
 4 files changed, 137 insertions(+), 139 deletions(-)
 create mode 100644 arch/sh/mm/cache.c
 delete mode 100644 arch/sh/mm/pg-mmu.c

diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 0e87e87cc01f..4bf621e4146d 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -45,7 +45,6 @@ extern void __flush_purge_region(void *start, int size);
 extern void __flush_invalidate_region(void *start, int size);
 #endif
 
-#ifdef CONFIG_MMU
 #define ARCH_HAS_FLUSH_ANON_PAGE
 extern void __flush_anon_page(struct page *page, unsigned long);
 
@@ -55,7 +54,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 	if (boot_cpu_data.dcache.n_aliases && PageAnon(page))
 		__flush_anon_page(page, vmaddr);
 }
-#endif
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
index 30771b137f48..affcc9a15cea 100644
--- a/arch/sh/mm/Makefile_32
+++ b/arch/sh/mm/Makefile_32
@@ -2,20 +2,20 @@
 # Makefile for the Linux SuperH-specific parts of the memory manager.
 #
 
-obj-y			:= init.o extable_32.o consistent.o mmap.o
+obj-y			:= cache.o init.o extable_32.o consistent.o mmap.o
 
 ifndef CONFIG_CACHE_OFF
-cache-$(CONFIG_CPU_SH2)		:= cache-sh2.o
-cache-$(CONFIG_CPU_SH2A)	:= cache-sh2a.o
-cache-$(CONFIG_CPU_SH3)		:= cache-sh3.o
-cache-$(CONFIG_CPU_SH4)		:= cache-sh4.o flush-sh4.o
-cache-$(CONFIG_SH7705_CACHE_32KB)	+= cache-sh7705.o
+cacheops-$(CONFIG_CPU_SH2)	:= cache-sh2.o
+cacheops-$(CONFIG_CPU_SH2A)	:= cache-sh2a.o
+cacheops-$(CONFIG_CPU_SH3)	:= cache-sh3.o
+cacheops-$(CONFIG_CPU_SH4)	:= cache-sh4.o flush-sh4.o
+cacheops-$(CONFIG_SH7705_CACHE_32KB)	+= cache-sh7705.o
 endif
 
-obj-y			+= $(cache-y)
+obj-y			+= $(cacheops-y)
 
 mmu-y			:= tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU)	:= fault_32.o kmap.o tlbflush_32.o ioremap_32.o pg-mmu.o
+mmu-$(CONFIG_MMU)	:= fault_32.o kmap.o tlbflush_32.o ioremap_32.o
 
 obj-y			+= $(mmu-y)
 obj-$(CONFIG_DEBUG_FS)	+= asids-debugfs.o
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
new file mode 100644
index 000000000000..f51d0a4eb3ba
--- /dev/null
+++ b/arch/sh/mm/cache.c
@@ -0,0 +1,129 @@
+/*
+ * arch/sh/mm/pg-mmu.c
+ *
+ * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
+ * Copyright (C) 2002 - 2009  Paul Mundt
+ *
+ * Released under the terms of the GNU GPL v2.0.
+ */
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long vaddr, void *dst, const void *src,
+		       unsigned long len)
+{
+	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+	    !test_bit(PG_dcache_dirty, &page->flags)) {
+		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(vto, src, len);
+		kunmap_coherent();
+	} else {
+		memcpy(dst, src, len);
+		if (boot_cpu_data.dcache.n_aliases)
+			set_bit(PG_dcache_dirty, &page->flags);
+	}
+
+	if (vma->vm_flags & VM_EXEC)
+		flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+			 unsigned long vaddr, void *dst, const void *src,
+			 unsigned long len)
+{
+	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+	    !test_bit(PG_dcache_dirty, &page->flags)) {
+		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(dst, vfrom, len);
+		kunmap_coherent();
+	} else {
+		memcpy(dst, src, len);
+		if (boot_cpu_data.dcache.n_aliases)
+			set_bit(PG_dcache_dirty, &page->flags);
+	}
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+			unsigned long vaddr, struct vm_area_struct *vma)
+{
+	void *vfrom, *vto;
+
+	vto = kmap_atomic(to, KM_USER1);
+
+	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
+	    !test_bit(PG_dcache_dirty, &from->flags)) {
+		vfrom = kmap_coherent(from, vaddr);
+		copy_page(vto, vfrom);
+		kunmap_coherent();
+	} else {
+		vfrom = kmap_atomic(from, KM_USER0);
+		copy_page(vto, vfrom);
+		kunmap_atomic(vfrom, KM_USER0);
+	}
+
+	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+		__flush_wback_region(vto, PAGE_SIZE);
+
+	kunmap_atomic(vto, KM_USER1);
+	/* Make sure this page is cleared on other CPU's too before using it */
+	smp_wmb();
+}
+EXPORT_SYMBOL(copy_user_highpage);
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *kaddr = kmap_atomic(page, KM_USER0);
+
+	clear_page(kaddr);
+
+	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
+		__flush_wback_region(kaddr, PAGE_SIZE);
+
+	kunmap_atomic(kaddr, KM_USER0);
+}
+EXPORT_SYMBOL(clear_user_highpage);
+
+void __update_cache(struct vm_area_struct *vma,
+		    unsigned long address, pte_t pte)
+{
+	struct page *page;
+	unsigned long pfn = pte_pfn(pte);
+
+	if (!boot_cpu_data.dcache.n_aliases)
+		return;
+
+	page = pfn_to_page(pfn);
+	if (pfn_valid(pfn) && page_mapping(page)) {
+		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+		if (dirty) {
+			unsigned long addr = (unsigned long)page_address(page);
+
+			if (pages_do_alias(addr, address & PAGE_MASK))
+				__flush_wback_region((void *)addr, PAGE_SIZE);
+		}
+	}
+}
+
+void __flush_anon_page(struct page *page, unsigned long vmaddr)
+{
+	unsigned long addr = (unsigned long) page_address(page);
+
+	if (pages_do_alias(addr, vmaddr)) {
+		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+		    !test_bit(PG_dcache_dirty, &page->flags)) {
+			void *kaddr;
+
+			kaddr = kmap_coherent(page, vmaddr);
+			__flush_wback_region((void *)kaddr, PAGE_SIZE);
+			kunmap_coherent();
+		} else
+			__flush_wback_region((void *)addr, PAGE_SIZE);
+	}
+}
diff --git a/arch/sh/mm/pg-mmu.c b/arch/sh/mm/pg-mmu.c
deleted file mode 100644
index f51d0a4eb3ba..000000000000
--- a/arch/sh/mm/pg-mmu.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/*
- * arch/sh/mm/pg-mmu.c
- *
- * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
- * Copyright (C) 2002 - 2009  Paul Mundt
- *
- * Released under the terms of the GNU GPL v2.0.
- */
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/fs.h>
-#include <linux/highmem.h>
-#include <linux/module.h>
-#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
-		       unsigned long vaddr, void *dst, const void *src,
-		       unsigned long len)
-{
-	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-	    !test_bit(PG_dcache_dirty, &page->flags)) {
-		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-		memcpy(vto, src, len);
-		kunmap_coherent();
-	} else {
-		memcpy(dst, src, len);
-		if (boot_cpu_data.dcache.n_aliases)
-			set_bit(PG_dcache_dirty, &page->flags);
-	}
-
-	if (vma->vm_flags & VM_EXEC)
-		flush_cache_page(vma, vaddr, page_to_pfn(page));
-}
-
-void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
-			 unsigned long vaddr, void *dst, const void *src,
-			 unsigned long len)
-{
-	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-	    !test_bit(PG_dcache_dirty, &page->flags)) {
-		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-		memcpy(dst, vfrom, len);
-		kunmap_coherent();
-	} else {
-		memcpy(dst, src, len);
-		if (boot_cpu_data.dcache.n_aliases)
-			set_bit(PG_dcache_dirty, &page->flags);
-	}
-}
-
-void copy_user_highpage(struct page *to, struct page *from,
-			unsigned long vaddr, struct vm_area_struct *vma)
-{
-	void *vfrom, *vto;
-
-	vto = kmap_atomic(to, KM_USER1);
-
-	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
-	    !test_bit(PG_dcache_dirty, &from->flags)) {
-		vfrom = kmap_coherent(from, vaddr);
-		copy_page(vto, vfrom);
-		kunmap_coherent();
-	} else {
-		vfrom = kmap_atomic(from, KM_USER0);
-		copy_page(vto, vfrom);
-		kunmap_atomic(vfrom, KM_USER0);
-	}
-
-	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
-		__flush_wback_region(vto, PAGE_SIZE);
-
-	kunmap_atomic(vto, KM_USER1);
-	/* Make sure this page is cleared on other CPU's too before using it */
-	smp_wmb();
-}
-EXPORT_SYMBOL(copy_user_highpage);
-
-void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
-	void *kaddr = kmap_atomic(page, KM_USER0);
-
-	clear_page(kaddr);
-
-	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
-		__flush_wback_region(kaddr, PAGE_SIZE);
-
-	kunmap_atomic(kaddr, KM_USER0);
-}
-EXPORT_SYMBOL(clear_user_highpage);
-
-void __update_cache(struct vm_area_struct *vma,
-		    unsigned long address, pte_t pte)
-{
-	struct page *page;
-	unsigned long pfn = pte_pfn(pte);
-
-	if (!boot_cpu_data.dcache.n_aliases)
-		return;
-
-	page = pfn_to_page(pfn);
-	if (pfn_valid(pfn) && page_mapping(page)) {
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		if (dirty) {
-			unsigned long addr = (unsigned long)page_address(page);
-
-			if (pages_do_alias(addr, address & PAGE_MASK))
-				__flush_wback_region((void *)addr, PAGE_SIZE);
-		}
-	}
-}
-
-void __flush_anon_page(struct page *page, unsigned long vmaddr)
-{
-	unsigned long addr = (unsigned long) page_address(page);
-
-	if (pages_do_alias(addr, vmaddr)) {
-		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
-		    !test_bit(PG_dcache_dirty, &page->flags)) {
-			void *kaddr;
-
-			kaddr = kmap_coherent(page, vmaddr);
-			__flush_wback_region((void *)kaddr, PAGE_SIZE);
-			kunmap_coherent();
-		} else
-			__flush_wback_region((void *)addr, PAGE_SIZE);
-	}
-}
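Background on the aliasing checks used throughout cache.c: on a
virtually indexed cache such as the SH-4 D-cache, the index is taken
from virtual address bits, some of which lie above PAGE_SHIFT, so two
mappings of the same physical page can occupy different cache lines
and go stale independently. The standalone demo below illustrates the
colour test that the pages_do_alias()/n_aliases machinery performs;
the cache geometry constants and demo_ names are assumptions chosen
for the example, not values or identifiers taken from this patch.

#include <stdio.h>

/* Assumed SH-4-style geometry, for illustration only: a 16 KiB
 * directly indexed cache way with 4 KiB pages leaves two virtual
 * address bits (bits 12-13) selecting the cache "colour". */
#define DEMO_PAGE_SHIFT		12
#define DEMO_PAGE_SIZE		(1UL << DEMO_PAGE_SHIFT)
#define DEMO_WAY_SIZE		(16UL * 1024)
#define DEMO_ALIAS_MASK		(DEMO_WAY_SIZE - DEMO_PAGE_SIZE)

/* Two mappings of one physical page alias in the cache iff they
 * disagree in the index bits covered by DEMO_ALIAS_MASK. */
static int demo_pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & DEMO_ALIAS_MASK;
}

int main(void)
{
	unsigned long kaddr = 0x8c001000UL;	/* kernel-side mapping */
	unsigned long uaddr = 0x00403000UL;	/* user-side mapping */

	if (demo_pages_do_alias(kaddr, uaddr))
		printf("colours differ: writeback/flush needed\n");
	else
		printf("same colour: both mappings hit the same lines\n");

	return 0;
}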