about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2009-07-27 08:30:17 -0400
committerPaul Mundt <lethal@linux-sh.org>2009-07-27 08:30:17 -0400
commit0dfae7d5a21901b28ec0452d71be64adf5ea323e (patch)
tree1ff16641313a76505ec89058d953c92d355af275 /arch
parent221c007b028ebf663ebee4fc90483909547d92a7 (diff)
sh: Use the now generic SH-4 clear/copy page ops for all MMU platforms.
Now that the SH-4 page clear/copy ops are generic, they can be used for all platforms with CONFIG_MMU=y. SH-5 remains the odd one out, but it too will gradually be converted over to using this interface. SH-3 platforms which do not contain aliases will see no impact from this change, while aliasing SH-3 platforms will get the same interface as SH-4. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/sh/include/asm/cacheflush.h15
-rw-r--r--arch/sh/include/asm/page.h11
-rw-r--r--arch/sh/include/asm/pgtable.h3
-rw-r--r--arch/sh/mm/Makefile_326
-rw-r--r--arch/sh/mm/cache-sh5.c17
-rw-r--r--arch/sh/mm/pg-mmu.c (renamed from arch/sh/mm/pg-sh4.c)19
6 files changed, 38 insertions(+), 33 deletions(-)
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 4e360114269d..4c85d55847cc 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -49,7 +49,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
49 flush_dcache_page(page); 49 flush_dcache_page(page);
50} 50}
51 51
52#if (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) && !defined(CONFIG_CACHE_OFF)
53extern void copy_to_user_page(struct vm_area_struct *vma, 52extern void copy_to_user_page(struct vm_area_struct *vma,
54 struct page *page, unsigned long vaddr, void *dst, const void *src, 53 struct page *page, unsigned long vaddr, void *dst, const void *src,
55 unsigned long len); 54 unsigned long len);
@@ -57,20 +56,6 @@ extern void copy_to_user_page(struct vm_area_struct *vma,
57extern void copy_from_user_page(struct vm_area_struct *vma, 56extern void copy_from_user_page(struct vm_area_struct *vma,
58 struct page *page, unsigned long vaddr, void *dst, const void *src, 57 struct page *page, unsigned long vaddr, void *dst, const void *src,
59 unsigned long len); 58 unsigned long len);
60#else
61#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
62 do { \
63 flush_cache_page(vma, vaddr, page_to_pfn(page));\
64 memcpy(dst, src, len); \
65 flush_icache_user_range(vma, page, vaddr, len); \
66 } while (0)
67
68#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
69 do { \
70 flush_cache_page(vma, vaddr, page_to_pfn(page));\
71 memcpy(dst, src, len); \
72 } while (0)
73#endif
74 59
75#define flush_cache_vmap(start, end) flush_cache_all() 60#define flush_cache_vmap(start, end) flush_cache_all()
76#define flush_cache_vunmap(start, end) flush_cache_all() 61#define flush_cache_vunmap(start, end) flush_cache_all()
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 5208b7bfc24e..847eeabb9083 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -63,22 +63,23 @@ extern void copy_page(void *to, void *from);
63struct page; 63struct page;
64struct vm_area_struct; 64struct vm_area_struct;
65 65
66#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \ 66#if defined(CONFIG_CPU_SH5)
67 (defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \
68 defined(CONFIG_SH7705_CACHE_32KB))
69extern void clear_user_page(void *to, unsigned long address, struct page *page); 67extern void clear_user_page(void *to, unsigned long address, struct page *page);
70extern void copy_user_page(void *to, void *from, unsigned long address, 68extern void copy_user_page(void *to, void *from, unsigned long address,
71 struct page *page); 69 struct page *page);
72#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB) 70
71#elif defined(CONFIG_MMU)
73extern void copy_user_highpage(struct page *to, struct page *from, 72extern void copy_user_highpage(struct page *to, struct page *from,
74 unsigned long vaddr, struct vm_area_struct *vma); 73 unsigned long vaddr, struct vm_area_struct *vma);
75#define __HAVE_ARCH_COPY_USER_HIGHPAGE 74#define __HAVE_ARCH_COPY_USER_HIGHPAGE
76extern void clear_user_highpage(struct page *page, unsigned long vaddr); 75extern void clear_user_highpage(struct page *page, unsigned long vaddr);
77#define clear_user_highpage clear_user_highpage 76#define clear_user_highpage clear_user_highpage
78#endif 77
79#else 78#else
79
80#define clear_user_page(page, vaddr, pg) clear_page(page) 80#define clear_user_page(page, vaddr, pg) clear_page(page)
81#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) 81#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
82
82#endif 83#endif
83 84
84/* 85/*
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index bef3ab7fc09e..ba2333216c5b 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -141,8 +141,7 @@ extern void paging_init(void);
141extern void page_table_range_init(unsigned long start, unsigned long end, 141extern void page_table_range_init(unsigned long start, unsigned long end,
142 pgd_t *pgd); 142 pgd_t *pgd);
143 143
144#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \ 144#if defined(CONFIG_MMU) && !defined(CONFIG_CPU_SH5)
145 defined(CONFIG_SH7705_CACHE_32KB)) && defined(CONFIG_MMU)
146extern void kmap_coherent_init(void); 145extern void kmap_coherent_init(void);
147#else 146#else
148#define kmap_coherent_init() do { } while (0) 147#define kmap_coherent_init() do { } while (0)
diff --git a/arch/sh/mm/Makefile_32 b/arch/sh/mm/Makefile_32
index 5c04bbb08d36..62e280734dcb 100644
--- a/arch/sh/mm/Makefile_32
+++ b/arch/sh/mm/Makefile_32
@@ -15,7 +15,7 @@ endif
15obj-y += $(cache-y) 15obj-y += $(cache-y)
16 16
17mmu-y := tlb-nommu.o pg-nommu.o 17mmu-y := tlb-nommu.o pg-nommu.o
18mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o 18mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o pg-mmu.o
19 19
20obj-y += $(mmu-y) 20obj-y += $(mmu-y)
21obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o 21obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o
@@ -29,10 +29,6 @@ tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
29tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o 29tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o
30tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o 30tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o
31obj-y += $(tlb-y) 31obj-y += $(tlb-y)
32ifndef CONFIG_CACHE_OFF
33obj-$(CONFIG_CPU_SH4) += pg-sh4.o
34obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh4.o
35endif
36endif 32endif
37 33
38obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 34obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 86762092508c..3e2d7321b636 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -831,4 +831,21 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
831 else 831 else
832 sh64_clear_user_page_coloured(to, address); 832 sh64_clear_user_page_coloured(to, address);
833} 833}
834
835void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
836 unsigned long vaddr, void *dst, const void *src,
837 unsigned long len)
838{
839 flush_cache_page(vma, vaddr, page_to_pfn(page));
840 memcpy(dst, src, len);
841 flush_icache_user_range(vma, page, vaddr, len);
842}
843
844void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
845 unsigned long vaddr, void *dst, const void *src,
846 unsigned long len)
847{
848 flush_cache_page(vma, vaddr, page_to_pfn(page));
849 memcpy(dst, src, len);
850}
834#endif 851#endif
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-mmu.c
index 4d93070b8220..356d2cdcb209 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-mmu.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * arch/sh/mm/pg-sh4.c 2 * arch/sh/mm/pg-mmu.c
3 * 3 *
4 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka 4 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
5 * Copyright (C) 2002 - 2009 Paul Mundt 5 * Copyright (C) 2002 - 2009 Paul Mundt
@@ -22,11 +22,13 @@ static pte_t *kmap_coherent_pte;
22 22
23void __init kmap_coherent_init(void) 23void __init kmap_coherent_init(void)
24{ 24{
25#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
25 unsigned long vaddr; 26 unsigned long vaddr;
26 27
27 /* cache the first coherent kmap pte */ 28 /* cache the first coherent kmap pte */
28 vaddr = __fix_to_virt(FIX_CMAP_BEGIN); 29 vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
29 kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); 30 kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
31#endif
30} 32}
31 33
32static inline void *kmap_coherent(struct page *page, unsigned long addr) 34static inline void *kmap_coherent(struct page *page, unsigned long addr)
@@ -62,13 +64,15 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
62 unsigned long vaddr, void *dst, const void *src, 64 unsigned long vaddr, void *dst, const void *src,
63 unsigned long len) 65 unsigned long len)
64{ 66{
65 if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) { 67 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
68 !test_bit(PG_dcache_dirty, &page->flags)) {
66 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); 69 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
67 memcpy(vto, src, len); 70 memcpy(vto, src, len);
68 kunmap_coherent(vto); 71 kunmap_coherent(vto);
69 } else { 72 } else {
70 memcpy(dst, src, len); 73 memcpy(dst, src, len);
71 set_bit(PG_dcache_dirty, &page->flags); 74 if (boot_cpu_data.dcache.n_aliases)
75 set_bit(PG_dcache_dirty, &page->flags);
72 } 76 }
73 77
74 if (vma->vm_flags & VM_EXEC) 78 if (vma->vm_flags & VM_EXEC)
@@ -79,13 +83,15 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
79 unsigned long vaddr, void *dst, const void *src, 83 unsigned long vaddr, void *dst, const void *src,
80 unsigned long len) 84 unsigned long len)
81{ 85{
82 if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) { 86 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
87 !test_bit(PG_dcache_dirty, &page->flags)) {
83 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); 88 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
84 memcpy(dst, vfrom, len); 89 memcpy(dst, vfrom, len);
85 kunmap_coherent(vfrom); 90 kunmap_coherent(vfrom);
86 } else { 91 } else {
87 memcpy(dst, src, len); 92 memcpy(dst, src, len);
88 set_bit(PG_dcache_dirty, &page->flags); 93 if (boot_cpu_data.dcache.n_aliases)
94 set_bit(PG_dcache_dirty, &page->flags);
89 } 95 }
90} 96}
91 97
@@ -96,7 +102,8 @@ void copy_user_highpage(struct page *to, struct page *from,
96 102
97 vto = kmap_atomic(to, KM_USER1); 103 vto = kmap_atomic(to, KM_USER1);
98 104
99 if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) { 105 if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
106 !test_bit(PG_dcache_dirty, &from->flags)) {
100 vfrom = kmap_coherent(from, vaddr); 107 vfrom = kmap_coherent(from, vaddr);
101 copy_page(vto, vfrom); 108 copy_page(vto, vfrom);
102 kunmap_coherent(vfrom); 109 kunmap_coherent(vfrom);