about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2009-08-14 20:49:32 -0400
committerPaul Mundt <lethal@linux-sh.org>2009-08-14 20:49:32 -0400
commitdde5e3ffb770ef2854bbc32c51a365e932919e19 (patch)
tree1b7936b8068f3532892b30a526d23b79bbe401f5 /arch
parentcbbe2f68f678a90bebeb30b8a7fcd8aed0614879 (diff)
sh: rework nommu for generic cache.c use.
This does a bit of reorganizing to allow nommu to use the new and generic cache.c; no functional changes. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/sh/include/asm/cacheflush.h2
-rw-r--r--arch/sh/include/asm/page.h7
-rw-r--r--arch/sh/include/cpu-sh3/cpu/cacheflush.h2
-rw-r--r--arch/sh/include/cpu-sh4/cpu/cacheflush.h2
-rw-r--r--arch/sh/kernel/cpu/init.c2
-rw-r--r--arch/sh/mm/mmap.c2
-rw-r--r--arch/sh/mm/tlb-nommu.c5
7 files changed, 4 insertions, 18 deletions
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 4bf621e4146d..9ec13fb909dd 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -76,5 +76,7 @@ void kmap_coherent_init(void);
76void *kmap_coherent(struct page *page, unsigned long addr); 76void *kmap_coherent(struct page *page, unsigned long addr);
77void kunmap_coherent(void); 77void kunmap_coherent(void);
78 78
79#define PG_dcache_dirty PG_arch_1
80
79#endif /* __KERNEL__ */ 81#endif /* __KERNEL__ */
80#endif /* __ASM_SH_CACHEFLUSH_H */ 82#endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 847eeabb9083..a316eeb50b39 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -68,18 +68,13 @@ extern void clear_user_page(void *to, unsigned long address, struct page *page);
68extern void copy_user_page(void *to, void *from, unsigned long address, 68extern void copy_user_page(void *to, void *from, unsigned long address,
69 struct page *page); 69 struct page *page);
70 70
71#elif defined(CONFIG_MMU) 71#else
72extern void copy_user_highpage(struct page *to, struct page *from, 72extern void copy_user_highpage(struct page *to, struct page *from,
73 unsigned long vaddr, struct vm_area_struct *vma); 73 unsigned long vaddr, struct vm_area_struct *vma);
74#define __HAVE_ARCH_COPY_USER_HIGHPAGE 74#define __HAVE_ARCH_COPY_USER_HIGHPAGE
75extern void clear_user_highpage(struct page *page, unsigned long vaddr); 75extern void clear_user_highpage(struct page *page, unsigned long vaddr);
76#define clear_user_highpage clear_user_highpage 76#define clear_user_highpage clear_user_highpage
77 77
78#else
79
80#define clear_user_page(page, vaddr, pg) clear_page(page)
81#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
82
83#endif 78#endif
84 79
85/* 80/*
diff --git a/arch/sh/include/cpu-sh3/cpu/cacheflush.h b/arch/sh/include/cpu-sh3/cpu/cacheflush.h
index 6485ad5649ad..3b5f3df4e1c8 100644
--- a/arch/sh/include/cpu-sh3/cpu/cacheflush.h
+++ b/arch/sh/include/cpu-sh3/cpu/cacheflush.h
@@ -15,8 +15,6 @@
15 * SH4. Unlike the SH4 this is a unified cache so we need to do some work 15 * SH4. Unlike the SH4 this is a unified cache so we need to do some work
16 * in mmap when 'exec'ing a new binary 16 * in mmap when 'exec'ing a new binary
17 */ 17 */
18#define PG_dcache_dirty PG_arch_1
19
20void flush_cache_all(void); 18void flush_cache_all(void);
21void flush_cache_mm(struct mm_struct *mm); 19void flush_cache_mm(struct mm_struct *mm);
22#define flush_cache_dup_mm(mm) flush_cache_mm(mm) 20#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
diff --git a/arch/sh/include/cpu-sh4/cpu/cacheflush.h b/arch/sh/include/cpu-sh4/cpu/cacheflush.h
index 3564f1722195..76764f0fb88a 100644
--- a/arch/sh/include/cpu-sh4/cpu/cacheflush.h
+++ b/arch/sh/include/cpu-sh4/cpu/cacheflush.h
@@ -38,6 +38,4 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
38/* Initialization of P3 area for copy_user_page */ 38/* Initialization of P3 area for copy_user_page */
39void p3_cache_init(void); 39void p3_cache_init(void);
40 40
41#define PG_dcache_dirty PG_arch_1
42
43#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */ 41#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ad85421099cd..c832fa4cf8ed 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -268,11 +268,9 @@ asmlinkage void __init sh_cpu_init(void)
268 cache_init(); 268 cache_init();
269 269
270 if (raw_smp_processor_id() == 0) { 270 if (raw_smp_processor_id() == 0) {
271#ifdef CONFIG_MMU
272 shm_align_mask = max_t(unsigned long, 271 shm_align_mask = max_t(unsigned long,
273 current_cpu_data.dcache.way_size - 1, 272 current_cpu_data.dcache.way_size - 1,
274 PAGE_SIZE - 1); 273 PAGE_SIZE - 1);
275#endif
276 274
277 /* Boot CPU sets the cache shape */ 275 /* Boot CPU sets the cache shape */
278 detect_cache_shape(); 276 detect_cache_shape();
diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
index 1b5fdfb4e0c2..d2984fa42d3d 100644
--- a/arch/sh/mm/mmap.c
+++ b/arch/sh/mm/mmap.c
@@ -14,10 +14,10 @@
14#include <asm/page.h> 14#include <asm/page.h>
15#include <asm/processor.h> 15#include <asm/processor.h>
16 16
17#ifdef CONFIG_MMU
18unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ 17unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
19EXPORT_SYMBOL(shm_align_mask); 18EXPORT_SYMBOL(shm_align_mask);
20 19
20#ifdef CONFIG_MMU
21/* 21/*
22 * To avoid cache aliases, we map the shared page with same color. 22 * To avoid cache aliases, we map the shared page with same color.
23 */ 23 */
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
index c49e9d24c2e6..0710ebb99b9a 100644
--- a/arch/sh/mm/tlb-nommu.c
+++ b/arch/sh/mm/tlb-nommu.c
@@ -50,11 +50,6 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
50{ 50{
51} 51}
52 52
53void __update_cache(struct vm_area_struct *vma,
54 unsigned long address, pte_t pte)
55{
56}
57
58void __init kmap_coherent_init(void) 53void __init kmap_coherent_init(void)
59{ 54{
60} 55}