Diffstat (limited to 'arch')
 arch/sh/include/asm/cacheflush.h            | 37
 arch/sh/include/cpu-common/cpu/cacheflush.h | 37
 arch/sh/include/cpu-sh4/cpu/cacheflush.h    | 31
 arch/sh/include/cpu-sh5/cpu/cacheflush.h    | 10
 arch/sh/mm/cache-sh4.c                      | 87
 arch/sh/mm/cache-sh5.c                      |  4
 arch/sh/mm/cache.c                          | 70
 arch/sh/mm/flush-sh4.c                      | 13
 arch/sh/mm/init.c                           |  5
 9 files changed, 159 insertions(+), 135 deletions(-)
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index b1cf30f423af..25b7f46494de 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -1,46 +1,11 @@
 #ifndef __ASM_SH_CACHEFLUSH_H
 #define __ASM_SH_CACHEFLUSH_H
 
-#include <linux/mm.h>
-
 #ifdef __KERNEL__
 
-#ifdef CONFIG_CACHE_OFF
-/*
- * Nothing to do when the cache is disabled, initial flush and explicit
- * disabling is handled at CPU init time.
- *
- * See arch/sh/kernel/cpu/init.c:cache_init().
- */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
-#define __flush_wback_region(start, size)	do { (void)(start); } while (0)
-#define __flush_purge_region(start, size)	do { (void)(start); } while (0)
-#define __flush_invalidate_region(start, size)	do { (void)(start); } while (0)
-#else
+#include <linux/mm.h>
 #include <cpu/cacheflush.h>
 
-/*
- * Consistent DMA requires that the __flush_xxx() primitives must be set
- * for any of the enabled non-coherent caches (most of the UP CPUs),
- * regardless of PIPT or VIPT cache configurations.
- */
-
-/* Flush (write-back only) a region (smaller than a page) */
-extern void __flush_wback_region(void *start, int size);
-/* Flush (write-back & invalidate) a region (smaller than a page) */
-extern void __flush_purge_region(void *start, int size);
-/* Flush (invalidate only) a region (smaller than a page) */
-extern void __flush_invalidate_region(void *start, int size);
-#endif
-
 #define ARCH_HAS_FLUSH_ANON_PAGE
 extern void __flush_anon_page(struct page *page, unsigned long);
 
diff --git a/arch/sh/include/cpu-common/cpu/cacheflush.h b/arch/sh/include/cpu-common/cpu/cacheflush.h
index 5dc3736218e7..8189dbd68f8f 100644
--- a/arch/sh/include/cpu-common/cpu/cacheflush.h
+++ b/arch/sh/include/cpu-common/cpu/cacheflush.h
@@ -1,14 +1,12 @@
 /*
- * include/asm-sh/cpu-sh2/cacheflush.h
- *
  * Copyright (C) 2003 Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  */
-#ifndef __ASM_CPU_SH2_CACHEFLUSH_H
-#define __ASM_CPU_SH2_CACHEFLUSH_H
+#ifndef __ASM_CPU_SH_CACHEFLUSH_H
+#define __ASM_CPU_SH_CACHEFLUSH_H
 
 /*
  * Cache flushing:
@@ -22,18 +20,23 @@
  * - flush_dcache_page(pg) flushes(wback&invalidates) a page for dcache
  * - flush_icache_range(start, end) flushes(invalidates) a range for icache
  * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
- *
- * Caches are indexed (effectively) by physical address on SH-2, so
- * we don't need them.
+ * - flush_cache_sigtramp(vaddr) flushes the signal trampoline
  */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_cache_sigtramp(vaddr)		do { } while (0)
+extern void (*flush_cache_all)(void);
+extern void (*flush_cache_mm)(struct mm_struct *mm);
+extern void (*flush_cache_dup_mm)(struct mm_struct *mm);
+extern void (*flush_cache_page)(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long pfn);
+extern void (*flush_cache_range)(struct vm_area_struct *vma,
+				 unsigned long start, unsigned long end);
+extern void (*flush_dcache_page)(struct page *page);
+extern void (*flush_icache_range)(unsigned long start, unsigned long end);
+extern void (*flush_icache_page)(struct vm_area_struct *vma,
+				 struct page *page);
+extern void (*flush_cache_sigtramp)(unsigned long address);
+
+extern void (*__flush_wback_region)(void *start, int size);
+extern void (*__flush_purge_region)(void *start, int size);
+extern void (*__flush_invalidate_region)(void *start, int size);
 
-#endif /* __ASM_CPU_SH2_CACHEFLUSH_H */
+#endif /* __ASM_CPU_SH_CACHEFLUSH_H */
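
The header above swaps the old per-CPU no-op macros for function pointers that keep the same names, so generic callers do not change. A minimal standalone C sketch of that property (illustrative only, not kernel code; the struct page stub and the sh4_style_flush handler are invented names):

#include <stdio.h>

/* Previously a no-op macro: #define flush_dcache_page(page) do { } while (0) */
struct page { int id; };

/* Same identifier, now a pointer that boot-time init code fills in. */
void (*flush_dcache_page)(struct page *page);

static void sh4_style_flush(struct page *page)
{
	printf("write back + invalidate page %d\n", page->id);
}

int main(void)
{
	struct page pg = { .id = 7 };

	flush_dcache_page = sh4_style_flush;	/* done once by the CPU init code */
	flush_dcache_page(&pg);			/* call site looks like an ordinary call */
	return 0;
}
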
diff --git a/arch/sh/include/cpu-sh4/cpu/cacheflush.h b/arch/sh/include/cpu-sh4/cpu/cacheflush.h
deleted file mode 100644
index d6bd396d7dfb..000000000000
--- a/arch/sh/include/cpu-sh4/cpu/cacheflush.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * include/asm-sh/cpu-sh4/cacheflush.h
- *
- * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __ASM_CPU_SH4_CACHEFLUSH_H
-#define __ASM_CPU_SH4_CACHEFLUSH_H
-
-/*
- * Caches are broken on SH-4 (unless we use write-through
- * caching; in which case they're only semi-broken),
- * so we need them.
- */
-void flush_cache_all(void);
-void flush_cache_mm(struct mm_struct *mm);
-#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end);
-void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
-		      unsigned long pfn);
-void flush_dcache_page(struct page *pg);
-void flush_icache_range(unsigned long start, unsigned long end);
-
-#define flush_icache_page(vma,pg)		do { } while (0)
-
-#endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
diff --git a/arch/sh/include/cpu-sh5/cpu/cacheflush.h b/arch/sh/include/cpu-sh5/cpu/cacheflush.h
index 740d10a316ef..202f637a0e17 100644
--- a/arch/sh/include/cpu-sh5/cpu/cacheflush.h
+++ b/arch/sh/include/cpu-sh5/cpu/cacheflush.h
@@ -3,10 +3,6 @@
 
 #ifndef __ASSEMBLY__
 
-struct vm_area_struct;
-struct page;
-struct mm_struct;
-
 extern void flush_cache_all(void);
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_sigtramp(unsigned long vaddr);
@@ -16,10 +12,14 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, uns
 extern void flush_dcache_page(struct page *pg);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 
+/* XXX .. */
+extern void (*__flush_wback_region)(void *start, int size);
+extern void (*__flush_purge_region)(void *start, int size);
+extern void (*__flush_invalidate_region)(void *start, int size);
+
 #define flush_cache_dup_mm(mm)	flush_cache_mm(mm)
 #define flush_icache_page(vma, page)	do { } while (0)
 
 #endif /* __ASSEMBLY__ */
 
 #endif /* __ASM_SH_CPU_SH5_CACHEFLUSH_H */
-
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index b5860535e61f..05cb04bc3940 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -26,13 +26,6 @@
 #define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES	32
 
-static void __flush_dcache_segment_1way(unsigned long start,
-					unsigned long extent);
-static void __flush_dcache_segment_2way(unsigned long start,
-					unsigned long extent);
-static void __flush_dcache_segment_4way(unsigned long start,
-					unsigned long extent);
-
 static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset);
 
@@ -45,38 +38,12 @@ static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
 	(void (*)(unsigned long, unsigned long))0xdeadbeef;
 
 /*
- * SH-4 has virtually indexed and physically tagged cache.
- */
-void __init sh4_cache_init(void)
-{
-	printk("PVR=%08x CVR=%08x PRR=%08x\n",
-		ctrl_inl(CCN_PVR),
-		ctrl_inl(CCN_CVR),
-		ctrl_inl(CCN_PRR));
-
-	switch (boot_cpu_data.dcache.ways) {
-	case 1:
-		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
-		break;
-	case 2:
-		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
-		break;
-	case 4:
-		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
-		break;
-	default:
-		panic("unknown number of cache ways\n");
-		break;
-	}
-}
-
-/*
  * Write back the range of D-cache, and purge the I-cache.
  *
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh4_flush_icache_range(unsigned long start, unsigned long end)
 {
 	int icacheaddr;
 	unsigned long flags, v;
@@ -137,7 +104,7 @@ static inline void flush_cache_4096(unsigned long start,
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-void flush_dcache_page(struct page *page)
+static void sh4_flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
@@ -188,7 +155,7 @@ static inline void flush_dcache_all(void)
 	wmb();
 }
 
-void flush_cache_all(void)
+static void sh4_flush_cache_all(void)
 {
 	flush_dcache_all();
 	flush_icache_all();
@@ -280,7 +247,7 @@ loop_exit:
  *
  * Caller takes mm->mmap_sem.
  */
-void flush_cache_mm(struct mm_struct *mm)
+static void sh4_flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
 		return;
@@ -320,8 +287,8 @@ void flush_cache_mm(struct mm_struct *mm)
  * ADDR: Virtual Address (U0 address)
  * PFN: Physical page number
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
-		      unsigned long pfn)
+static void sh4_flush_cache_page(struct vm_area_struct *vma,
+				 unsigned long address, unsigned long pfn)
 {
 	unsigned long phys = pfn << PAGE_SHIFT;
 	unsigned int alias_mask;
@@ -368,8 +335,8 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
  * Flushing the cache lines for U0 only isn't enough.
  * We need to flush for P1 too, which may contain aliases.
  */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
+static void sh4_flush_cache_range(struct vm_area_struct *vma,
+				  unsigned long start, unsigned long end)
 {
 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
 		return;
@@ -668,3 +635,41 @@ static void __flush_dcache_segment_4way(unsigned long start,
 		a3 += linesz;
 	} while (a0 < a0e);
 }
+
+extern void __weak sh4__flush_region_init(void);
+
+/*
+ * SH-4 has virtually indexed and physically tagged cache.
+ */
+void __init sh4_cache_init(void)
+{
+	printk("PVR=%08x CVR=%08x PRR=%08x\n",
+		ctrl_inl(CCN_PVR),
+		ctrl_inl(CCN_CVR),
+		ctrl_inl(CCN_PRR));
+
+	switch (boot_cpu_data.dcache.ways) {
+	case 1:
+		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
+		break;
+	case 2:
+		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
+		break;
+	case 4:
+		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
+		break;
+	default:
+		panic("unknown number of cache ways\n");
+		break;
+	}
+
+	flush_icache_range = sh4_flush_icache_range;
+	flush_dcache_page = sh4_flush_dcache_page;
+	flush_cache_all = sh4_flush_cache_all;
+	flush_cache_mm = sh4_flush_cache_mm;
+	flush_cache_dup_mm = sh4_flush_cache_mm;
+	flush_cache_page = sh4_flush_cache_page;
+	flush_cache_range = sh4_flush_cache_range;
+
+	sh4__flush_region_init();
+}
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index a50d23caf015..a8f5142dc2cf 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -20,6 +20,8 @@
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 
+extern void __weak sh4__flush_region_init(void);
+
 /* Wired TLB entry for the D-cache */
 static unsigned long long dtlb_cache_slot;
 
@@ -27,6 +29,8 @@ void __init cpu_cache_init(void)
 {
 	/* Reserve a slot for dcache colouring in the DTLB */
 	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
+
+	sh4__flush_region_init();
 }
 
 void __init kmap_coherent_init(void)
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index a31e5c46e7a6..da5bc6ac1b28 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -15,6 +15,62 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
+void (*flush_cache_all)(void);
+void (*flush_cache_mm)(struct mm_struct *mm);
+void (*flush_cache_dup_mm)(struct mm_struct *mm);
+void (*flush_cache_page)(struct vm_area_struct *vma,
+			 unsigned long addr, unsigned long pfn);
+void (*flush_cache_range)(struct vm_area_struct *vma,
+			  unsigned long start, unsigned long end);
+void (*flush_dcache_page)(struct page *page);
+void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*flush_icache_page)(struct vm_area_struct *vma,
+			  struct page *page);
+void (*flush_cache_sigtramp)(unsigned long address);
+void (*__flush_wback_region)(void *start, int size);
+void (*__flush_purge_region)(void *start, int size);
+void (*__flush_invalidate_region)(void *start, int size);
+
+static inline void noop_flush_cache_all(void)
+{
+}
+
+static inline void noop_flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void noop_flush_cache_page(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long pfn)
+{
+}
+
+static inline void noop_flush_cache_range(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end)
+{
+}
+
+static inline void noop_flush_dcache_page(struct page *page)
+{
+}
+
+static inline void noop_flush_icache_range(unsigned long start,
+					   unsigned long end)
+{
+}
+
+static inline void noop_flush_icache_page(struct vm_area_struct *vma,
+					  struct page *page)
+{
+}
+
+static inline void noop_flush_cache_sigtramp(unsigned long address)
+{
+}
+
+static inline void noop__flush_region(void *start, int size)
+{
+}
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
@@ -174,6 +230,20 @@ void __init cpu_cache_init(void)
 	compute_alias(&boot_cpu_data.dcache);
 	compute_alias(&boot_cpu_data.scache);
 
+	flush_cache_all = noop_flush_cache_all;
+	flush_cache_mm = noop_flush_cache_mm;
+	flush_cache_dup_mm = noop_flush_cache_mm;
+	flush_cache_page = noop_flush_cache_page;
+	flush_cache_range = noop_flush_cache_range;
+	flush_dcache_page = noop_flush_dcache_page;
+	flush_icache_range = noop_flush_icache_range;
+	flush_icache_page = noop_flush_icache_page;
+	flush_cache_sigtramp = noop_flush_cache_sigtramp;
+
+	__flush_wback_region = noop__flush_region;
+	__flush_purge_region = noop__flush_region;
+	__flush_invalidate_region = noop__flush_region;
+
 	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
 	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
 	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
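
cpu_cache_init() above points every hook at a no-op before the family-specific init overrides the ones it implements, so no pointer is left NULL once it has run. A standalone C sketch of that default-then-override pattern (illustrative only; the hook names and the has_cache flag are invented):

#include <stdio.h>
#include <string.h>

static void (*flush_cache_all_hook)(void);
static void (*flush_wback_region_hook)(void *start, int size);

static void noop_flush_cache_all(void) { }
static void noop_flush_region(void *start, int size) { }

static void family_flush_cache_all(void) { printf("family: flush everything\n"); }
static void family_flush_wback_region(void *start, int size)
{
	printf("family: write back %d bytes at %p\n", size, start);
}

static void cpu_cache_init_sketch(int has_cache)
{
	/* Step 1: safe defaults, so every hook is always callable. */
	flush_cache_all_hook = noop_flush_cache_all;
	flush_wback_region_hook = noop_flush_region;

	/* Step 2: the detected family overrides what it supports. */
	if (has_cache) {
		flush_cache_all_hook = family_flush_cache_all;
		flush_wback_region_hook = family_flush_wback_region;
	}
}

int main(void)
{
	char zero_page[32];

	cpu_cache_init_sketch(1);
	memset(zero_page, 0, sizeof(zero_page));
	flush_wback_region_hook(zero_page, sizeof(zero_page));	/* never NULL */
	flush_cache_all_hook();
	return 0;
}
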
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c
index 1b6b6a12a99b..99c50dc7551e 100644
--- a/arch/sh/mm/flush-sh4.c
+++ b/arch/sh/mm/flush-sh4.c
@@ -8,7 +8,7 @@
  * START: Virtual Address (U0, P1, or P3)
  * SIZE: Size of the region.
  */
-void __weak __flush_wback_region(void *start, int size)
+static void sh4__flush_wback_region(void *start, int size)
 {
 	reg_size_t aligned_start, v, cnt, end;
 
@@ -51,7 +51,7 @@ void __weak __flush_wback_region(void *start, int size)
  * START: Virtual Address (U0, P1, or P3)
  * SIZE: Size of the region.
  */
-void __weak __flush_purge_region(void *start, int size)
+static void sh4__flush_purge_region(void *start, int size)
 {
 	reg_size_t aligned_start, v, cnt, end;
 
@@ -90,7 +90,7 @@ void __weak __flush_purge_region(void *start, int size)
 /*
  * No write back please
  */
-void __weak __flush_invalidate_region(void *start, int size)
+static void sh4__flush_invalidate_region(void *start, int size)
 {
 	reg_size_t aligned_start, v, cnt, end;
 
@@ -126,3 +126,10 @@ void __weak __flush_invalidate_region(void *start, int size)
 		cnt--;
 	}
 }
+
+void __init sh4__flush_region_init(void)
+{
+	__flush_wback_region = sh4__flush_wback_region;
+	__flush_invalidate_region = sh4__flush_invalidate_region;
+	__flush_purge_region = sh4__flush_purge_region;
+}
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index cf0e9c5146b1..0a9b4d855bc9 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -210,6 +210,9 @@ void __init mem_init(void)
 		high_memory = node_high_memory;
 	}
 
+	/* Set this up early, so we can take care of the zero page */
+	cpu_cache_init();
+
 	/* clear the zero-page */
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
@@ -230,8 +233,6 @@ void __init mem_init(void)
 		datasize >> 10,
 		initsize >> 10);
 
-	cpu_cache_init();
-
 	/* Initialize the vDSO */
 	vsyscall_init();
 }