Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/cache-sh4.c | 87
-rw-r--r--  arch/sh/mm/cache-sh5.c |  4
-rw-r--r--  arch/sh/mm/cache.c     | 70
-rw-r--r--  arch/sh/mm/flush-sh4.c | 13
-rw-r--r--  arch/sh/mm/init.c      |  5
5 files changed, 133 insertions, 46 deletions
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index b5860535e61f..05cb04bc3940 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -26,13 +26,6 @@
 #define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES	32
 
-static void __flush_dcache_segment_1way(unsigned long start,
-					unsigned long extent);
-static void __flush_dcache_segment_2way(unsigned long start,
-					unsigned long extent);
-static void __flush_dcache_segment_4way(unsigned long start,
-					unsigned long extent);
-
 static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset);
 
@@ -45,38 +38,12 @@ static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
 	(void (*)(unsigned long, unsigned long))0xdeadbeef;
 
 /*
- * SH-4 has virtually indexed and physically tagged cache.
- */
-void __init sh4_cache_init(void)
-{
-	printk("PVR=%08x CVR=%08x PRR=%08x\n",
-		ctrl_inl(CCN_PVR),
-		ctrl_inl(CCN_CVR),
-		ctrl_inl(CCN_PRR));
-
-	switch (boot_cpu_data.dcache.ways) {
-	case 1:
-		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
-		break;
-	case 2:
-		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
-		break;
-	case 4:
-		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
-		break;
-	default:
-		panic("unknown number of cache ways\n");
-		break;
-	}
-}
-
-/*
  * Write back the range of D-cache, and purge the I-cache.
  *
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh4_flush_icache_range(unsigned long start, unsigned long end)
 {
 	int icacheaddr;
 	unsigned long flags, v;
@@ -137,7 +104,7 @@ static inline void flush_cache_4096(unsigned long start,
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-void flush_dcache_page(struct page *page)
+static void sh4_flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
@@ -188,7 +155,7 @@ static inline void flush_dcache_all(void)
 	wmb();
 }
 
-void flush_cache_all(void)
+static void sh4_flush_cache_all(void)
 {
 	flush_dcache_all();
 	flush_icache_all();
@@ -280,7 +247,7 @@ loop_exit:
  *
  * Caller takes mm->mmap_sem.
  */
-void flush_cache_mm(struct mm_struct *mm)
+static void sh4_flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
 		return;
@@ -320,8 +287,8 @@ void flush_cache_mm(struct mm_struct *mm)
  * ADDR: Virtual Address (U0 address)
  * PFN: Physical page number
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
-		      unsigned long pfn)
+static void sh4_flush_cache_page(struct vm_area_struct *vma,
+				 unsigned long address, unsigned long pfn)
 {
 	unsigned long phys = pfn << PAGE_SHIFT;
 	unsigned int alias_mask;
@@ -368,8 +335,8 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
  * Flushing the cache lines for U0 only isn't enough.
  * We need to flush for P1 too, which may contain aliases.
  */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
+static void sh4_flush_cache_range(struct vm_area_struct *vma,
+				  unsigned long start, unsigned long end)
 {
 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
 		return;
@@ -668,3 +635,41 @@ static void __flush_dcache_segment_4way(unsigned long start,
 		a3 += linesz;
 	} while (a0 < a0e);
 }
+
+extern void __weak sh4__flush_region_init(void);
+
+/*
+ * SH-4 has virtually indexed and physically tagged cache.
+ */
+void __init sh4_cache_init(void)
+{
+	printk("PVR=%08x CVR=%08x PRR=%08x\n",
+		ctrl_inl(CCN_PVR),
+		ctrl_inl(CCN_CVR),
+		ctrl_inl(CCN_PRR));
+
+	switch (boot_cpu_data.dcache.ways) {
+	case 1:
+		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
+		break;
+	case 2:
+		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
+		break;
+	case 4:
+		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
+		break;
+	default:
+		panic("unknown number of cache ways\n");
+		break;
+	}
+
+	flush_icache_range = sh4_flush_icache_range;
+	flush_dcache_page = sh4_flush_dcache_page;
+	flush_cache_all = sh4_flush_cache_all;
+	flush_cache_mm = sh4_flush_cache_mm;
+	flush_cache_dup_mm = sh4_flush_cache_mm;
+	flush_cache_page = sh4_flush_cache_page;
+	flush_cache_range = sh4_flush_cache_range;
+
+	sh4__flush_region_init();
+}
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index a50d23caf015..a8f5142dc2cf 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -20,6 +20,8 @@
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 
+extern void __weak sh4__flush_region_init(void);
+
 /* Wired TLB entry for the D-cache */
 static unsigned long long dtlb_cache_slot;
 
@@ -27,6 +29,8 @@ void __init cpu_cache_init(void)
 {
 	/* Reserve a slot for dcache colouring in the DTLB */
 	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
+
+	sh4__flush_region_init();
 }
 
 void __init kmap_coherent_init(void)
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index a31e5c46e7a6..da5bc6ac1b28 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -15,6 +15,62 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
+void (*flush_cache_all)(void);
+void (*flush_cache_mm)(struct mm_struct *mm);
+void (*flush_cache_dup_mm)(struct mm_struct *mm);
+void (*flush_cache_page)(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long pfn);
+void (*flush_cache_range)(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end);
+void (*flush_dcache_page)(struct page *page);
+void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*flush_icache_page)(struct vm_area_struct *vma,
+				struct page *page);
+void (*flush_cache_sigtramp)(unsigned long address);
+void (*__flush_wback_region)(void *start, int size);
+void (*__flush_purge_region)(void *start, int size);
+void (*__flush_invalidate_region)(void *start, int size);
+
+static inline void noop_flush_cache_all(void)
+{
+}
+
+static inline void noop_flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void noop_flush_cache_page(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long pfn)
+{
+}
+
+static inline void noop_flush_cache_range(struct vm_area_struct *vma,
+				unsigned long start, unsigned long end)
+{
+}
+
+static inline void noop_flush_dcache_page(struct page *page)
+{
+}
+
+static inline void noop_flush_icache_range(unsigned long start,
+					   unsigned long end)
+{
+}
+
+static inline void noop_flush_icache_page(struct vm_area_struct *vma,
+					  struct page *page)
+{
+}
+
+static inline void noop_flush_cache_sigtramp(unsigned long address)
+{
+}
+
+static inline void noop__flush_region(void *start, int size)
+{
+}
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
@@ -174,6 +230,20 @@ void __init cpu_cache_init(void)
 	compute_alias(&boot_cpu_data.dcache);
 	compute_alias(&boot_cpu_data.scache);
 
+	flush_cache_all = noop_flush_cache_all;
+	flush_cache_mm = noop_flush_cache_mm;
+	flush_cache_dup_mm = noop_flush_cache_mm;
+	flush_cache_page = noop_flush_cache_page;
+	flush_cache_range = noop_flush_cache_range;
+	flush_dcache_page = noop_flush_dcache_page;
+	flush_icache_range = noop_flush_icache_range;
+	flush_icache_page = noop_flush_icache_page;
+	flush_cache_sigtramp = noop_flush_cache_sigtramp;
+
+	__flush_wback_region = noop__flush_region;
+	__flush_purge_region = noop__flush_region;
+	__flush_invalidate_region = noop__flush_region;
+
 	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
 	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
 	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
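Taken together, the cache.c hunks set up the pattern the rest of the patch relies on: every cache operation becomes a function pointer that initially points at a no-op stub, and the CPU-family init code (sh4_cache_init() and the SH-5 cpu_cache_init() above) swaps in real implementations at boot. Below is a minimal, self-contained sketch of that dispatch pattern; the names are illustrative, not the kernel's.

#include <stdio.h>

/* Cache-op hooks, defaulting to no-ops so unconverted CPUs still work. */
static void noop_flush_dcache_page(unsigned long page) { }
static void noop_flush_icache_range(unsigned long start, unsigned long end) { }

void (*flush_dcache_page)(unsigned long page) = noop_flush_dcache_page;
void (*flush_icache_range)(unsigned long start, unsigned long end) =
					noop_flush_icache_range;

/* A CPU family's implementations, kept static and registered at init time. */
static void sh4_like_flush_dcache_page(unsigned long page)
{
	printf("write back + invalidate D-cache for page %#lx\n", page);
}

static void sh4_like_flush_icache_range(unsigned long start, unsigned long end)
{
	printf("invalidate I-cache %#lx-%#lx\n", start, end);
}

static void sh4_like_cache_init(void)
{
	flush_dcache_page = sh4_like_flush_dcache_page;
	flush_icache_range = sh4_like_flush_icache_range;
}

static void cpu_cache_init_sketch(int family_is_sh4)
{
	/* The defaults are already the no-ops; only override known families. */
	if (family_is_sh4)
		sh4_like_cache_init();
}

int main(void)
{
	cpu_cache_init_sketch(1);
	flush_dcache_page(0x1000);		/* dispatches to the SH-4-style routine */
	flush_icache_range(0x2000, 0x3000);
	return 0;
}

Because the dispatch is just a pointer assignment, one implementation can back several hooks, which is how the real patch points both flush_cache_mm and flush_cache_dup_mm at sh4_flush_cache_mm.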
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c
index 1b6b6a12a99b..99c50dc7551e 100644
--- a/arch/sh/mm/flush-sh4.c
+++ b/arch/sh/mm/flush-sh4.c
@@ -8,7 +8,7 @@
  * START: Virtual Address (U0, P1, or P3)
  * SIZE: Size of the region.
  */
-void __weak __flush_wback_region(void *start, int size)
+static void sh4__flush_wback_region(void *start, int size)
 {
 	reg_size_t aligned_start, v, cnt, end;
 
@@ -51,7 +51,7 @@ void __weak __flush_wback_region(void *start, int size)
  * START: Virtual Address (U0, P1, or P3)
  * SIZE: Size of the region.
  */
-void __weak __flush_purge_region(void *start, int size)
+static void sh4__flush_purge_region(void *start, int size)
 {
 	reg_size_t aligned_start, v, cnt, end;
 
@@ -90,7 +90,7 @@ void __weak __flush_purge_region(void *start, int size)
 /*
  * No write back please
  */
-void __weak __flush_invalidate_region(void *start, int size)
+static void sh4__flush_invalidate_region(void *start, int size)
 {
 	reg_size_t aligned_start, v, cnt, end;
 
@@ -126,3 +126,10 @@ void __weak __flush_invalidate_region(void *start, int size)
 		cnt--;
 	}
 }
+
+void __init sh4__flush_region_init(void)
+{
+	__flush_wback_region = sh4__flush_wback_region;
+	__flush_invalidate_region = sh4__flush_invalidate_region;
+	__flush_purge_region = sh4__flush_purge_region;
+}
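The flush-sh4.c change replaces __weak link-time defaults with static routines that only become reachable once sh4__flush_region_init() stores them in the pointers declared in cache.c. A rough standalone sketch of the difference between the two override mechanisms (not kernel code; the names are made up for illustration):

#include <stdio.h>

/* Weak default: another object file could replace this, but only at link time. */
__attribute__((weak)) void flush_region_weak(void *start, int size)
{
	printf("weak default flush of %d bytes\n", size);
}

/* Pointer-based default: replaced at run time by an init hook. */
static void noop_flush_region(void *start, int size) { }
void (*flush_region_ptr)(void *start, int size) = noop_flush_region;

static void sh4_like_flush_region(void *start, int size)
{
	printf("cpu-specific flush of %d bytes\n", size);
}

static void flush_region_init(void)	/* plays the role of sh4__flush_region_init() */
{
	flush_region_ptr = sh4_like_flush_region;
}

int main(void)
{
	char buf[64];

	flush_region_weak(buf, sizeof(buf));	/* whichever definition the linker picked */

	flush_region_ptr(buf, sizeof(buf));	/* still the no-op: init has not run yet */
	flush_region_init();
	flush_region_ptr(buf, sizeof(buf));	/* now dispatches to the CPU-specific routine */
	return 0;
}

Registration happens at run time, which is why both sh4_cache_init() and the SH-5 cpu_cache_init() above end by calling the same sh4__flush_region_init() hook.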
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index cf0e9c5146b1..0a9b4d855bc9 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -210,6 +210,9 @@ void __init mem_init(void)
 		high_memory = node_high_memory;
 	}
 
+	/* Set this up early, so we can take care of the zero page */
+	cpu_cache_init();
+
 	/* clear the zero-page */
 	memset(empty_zero_page, 0, PAGE_SIZE);
 	__flush_wback_region(empty_zero_page, PAGE_SIZE);
@@ -230,8 +233,6 @@ void __init mem_init(void)
 		       datasize >> 10,
 		       initsize >> 10);
 
-	cpu_cache_init();
-
 	/* Initialize the vDSO */
 	vsyscall_init();
 }