path: root/arch/sh/mm/cache-sh4.c
author		Paul Mundt <lethal@linux-sh.org>	2009-08-14 23:29:49 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-08-14 23:29:49 -0400
commit		37443ef3f0406e855e169c87ae3f4ffb4b6ff635 (patch)
tree		dd57e6fe91e6058b45660b6e4629249a50bc448e /arch/sh/mm/cache-sh4.c
parent		916e97834e023f89b31f796b53cc9c7956e7fe17 (diff)
sh: Migrate SH-4 cacheflush ops to function pointers.
This paves the way for allowing individual CPUs to overload the
individual flushing routines that they care about without having to
depend on weak aliases. SH-4 is converted over initially, as it wires
up pretty much everything. The majority of the other CPUs will simply
use the default no-op implementation with their own region flushers
wired up.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
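The mechanics are easy to see in isolation. Below is a minimal, self-contained
sketch (userspace, not part of the patch) contrasting link-time dispatch
through a weak alias with run-time dispatch through a function pointer; the
names mirror the patch, the kernel's __weak macro is expanded by hand, and
main() stands in for the boot-time init path:

#include <stdio.h>

#define __weak __attribute__((weak))	/* as in <linux/compiler.h> */

/* Old style: a generic weak default. A CPU port overrides it by
 * defining a strong symbol of the same name in another object file,
 * and the linker picks the strong definition. */
void __weak flush_icache_all(void)
{
	puts("generic flush_icache_all (weak default)");
}

/* New style: the op is a plain function pointer, wired up at init. */
void (*flush_cache_all)(void);

static void sh4_flush_cache_all(void)
{
	puts("SH-4 flush_cache_all");
}

void sh4_cache_init(void)
{
	flush_cache_all = sh4_flush_cache_all;	/* run-time override */
}

int main(void)
{
	flush_icache_all();	/* override chosen at link time */
	sh4_cache_init();
	flush_cache_all();	/* override chosen at run time */
	return 0;
}

With the pointer scheme, a CPU's init code assigns only the routines it cares
about and everything else keeps its default, with no symbol shadowing at link
time.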
Diffstat (limited to 'arch/sh/mm/cache-sh4.c')
-rw-r--r--	arch/sh/mm/cache-sh4.c	87
1 file changed, 46 insertions(+), 41 deletions(-)
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index b5860535e61f..05cb04bc3940 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -26,13 +26,6 @@
 #define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES	32
 
-static void __flush_dcache_segment_1way(unsigned long start,
-					unsigned long extent);
-static void __flush_dcache_segment_2way(unsigned long start,
-					unsigned long extent);
-static void __flush_dcache_segment_4way(unsigned long start,
-					unsigned long extent);
-
 static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset);
 
@@ -45,38 +38,12 @@ static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
 	(void (*)(unsigned long, unsigned long))0xdeadbeef;
 
 /*
- * SH-4 has virtually indexed and physically tagged cache.
- */
-void __init sh4_cache_init(void)
-{
-	printk("PVR=%08x CVR=%08x PRR=%08x\n",
-		ctrl_inl(CCN_PVR),
-		ctrl_inl(CCN_CVR),
-		ctrl_inl(CCN_PRR));
-
-	switch (boot_cpu_data.dcache.ways) {
-	case 1:
-		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
-		break;
-	case 2:
-		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
-		break;
-	case 4:
-		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
-		break;
-	default:
-		panic("unknown number of cache ways\n");
-		break;
-	}
-}
-
-/*
  * Write back the range of D-cache, and purge the I-cache.
  *
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh4_flush_icache_range(unsigned long start, unsigned long end)
 {
 	int icacheaddr;
 	unsigned long flags, v;
@@ -137,7 +104,7 @@ static inline void flush_cache_4096(unsigned long start,
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-void flush_dcache_page(struct page *page)
+static void sh4_flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
@@ -188,7 +155,7 @@ static inline void flush_dcache_all(void)
 	wmb();
 }
 
-void flush_cache_all(void)
+static void sh4_flush_cache_all(void)
 {
 	flush_dcache_all();
 	flush_icache_all();
@@ -280,7 +247,7 @@ loop_exit:
  *
  * Caller takes mm->mmap_sem.
  */
-void flush_cache_mm(struct mm_struct *mm)
+static void sh4_flush_cache_mm(struct mm_struct *mm)
 {
 	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
 		return;
@@ -320,8 +287,8 @@ void flush_cache_mm(struct mm_struct *mm)
  * ADDR: Virtual Address (U0 address)
  * PFN: Physical page number
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
-		      unsigned long pfn)
+static void sh4_flush_cache_page(struct vm_area_struct *vma,
+				 unsigned long address, unsigned long pfn)
 {
 	unsigned long phys = pfn << PAGE_SHIFT;
 	unsigned int alias_mask;
@@ -368,8 +335,8 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
  * Flushing the cache lines for U0 only isn't enough.
  * We need to flush for P1 too, which may contain aliases.
  */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
+static void sh4_flush_cache_range(struct vm_area_struct *vma,
+				  unsigned long start, unsigned long end)
 {
 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
 		return;
@@ -668,3 +635,41 @@ static void __flush_dcache_segment_4way(unsigned long start,
 		a3 += linesz;
 	} while (a0 < a0e);
 }
+
+extern void __weak sh4__flush_region_init(void);
+
+/*
+ * SH-4 has virtually indexed and physically tagged cache.
+ */
+void __init sh4_cache_init(void)
+{
+	printk("PVR=%08x CVR=%08x PRR=%08x\n",
+		ctrl_inl(CCN_PVR),
+		ctrl_inl(CCN_CVR),
+		ctrl_inl(CCN_PRR));
+
+	switch (boot_cpu_data.dcache.ways) {
+	case 1:
+		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
+		break;
+	case 2:
+		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
+		break;
+	case 4:
+		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
+		break;
+	default:
+		panic("unknown number of cache ways\n");
+		break;
+	}
+
+	flush_icache_range	= sh4_flush_icache_range;
+	flush_dcache_page	= sh4_flush_dcache_page;
+	flush_cache_all		= sh4_flush_cache_all;
+	flush_cache_mm		= sh4_flush_cache_mm;
+	flush_cache_dup_mm	= sh4_flush_cache_mm;
+	flush_cache_page	= sh4_flush_cache_page;
+	flush_cache_range	= sh4_flush_cache_range;
+
+	sh4__flush_region_init();
+}
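One subtlety in the new init path: sh4__flush_region_init() is declared
extern __weak, which makes the reference itself weak, so the link succeeds
even when no object file provides a definition and the symbol then resolves
to NULL. A small standalone sketch of that behavior (again not part of the
patch; the patch calls the function unconditionally, presumably because SH-4
builds always link in a real region flusher):

#include <stdio.h>

#define __weak __attribute__((weak))	/* as in <linux/compiler.h> */

/* Weak reference: may legitimately be left undefined at link time. */
extern void __weak sh4__flush_region_init(void);

int main(void)
{
	if (sh4__flush_region_init)	/* NULL when nothing defines it */
		sh4__flush_region_init();
	else
		puts("no region flusher linked in");
	return 0;
}

The if (fn) guard is the usual defensive companion to a weak reference when
the definition may genuinely be absent.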