Diffstat (limited to 'arch/sh/mm/cache-sh4.c')
 arch/sh/mm/cache-sh4.c | 362
 1 file changed, 167 insertions(+), 195 deletions(-)
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 5cfe08dbb59e..b2453bbef4cd 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/io.h>
 #include <linux/mutex.h>
+#include <linux/fs.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -25,13 +26,6 @@
 #define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES	32
 
-static void __flush_dcache_segment_1way(unsigned long start,
-					unsigned long extent);
-static void __flush_dcache_segment_2way(unsigned long start,
-					unsigned long extent);
-static void __flush_dcache_segment_4way(unsigned long start,
-					unsigned long extent);
-
 static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset);
 
@@ -43,182 +37,56 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
 	(void (*)(unsigned long, unsigned long))0xdeadbeef;
 
-static void compute_alias(struct cache_info *c)
-{
-	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
-	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
-}
-
-static void __init emit_cache_params(void)
-{
-	printk("PVR=%08x CVR=%08x PRR=%08x\n",
-		ctrl_inl(CCN_PVR),
-		ctrl_inl(CCN_CVR),
-		ctrl_inl(CCN_PRR));
-	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-		boot_cpu_data.icache.ways,
-		boot_cpu_data.icache.sets,
-		boot_cpu_data.icache.way_incr);
-	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-		boot_cpu_data.icache.entry_mask,
-		boot_cpu_data.icache.alias_mask,
-		boot_cpu_data.icache.n_aliases);
-	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-		boot_cpu_data.dcache.ways,
-		boot_cpu_data.dcache.sets,
-		boot_cpu_data.dcache.way_incr);
-	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-		boot_cpu_data.dcache.entry_mask,
-		boot_cpu_data.dcache.alias_mask,
-		boot_cpu_data.dcache.n_aliases);
-
-	/*
-	 * Emit Secondary Cache parameters if the CPU has a probed L2.
-	 */
-	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
-		printk("S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-			boot_cpu_data.scache.ways,
-			boot_cpu_data.scache.sets,
-			boot_cpu_data.scache.way_incr);
-		printk("S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-			boot_cpu_data.scache.entry_mask,
-			boot_cpu_data.scache.alias_mask,
-			boot_cpu_data.scache.n_aliases);
-	}
-
-	if (!__flush_dcache_segment_fn)
-		panic("unknown number of cache ways\n");
-}
-
-/*
- * SH-4 has virtually indexed and physically tagged cache.
- */
-void __init p3_cache_init(void)
-{
-	compute_alias(&boot_cpu_data.icache);
-	compute_alias(&boot_cpu_data.dcache);
-	compute_alias(&boot_cpu_data.scache);
-
-	switch (boot_cpu_data.dcache.ways) {
-	case 1:
-		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
-		break;
-	case 2:
-		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
-		break;
-	case 4:
-		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
-		break;
-	default:
-		__flush_dcache_segment_fn = NULL;
-		break;
-	}
-
-	emit_cache_params();
-}
-
-/*
- * Write back the dirty D-caches, but not invalidate them.
- *
- * START: Virtual Address (U0, P1, or P3)
- * SIZE: Size of the region.
- */
-void __flush_wback_region(void *start, int size)
-{
-	unsigned long v;
-	unsigned long begin, end;
-
-	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
-	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
-		& ~(L1_CACHE_BYTES-1);
-	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
-		asm volatile("ocbwb	%0"
-			     : /* no output */
-			     : "m" (__m(v)));
-	}
-}
-
-/*
- * Write back the dirty D-caches and invalidate them.
- *
- * START: Virtual Address (U0, P1, or P3)
- * SIZE: Size of the region.
- */
-void __flush_purge_region(void *start, int size)
-{
-	unsigned long v;
-	unsigned long begin, end;
-
-	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
-	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
-		& ~(L1_CACHE_BYTES-1);
-	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
-		asm volatile("ocbp	%0"
-			     : /* no output */
-			     : "m" (__m(v)));
-	}
-}
-
-/*
- * No write back please
- */
-void __flush_invalidate_region(void *start, int size)
-{
-	unsigned long v;
-	unsigned long begin, end;
-
-	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
-	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
-		& ~(L1_CACHE_BYTES-1);
-	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
-		asm volatile("ocbi	%0"
-			     : /* no output */
-			     : "m" (__m(v)));
-	}
-}
-
-/*
- * Write back the range of D-cache, and purge the I-cache.
- *
- * Called from kernel/module.c:sys_init_module and routine for a.out format,
- * signal handler code and kprobes code
- */
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-	int icacheaddr;
-	unsigned long flags, v;
-	int i;
-
-	/* If there are too many pages then just blow the caches */
-	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-		flush_cache_all();
-	} else {
-		/* selectively flush d-cache then invalidate the i-cache */
-		/* this is inefficient, so only use for small ranges */
-		start &= ~(L1_CACHE_BYTES-1);
-		end += L1_CACHE_BYTES-1;
-		end &= ~(L1_CACHE_BYTES-1);
-
-		local_irq_save(flags);
-		jump_to_uncached();
-
-		for (v = start; v < end; v+=L1_CACHE_BYTES) {
-			asm volatile("ocbwb	%0"
-				     : /* no output */
-				     : "m" (__m(v)));
-
-			icacheaddr = CACHE_IC_ADDRESS_ARRAY | (
-					v & cpu_data->icache.entry_mask);
-
-			for (i = 0; i < cpu_data->icache.ways;
-				i++, icacheaddr += cpu_data->icache.way_incr)
-					/* Clear i-cache line valid-bit */
-					ctrl_outl(0, icacheaddr);
-		}
-
-		back_to_cached();
-		local_irq_restore(flags);
-	}
+/*
+ * Write back the range of D-cache, and purge the I-cache.
+ *
+ * Called from kernel/module.c:sys_init_module and routine for a.out format,
+ * signal handler code and kprobes code
+ */
+static void sh4_flush_icache_range(void *args)
+{
+	struct flusher_data *data = args;
+	unsigned long start, end;
+	unsigned long flags, v;
+	int i;
+
+	start = data->addr1;
+	end = data->addr2;
+
+	/* If there are too many pages then just blow away the caches */
+	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
+		local_flush_cache_all(NULL);
+		return;
+	}
+
+	/*
+	 * Selectively flush d-cache then invalidate the i-cache.
+	 * This is inefficient, so only use this for small ranges.
+	 */
+	start &= ~(L1_CACHE_BYTES-1);
+	end += L1_CACHE_BYTES-1;
+	end &= ~(L1_CACHE_BYTES-1);
+
+	local_irq_save(flags);
+	jump_to_uncached();
+
+	for (v = start; v < end; v += L1_CACHE_BYTES) {
+		unsigned long icacheaddr;
+
+		__ocbwb(v);
+
+		icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
+				cpu_data->icache.entry_mask);
+
+		/* Clear i-cache line valid-bit */
+		for (i = 0; i < cpu_data->icache.ways; i++) {
+			__raw_writel(0, icacheaddr);
+			icacheaddr += cpu_data->icache.way_incr;
+		}
+	}
+
+	back_to_cached();
+	local_irq_restore(flags);
 }
 
 static inline void flush_cache_4096(unsigned long start,
@@ -244,9 +112,17 @@ static inline void flush_cache_4096(unsigned long start,
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-void flush_dcache_page(struct page *page)
+static void sh4_flush_dcache_page(void *arg)
 {
-	if (test_bit(PG_mapped, &page->flags)) {
+	struct page *page = arg;
+#ifndef CONFIG_SMP
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
+#endif
+	{
 		unsigned long phys = PHYSADDR(page_address(page));
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
 		int i, n;
@@ -282,13 +158,13 @@ static void __uses_jump_to_uncached flush_icache_all(void)
 	local_irq_restore(flags);
 }
 
-void flush_dcache_all(void)
+static inline void flush_dcache_all(void)
 {
 	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
 	wmb();
 }
 
-void flush_cache_all(void)
+static void sh4_flush_cache_all(void *unused)
 {
 	flush_dcache_all();
 	flush_icache_all();
@@ -380,8 +256,13 @@ loop_exit:
  *
  * Caller takes mm->mmap_sem.
  */
-void flush_cache_mm(struct mm_struct *mm)
+static void sh4_flush_cache_mm(void *arg)
 {
+	struct mm_struct *mm = arg;
+
+	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
+		return;
+
 	/*
 	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
 	 * the cache is physically tagged, the data can just be left in there.
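The new early return deserves a note: cpu_context() reads the mm's ASID slot for the executing CPU (per asm/mmu_context.h, where NO_CONTEXT is the never-assigned value), so an mm that has never been scheduled on this CPU can have nothing resident in its virtually indexed cache and the whole flush can be skipped. Spelled out as a sketch (the helper name is invented):

/* Invented-name helper illustrating the guard: an mm with no ASID on
 * this CPU never ran here, so none of its user addresses can be live
 * in this CPU's virtually indexed D-cache or I-cache. */
static inline int mm_has_run_here(struct mm_struct *mm)
{
	return cpu_context(smp_processor_id(), mm) != NO_CONTEXT;
}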
@@ -417,12 +298,21 @@ void flush_cache_mm(struct mm_struct *mm)
  * ADDR: Virtual Address (U0 address)
  * PFN: Physical page number
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
-		      unsigned long pfn)
+static void sh4_flush_cache_page(void *args)
 {
-	unsigned long phys = pfn << PAGE_SHIFT;
+	struct flusher_data *data = args;
+	struct vm_area_struct *vma;
+	unsigned long address, pfn, phys;
 	unsigned int alias_mask;
 
+	vma = data->vma;
+	address = data->addr1;
+	pfn = data->addr2;
+	phys = pfn << PAGE_SHIFT;
+
+	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
+		return;
+
 	alias_mask = boot_cpu_data.dcache.alias_mask;
 
 	/* We only need to flush D-cache when we have alias */
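The alias check is worth a worked example. Using the same arithmetic as the compute_alias() function removed earlier in this patch, with an assumed (not hardware-probed) geometry of 512 sets per way, 32-byte lines and 4KiB pages, this standalone program shows which address bits can alias:

#include <stdio.h>

int main(void)
{
	/* Assumed geometry: 16KiB per way = 512 sets * 32-byte lines,
	 * with 4KiB pages (PAGE_SHIFT = 12). Not probed from hardware. */
	unsigned long sets = 512, entry_shift = 5;
	unsigned long page_size = 4096, page_shift = 12;
	unsigned long alias_mask, n_aliases;

	/* Same arithmetic as the removed compute_alias(). */
	alias_mask = ((sets - 1) << entry_shift) & ~(page_size - 1);
	n_aliases = alias_mask ? (alias_mask >> page_shift) + 1 : 0;

	/* Prints: alias_mask=0x3000 n_aliases=4 -- bits 13:12 select the
	 * cache set but are not fixed by the page offset, so one physical
	 * page can be cached under four distinct "colours". */
	printf("alias_mask=0x%lx n_aliases=%lu\n", alias_mask, n_aliases);
	return 0;
}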
@@ -462,9 +352,19 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
  * Flushing the cache lines for U0 only isn't enough.
  * We need to flush for P1 too, which may contain aliases.
  */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
+static void sh4_flush_cache_range(void *args)
 {
+	struct flusher_data *data = args;
+	struct vm_area_struct *vma;
+	unsigned long start, end;
+
+	vma = data->vma;
+	start = data->addr1;
+	end = data->addr2;
+
+	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
+		return;
+
 	/*
 	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
 	 * the cache is physically tagged, the data can just be left in there.
@@ -492,20 +392,6 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
 
-/*
- * flush_icache_user_range
- * @vma: VMA of the process
- * @page: page
- * @addr: U0 address
- * @len: length of the range (< page size)
- */
-void flush_icache_user_range(struct vm_area_struct *vma,
-			     struct page *page, unsigned long addr, int len)
-{
-	flush_cache_page(vma, addr, page_to_pfn(page));
-	mb();
-}
-
 /**
  * __flush_cache_4096
  *
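flush_icache_user_range() disappears here with no replacement in this file; since the body was a thin wrapper, it is presumably now provided once by the common SH cache code rather than duplicated per CPU family. Where it actually landed is not shown in this diff, but a generic version would simply mirror the removed body:

/* Sketch of a generic equivalent of the removed wrapper: flush the one
 * page containing the just-written instructions, then order the stores. */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}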
@@ -581,7 +467,49 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
  * Break the 1, 2 and 4 way variants of this out into separate functions to
  * avoid nearly all the overhead of having the conditional stuff in the function
  * bodies (+ the 1 and 2 way cases avoid saving any registers too).
+ *
+ * We want to eliminate unnecessary bus transactions, so this code uses
+ * a non-obvious technique.
+ *
+ * Loop over a cache way sized block of memory, one cache line at a time.
+ * For each line, use movca.l to cause the current cache line contents to
+ * be written back, but without reading anything from main memory. However
+ * this has the side effect that the cache is now caching that memory
+ * location. So follow this with a cache invalidate to mark the cache line
+ * invalid. And do all this with interrupts disabled, to avoid the cache
+ * line being accidentally evicted while it is holding garbage.
+ *
+ * This also breaks in a number of circumstances:
+ * - if there are modifications to the region of memory just above
+ *   empty_zero_page (for example because a breakpoint has been placed
+ *   there), then these can be lost.
+ *
+ *   This is because the memory address which the cache temporarily
+ *   caches in the above description is empty_zero_page. So the
+ *   movca.l hits the cache (it is assumed that it misses, or at least
+ *   isn't dirty), modifies the line and then invalidates it, losing the
+ *   required change.
+ *
+ * - If caches are disabled or configured in write-through mode, then
+ *   the movca.l writes garbage directly into memory.
  */
+static void __flush_dcache_segment_writethrough(unsigned long start,
+						unsigned long extent_per_way)
+{
+	unsigned long addr;
+	int i;
+
+	addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);
+
+	while (extent_per_way) {
+		for (i = 0; i < cpu_data->dcache.ways; i++)
+			__raw_writel(0, addr + cpu_data->dcache.way_incr * i);
+
+		addr += cpu_data->dcache.linesz;
+		extent_per_way -= cpu_data->dcache.linesz;
+	}
+}
+
 static void __flush_dcache_segment_1way(unsigned long start,
 					unsigned long extent_per_way)
 {
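The comment above describes the write-back trick in prose; in instruction terms it is a movca.l/ocbi pair per cache line. Below is a conceptual sketch of one line's worth (the kernel's actual hand-scheduled assembly in the *_1way/2way/4way functions batches several lines per iteration); note that movca.l architecturally requires r0 as its source operand:

/* Write back whatever dirty line currently occupies the cache slot that
 * virtual address v indexes, then leave that slot invalid -- all without
 * a single read from main memory. Conceptual sketch only. */
static inline void writeback_and_invalidate_slot(unsigned long v)
{
	register unsigned long r0 asm("r0") = 0;

	asm volatile("movca.l	%1, @%0\n\t"	/* allocate a line for v without
						 * fetching; the old dirty line in
						 * that slot gets written back */
		     "ocbi	@%0"		/* invalidate the freshly allocated
						 * line, so the 0 never hits memory */
		     : /* no output */
		     : "r" (v), "r" (r0)
		     : "memory");
}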
@@ -773,3 +701,47 @@ static void __flush_dcache_segment_4way(unsigned long start,
 		a3 += linesz;
 	} while (a0 < a0e);
 }
+
+extern void __weak sh4__flush_region_init(void);
+
+/*
+ * SH-4 has virtually indexed and physically tagged cache.
+ */
+void __init sh4_cache_init(void)
+{
+	unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);
+
+	printk("PVR=%08x CVR=%08x PRR=%08x\n",
+		ctrl_inl(CCN_PVR),
+		ctrl_inl(CCN_CVR),
+		ctrl_inl(CCN_PRR));
+
+	if (wt_enabled)
+		__flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
+	else {
+		switch (boot_cpu_data.dcache.ways) {
+		case 1:
+			__flush_dcache_segment_fn = __flush_dcache_segment_1way;
+			break;
+		case 2:
+			__flush_dcache_segment_fn = __flush_dcache_segment_2way;
+			break;
+		case 4:
+			__flush_dcache_segment_fn = __flush_dcache_segment_4way;
+			break;
+		default:
+			panic("unknown number of cache ways\n");
+			break;
+		}
+	}
+
+	local_flush_icache_range	= sh4_flush_icache_range;
+	local_flush_dcache_page		= sh4_flush_dcache_page;
+	local_flush_cache_all		= sh4_flush_cache_all;
+	local_flush_cache_mm		= sh4_flush_cache_mm;
+	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
+	local_flush_cache_page		= sh4_flush_cache_page;
+	local_flush_cache_range		= sh4_flush_cache_range;
+
+	sh4__flush_region_init();
+}
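One subtlety of the uniform void * signatures registered above: the flusher_data fields are reinterpreted per operation, and for local_flush_cache_page the addr2 slot carries a PFN rather than a virtual address. A sketch of the matching mm-side wrapper (the body is an assumption mirroring how sh4_flush_cache_page() unpacks its argument, not code from this diff):

/* Sketch: pack the (vma, address, pfn) triple the way
 * sh4_flush_cache_page() unpacks it, then cross-call all CPUs. */
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;	/* note: a PFN, not a virtual address */

	on_each_cpu(local_flush_cache_page, &data, 1);
}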