Diffstat (limited to 'arch/sh/mm/cache-sh4.c')

-rw-r--r--	arch/sh/mm/cache-sh4.c	| 329
1 file changed, 119 insertions(+), 210 deletions(-)
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index b36a9c986a58..70fb906419dd 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/io.h>
 #include <linux/mutex.h>
+#include <linux/fs.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -25,14 +26,6 @@
 #define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES	32
 
-static void __flush_dcache_segment_writethrough(unsigned long start,
-						unsigned long extent);
-static void __flush_dcache_segment_1way(unsigned long start,
-					unsigned long extent);
-static void __flush_dcache_segment_2way(unsigned long start,
-					unsigned long extent);
-static void __flush_dcache_segment_4way(unsigned long start,
-					unsigned long extent);
 static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset);
 
@@ -44,196 +37,55 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
 	(void (*)(unsigned long, unsigned long))0xdeadbeef;
 
-static void compute_alias(struct cache_info *c)
-{
-	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
-	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
-}
-
-static void __init emit_cache_params(void)
-{
-	printk("PVR=%08x CVR=%08x PRR=%08x\n",
-		ctrl_inl(CCN_PVR),
-		ctrl_inl(CCN_CVR),
-		ctrl_inl(CCN_PRR));
-	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-		boot_cpu_data.icache.ways,
-		boot_cpu_data.icache.sets,
-		boot_cpu_data.icache.way_incr);
-	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-		boot_cpu_data.icache.entry_mask,
-		boot_cpu_data.icache.alias_mask,
-		boot_cpu_data.icache.n_aliases);
-	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-		boot_cpu_data.dcache.ways,
-		boot_cpu_data.dcache.sets,
-		boot_cpu_data.dcache.way_incr);
-	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-		boot_cpu_data.dcache.entry_mask,
-		boot_cpu_data.dcache.alias_mask,
-		boot_cpu_data.dcache.n_aliases);
-
-	/*
-	 * Emit Secondary Cache parameters if the CPU has a probed L2.
-	 */
-	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
-		printk("S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
-			boot_cpu_data.scache.ways,
-			boot_cpu_data.scache.sets,
-			boot_cpu_data.scache.way_incr);
-		printk("S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
-			boot_cpu_data.scache.entry_mask,
-			boot_cpu_data.scache.alias_mask,
-			boot_cpu_data.scache.n_aliases);
-	}
-
-	if (!__flush_dcache_segment_fn)
-		panic("unknown number of cache ways\n");
-}
-
-/*
- * SH-4 has virtually indexed and physically tagged cache.
- */
-void __init p3_cache_init(void)
-{
-	unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);
-
-	compute_alias(&boot_cpu_data.icache);
-	compute_alias(&boot_cpu_data.dcache);
-	compute_alias(&boot_cpu_data.scache);
-
-	if (wt_enabled) {
-		__flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
-		goto out;
-	}
-
-	switch (boot_cpu_data.dcache.ways) {
-	case 1:
-		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
-		break;
-	case 2:
-		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
-		break;
-	case 4:
-		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
-		break;
-	default:
-		__flush_dcache_segment_fn = NULL;
-		break;
-	}
-
-out:
-	emit_cache_params();
-}
-
-/*
- * Write back the dirty D-caches, but not invalidate them.
- *
- * START: Virtual Address (U0, P1, or P3)
- * SIZE: Size of the region.
- */
-void __flush_wback_region(void *start, int size)
-{
-	unsigned long v;
-	unsigned long begin, end;
-
-	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
-	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
-		& ~(L1_CACHE_BYTES-1);
-	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
-		asm volatile("ocbwb	%0"
-			     : /* no output */
-			     : "m" (__m(v)));
-	}
-}
-
-/*
- * Write back the dirty D-caches and invalidate them.
- *
- * START: Virtual Address (U0, P1, or P3)
- * SIZE: Size of the region.
- */
-void __flush_purge_region(void *start, int size)
-{
-	unsigned long v;
-	unsigned long begin, end;
-
-	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
-	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
-		& ~(L1_CACHE_BYTES-1);
-	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
-		asm volatile("ocbp	%0"
-			     : /* no output */
-			     : "m" (__m(v)));
-	}
-}
-
-/*
- * No write back please
- */
-void __flush_invalidate_region(void *start, int size)
-{
-	unsigned long v;
-	unsigned long begin, end;
-
-	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
-	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
-		& ~(L1_CACHE_BYTES-1);
-	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
-		asm volatile("ocbi	%0"
-			     : /* no output */
-			     : "m" (__m(v)));
-	}
-}
-
 /*
  * Write back the range of D-cache, and purge the I-cache.
  *
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh4_flush_icache_range(void *args)
 {
+	struct flusher_data *data = args;
 	int icacheaddr;
-	unsigned long flags, v;
+	unsigned long start, end;
+	unsigned long v;
 	int i;
 
+	start = data->addr1;
+	end = data->addr2;
+
 	/* If there are too many pages then just blow the caches */
 	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-		flush_cache_all();
+		local_flush_cache_all(args);
 	} else {
 		/* selectively flush d-cache then invalidate the i-cache */
 		/* this is inefficient, so only use for small ranges */
 		start &= ~(L1_CACHE_BYTES-1);
 		end += L1_CACHE_BYTES-1;
 		end &= ~(L1_CACHE_BYTES-1);
 
-		local_irq_save(flags);
 		jump_to_uncached();
 
 		for (v = start; v < end; v+=L1_CACHE_BYTES) {
-			asm volatile("ocbwb	%0"
-				     : /* no output */
-				     : "m" (__m(v)));
+			__ocbwb(v);
 
-			icacheaddr = CACHE_IC_ADDRESS_ARRAY | (
-					v & cpu_data->icache.entry_mask);
+			icacheaddr = CACHE_IC_ADDRESS_ARRAY |
+					(v & cpu_data->icache.entry_mask);
 
 			for (i = 0; i < cpu_data->icache.ways;
 				i++, icacheaddr += cpu_data->icache.way_incr)
 					/* Clear i-cache line valid-bit */
 					ctrl_outl(0, icacheaddr);
 		}
 
 		back_to_cached();
-		local_irq_restore(flags);
 	}
 }
 
 static inline void flush_cache_4096(unsigned long start,
 				    unsigned long phys)
 {
-	unsigned long flags, exec_offset = 0;
+	unsigned long exec_offset = 0;
 
 	/*
 	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
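Note on the new calling convention in the hunk above: sh4_flush_icache_range() now takes a single void * and unpacks a struct flusher_data, so every cache op can share one signature and be installed behind the local_flush_* function pointers registered at the end of this patch. A minimal sketch of the pattern, assuming the generic wrapper lives outside this file (the wrapper shown here is illustrative, not part of this diff; the struct fields match the accesses visible above):

        /* sketch: one void * payload shared by every cache op */
        struct flusher_data {
                struct vm_area_struct *vma;
                unsigned long addr1, addr2;     /* range start/end, or address/pfn */
        };

        void flush_icache_range(unsigned long start, unsigned long end)
        {
                struct flusher_data data;

                data.vma = NULL;
                data.addr1 = start;
                data.addr2 = end;

                /* UP: direct call; on SMP the same hook can be handed
                 * to on_each_cpu() unchanged */
                local_flush_icache_range(&data);
        }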
@@ -243,19 +95,25 @@ static inline void flush_cache_4096(unsigned long start,
 	    (start < CACHE_OC_ADDRESS_ARRAY))
 		exec_offset = 0x20000000;
 
-	local_irq_save(flags);
 	__flush_cache_4096(start | SH_CACHE_ASSOC,
 			   P1SEGADDR(phys), exec_offset);
-	local_irq_restore(flags);
 }
 
 /*
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-void flush_dcache_page(struct page *page)
+static void sh4_flush_dcache_page(void *arg)
 {
-	if (test_bit(PG_mapped, &page->flags)) {
+	struct page *page = arg;
+#ifndef CONFIG_SMP
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
+#endif
+	{
 		unsigned long phys = PHYSADDR(page_address(page));
 		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
 		int i, n;
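The rewritten sh4_flush_dcache_page() above adds a deferred-flush optimisation: on UP, a pagecache page with no userspace mappings is only marked PG_dcache_dirty rather than flushed immediately, and the writeback is paid later, when the page actually gets mapped. A hedged sketch of the consumer side of that flag, based on the usual update_mmu_cache()-time pattern (not part of this diff; names here are illustrative):

        static void sketch_update_mmu_cache(struct vm_area_struct *vma,
                                            unsigned long address, pte_t pte)
        {
                unsigned long pfn = pte_pfn(pte);

                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);

                        /* pay the deferred D-cache writeback now that a
                         * user mapping is being established */
                        if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
                                __flush_wback_region(page_address(page),
                                                     PAGE_SIZE);
                }
        }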
@@ -272,9 +130,8 @@ void flush_dcache_page(struct page *page)
 /* TODO: Selective icache invalidation through IC address array.. */
 static void __uses_jump_to_uncached flush_icache_all(void)
 {
-	unsigned long flags, ccr;
+	unsigned long ccr;
 
-	local_irq_save(flags);
 	jump_to_uncached();
 
 	/* Flush I-cache */
@@ -286,18 +143,16 @@ static void __uses_jump_to_uncached flush_icache_all(void)
 	 * back_to_cached() will take care of the barrier for us, don't add
 	 * another one!
 	 */
-
 	back_to_cached();
-	local_irq_restore(flags);
 }
 
-void flush_dcache_all(void)
+static inline void flush_dcache_all(void)
 {
 	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
 	wmb();
 }
 
-void flush_cache_all(void)
+static void sh4_flush_cache_all(void *unused)
 {
 	flush_dcache_all();
 	flush_icache_all();
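flush_dcache_all() still dispatches through __flush_dcache_segment_fn, which sh4_cache_init() (last hunk) points at a way-count-specific implementation. For reference, a sketch of the simplest variant, the write-through case selected when CCR_CACHE_WT is set: with no dirty data to write back, flushing a segment degenerates to clearing valid bits through the operand-cache address array. This is an assumed shape; consult the real __flush_dcache_segment_writethrough in this file for the authoritative version:

        static void sketch_flush_segment_writethrough(unsigned long start,
                                                      unsigned long extent_per_way)
        {
                unsigned long addr;
                int i;

                addr = CACHE_OC_ADDRESS_ARRAY |
                        (start & cpu_data->dcache.entry_mask);

                while (extent_per_way) {
                        /* zeroing the address-array entry in each way clears
                         * the valid bit, i.e. invalidates the line */
                        for (i = 0; i < cpu_data->dcache.ways; i++)
                                ctrl_outl(0, addr + cpu_data->dcache.way_incr * i);

                        addr += cpu_data->dcache.linesz;
                        extent_per_way -= cpu_data->dcache.linesz;
                }
        }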
@@ -389,8 +244,13 @@ loop_exit:
  *
  * Caller takes mm->mmap_sem.
  */
-void flush_cache_mm(struct mm_struct *mm)
+static void sh4_flush_cache_mm(void *arg)
 {
+	struct mm_struct *mm = arg;
+
+	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
+		return;
+
 	/*
 	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
 	 * the cache is physically tagged, the data can just be left in there.
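The new NO_CONTEXT check gives the mm-wide and range flushes a cheap early out: on SH, cpu_context(cpu, mm) holds the per-CPU ASID of the address space, and NO_CONTEXT means the mm has never been active on this CPU, so nothing of it can sit in the virtually indexed cache. Roughly (illustrative definitions; see asm/mmu_context.h for the real ones):

        #define NO_CONTEXT              0UL
        #define cpu_context(cpu, mm)    ((mm)->context.id[cpu])

        if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
                return;         /* no ASID here: cache holds nothing of mm */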
@@ -426,12 +286,21 @@ void flush_cache_mm(struct mm_struct *mm)
  * ADDR: Virtual Address (U0 address)
  * PFN: Physical page number
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
-		      unsigned long pfn)
+static void sh4_flush_cache_page(void *args)
 {
-	unsigned long phys = pfn << PAGE_SHIFT;
+	struct flusher_data *data = args;
+	struct vm_area_struct *vma;
+	unsigned long address, pfn, phys;
 	unsigned int alias_mask;
 
+	vma = data->vma;
+	address = data->addr1;
+	pfn = data->addr2;
+	phys = pfn << PAGE_SHIFT;
+
+	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
+		return;
+
 	alias_mask = boot_cpu_data.dcache.alias_mask;
 
 	/* We only need to flush D-cache when we have alias */
@@ -471,9 +340,19 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
  * Flushing the cache lines for U0 only isn't enough.
  * We need to flush for P1 too, which may contain aliases.
  */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
+static void sh4_flush_cache_range(void *args)
 {
+	struct flusher_data *data = args;
+	struct vm_area_struct *vma;
+	unsigned long start, end;
+
+	vma = data->vma;
+	start = data->addr1;
+	end = data->addr2;
+
+	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
+		return;
+
 	/*
 	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
 	 * the cache is physically tagged, the data can just be left in there.
@@ -501,20 +380,6 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	}
 }
 
-/*
- * flush_icache_user_range
- * @vma:	VMA of the process
- * @page:	page
- * @addr:	U0 address
- * @len:	length of the range (< page size)
- */
-void flush_icache_user_range(struct vm_area_struct *vma,
-			     struct page *page, unsigned long addr, int len)
-{
-	flush_cache_page(vma, addr, page_to_pfn(page));
-	mb();
-}
-
 /**
  * __flush_cache_4096
  *
@@ -824,3 +689,47 @@ static void __flush_dcache_segment_4way(unsigned long start,
 		a3 += linesz;
 	} while (a0 < a0e);
 }
+
+extern void __weak sh4__flush_region_init(void);
+
+/*
+ * SH-4 has virtually indexed and physically tagged cache.
+ */
+void __init sh4_cache_init(void)
+{
+	unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);
+
+	printk("PVR=%08x CVR=%08x PRR=%08x\n",
+		ctrl_inl(CCN_PVR),
+		ctrl_inl(CCN_CVR),
+		ctrl_inl(CCN_PRR));
+
+	if (wt_enabled)
+		__flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
+	else {
+		switch (boot_cpu_data.dcache.ways) {
+		case 1:
+			__flush_dcache_segment_fn = __flush_dcache_segment_1way;
+			break;
+		case 2:
+			__flush_dcache_segment_fn = __flush_dcache_segment_2way;
+			break;
+		case 4:
+			__flush_dcache_segment_fn = __flush_dcache_segment_4way;
+			break;
+		default:
+			panic("unknown number of cache ways\n");
+			break;
+		}
+	}
+
+	local_flush_icache_range	= sh4_flush_icache_range;
+	local_flush_dcache_page		= sh4_flush_dcache_page;
+	local_flush_cache_all		= sh4_flush_cache_all;
+	local_flush_cache_mm		= sh4_flush_cache_mm;
+	local_flush_cache_dup_mm	= sh4_flush_cache_mm;
+	local_flush_cache_page		= sh4_flush_cache_page;
+	local_flush_cache_range		= sh4_flush_cache_range;
+
+	sh4__flush_region_init();
+}
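With all seven local_* hooks registered, the arch-independent entry points only need to pack their arguments and invoke the hook, and on SMP the shared void * convention lets them fan out to every CPU. An assumed shape for such a wrapper (it would live in the common cache code, not in this file; shown only to illustrate the calling convention):

        void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
                              unsigned long pfn)
        {
                struct flusher_data data;

                data.vma = vma;
                data.addr1 = addr;
                data.addr2 = pfn;       /* addr2 doubles as the pfn here */

                /* run the SH-4 local op on each CPU, waiting for completion */
                on_each_cpu(local_flush_cache_page, &data, 1);
        }

The weak sh4__flush_region_init() reference fits the same scheme: a flush-region backend can override it to install its own __flush_wback_region()/__flush_purge_region()/__flush_invalidate_region() implementations (removed from this file earlier in the patch), while a missing override leaves the no-op weak stub.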