Diffstat (limited to 'arch')
 -rw-r--r--  arch/sh/include/asm/fixmap.h |   6
 -rw-r--r--  arch/sh/mm/cache-sh4.c       | 496
 -rw-r--r--  arch/sh/mm/cache.c           |   6
 -rw-r--r--  arch/sh/mm/kmap.c            |   4
 4 files changed, 87 insertions(+), 425 deletions(-)
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index 721fcc4d5e98..76c5a3099cb8 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -14,9 +14,9 @@
 #define _ASM_FIXMAP_H
 
 #include <linux/kernel.h>
+#include <linux/threads.h>
 #include <asm/page.h>
 #ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
 #include <asm/kmap_types.h>
 #endif
 
@@ -46,9 +46,9 @@
  * fix-mapped?
  */
 enum fixed_addresses {
-#define FIX_N_COLOURS 16
+#define FIX_N_COLOURS 8
        FIX_CMAP_BEGIN,
-       FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
+       FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS),
        FIX_UNCACHED,
 #ifdef CONFIG_HIGHMEM
        FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
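The fixmap change above trades 16 colour slots shared by all CPUs for a bank of 8
colour slots per CPU, so the CMAP window now scales with NR_CPUS. A minimal
user-space sketch of the resulting slot budget (NR_CPUS is a kernel build-time
constant; the value 2 here is purely illustrative):

    #include <stdio.h>

    #define FIX_N_COLOURS 8 /* colours per CPU, as in the new fixmap.h */
    #define NR_CPUS       2 /* kernel config constant; 2 is illustrative */

    int main(void)
    {
            int cpu;

            /* Old layout: 16 slots shared by every CPU.
             * New layout: one bank of FIX_N_COLOURS slots per CPU. */
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    printf("cpu%d: CMAP slots %d..%d\n", cpu,
                           cpu * FIX_N_COLOURS, (cpu + 1) * FIX_N_COLOURS - 1);
            return 0;
    }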
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index a98c7d8984fa..60588c5bf7f9 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -2,7 +2,7 @@
  * arch/sh/mm/cache-sh4.c
  *
  * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
- * Copyright (C) 2001 - 2007 Paul Mundt
+ * Copyright (C) 2001 - 2009 Paul Mundt
  * Copyright (C) 2003 Richard Curnow
  * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
  *
@@ -15,6 +15,8 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/fs.h>
+#include <linux/highmem.h>
+#include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -23,21 +25,12 @@
  * flushing. Anything exceeding this will simply flush the dcache in its
  * entirety.
  */
-#define MAX_DCACHE_PAGES        64      /* XXX: Tune for ways */
 #define MAX_ICACHE_PAGES        32
 
 static void __flush_cache_4096(unsigned long addr, unsigned long phys,
                                unsigned long exec_offset);
 
 /*
- * This is initialised here to ensure that it is not placed in the BSS. If
- * that were to happen, note that cache_init gets called before the BSS is
- * cleared, so this would get nulled out which would be hopeless.
- */
-static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
-       (void (*)(unsigned long, unsigned long))0xdeadbeef;
-
-/*
  * Write back the range of D-cache, and purge the I-cache.
  *
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
@@ -123,12 +116,12 @@ static void sh4_flush_dcache_page(void *arg)
        else
 #endif
        {
-               unsigned long phys = PHYSADDR(page_address(page));
+               unsigned long phys = page_to_phys(page);
                unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
                int i, n;
 
                /* Loop all the D-cache */
-               n = boot_cpu_data.dcache.n_aliases;
+               n = boot_cpu_data.dcache.way_incr >> 12;
                for (i = 0; i < n; i++, addr += 4096)
                        flush_cache_4096(addr, phys);
        }
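Worked example of the new loop bound: way_incr is the address stride between
consecutive ways in the operand-cache address array, i.e. the way size, so
way_incr >> 12 counts the 4 KiB alias pages covered by one way. A user-space
sketch with an assumed SH7750-class geometry (way_incr of 0x4000 and the
0xf4000000 array base are illustrative values, not probed from hardware):

    #include <stdio.h>

    int main(void)
    {
            unsigned long way_incr = 0x4000;   /* assumed: 16 KiB per way */
            unsigned long addr = 0xf4000000UL; /* OC address array base on SH-4 */
            int n = way_incr >> 12;            /* 4 KiB alias pages per way */
            int i;

            /* Prints one flush per alias colour: 0xf4000000 .. 0xf4003000. */
            for (i = 0; i < n; i++, addr += 4096)
                    printf("flush_cache_4096(0x%08lx, phys)\n", addr);
            return 0;
    }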
@@ -158,10 +151,27 @@ static void __uses_jump_to_uncached flush_icache_all(void)
        local_irq_restore(flags);
 }
 
-static inline void flush_dcache_all(void)
+static void flush_dcache_all(void)
 {
-       (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
-       wmb();
+       unsigned long addr, end_addr, entry_offset;
+
+       end_addr = CACHE_OC_ADDRESS_ARRAY +
+               (current_cpu_data.dcache.sets <<
+                current_cpu_data.dcache.entry_shift) *
+                       current_cpu_data.dcache.ways;
+
+       entry_offset = 1 << current_cpu_data.dcache.entry_shift;
+
+       for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
+               __raw_writel(0, addr); addr += entry_offset;
+               __raw_writel(0, addr); addr += entry_offset;
+               __raw_writel(0, addr); addr += entry_offset;
+               __raw_writel(0, addr); addr += entry_offset;
+               __raw_writel(0, addr); addr += entry_offset;
+               __raw_writel(0, addr); addr += entry_offset;
+               __raw_writel(0, addr); addr += entry_offset;
+               __raw_writel(0, addr); addr += entry_offset;
+       }
 }
 
 static void sh4_flush_cache_all(void *unused)
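The replacement flush_dcache_all() walks the operand-cache address array and
writes zero to every entry, clearing the valid and dirty bits (on SH-4 an
address-array write that clears a set dirty bit writes the line back first),
with the loop unrolled eight deep. A user-space sketch of the address walk,
with the cache geometry filled in by assumption (512 sets, 32-byte lines, one
way, i.e. a 16 KiB SH7750-style cache) rather than read from current_cpu_data:

    #include <stdio.h>

    #define CACHE_OC_ADDRESS_ARRAY 0xf4000000UL /* SH-4 P4-area array base */

    int main(void)
    {
            /* Assumed geometry; the kernel reads these from cpu probe data. */
            unsigned long sets = 512, entry_shift = 5, ways = 1;
            unsigned long addr, end_addr, entry_offset, writes = 0;

            end_addr = CACHE_OC_ADDRESS_ARRAY + (sets << entry_shift) * ways;
            entry_offset = 1UL << entry_shift;

            /* One zero write per cache entry; stands in for __raw_writel(0, addr). */
            for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; addr += entry_offset)
                    writes++;

            printf("%lu address-array writes, last at 0x%08lx\n",
                   writes, end_addr - entry_offset);
            return 0;
    }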
@@ -170,89 +180,13 @@ static void sh4_flush_cache_all(void *unused)
        flush_icache_all();
 }
 
-static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
-                            unsigned long end)
-{
-       unsigned long d = 0, p = start & PAGE_MASK;
-       unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
-       unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
-       unsigned long select_bit;
-       unsigned long all_aliases_mask;
-       unsigned long addr_offset;
-       pgd_t *dir;
-       pmd_t *pmd;
-       pud_t *pud;
-       pte_t *pte;
-       int i;
-
-       dir = pgd_offset(mm, p);
-       pud = pud_offset(dir, p);
-       pmd = pmd_offset(pud, p);
-       end = PAGE_ALIGN(end);
-
-       all_aliases_mask = (1 << n_aliases) - 1;
-
-       do {
-               if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
-                       p &= PMD_MASK;
-                       p += PMD_SIZE;
-                       pmd++;
-
-                       continue;
-               }
-
-               pte = pte_offset_kernel(pmd, p);
-
-               do {
-                       unsigned long phys;
-                       pte_t entry = *pte;
-
-                       if (!(pte_val(entry) & _PAGE_PRESENT)) {
-                               pte++;
-                               p += PAGE_SIZE;
-                               continue;
-                       }
-
-                       phys = pte_val(entry) & PTE_PHYS_MASK;
-
-                       if ((p ^ phys) & alias_mask) {
-                               d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
-                               d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);
-
-                               if (d == all_aliases_mask)
-                                       goto loop_exit;
-                       }
-
-                       pte++;
-                       p += PAGE_SIZE;
-               } while (p < end && ((unsigned long)pte & ~PAGE_MASK));
-               pmd++;
-       } while (p < end);
-
-loop_exit:
-       addr_offset = 0;
-       select_bit = 1;
-
-       for (i = 0; i < n_aliases; i++) {
-               if (d & select_bit) {
-                       (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
-                       wmb();
-               }
-
-               select_bit <<= 1;
-               addr_offset += PAGE_SIZE;
-       }
-}
-
 /*
  * Note : (RPC) since the caches are physically tagged, the only point
  * of flush_cache_mm for SH-4 is to get rid of aliases from the
  * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
  * lines can stay resident so long as the virtual address they were
  * accessed with (hence cache set) is in accord with the physical
- * address (i.e. tag). It's no different here. So I reckon we don't
- * need to flush the I-cache, since aliases don't matter for that. We
- * should try that.
+ * address (i.e. tag). It's no different here.
  *
  * Caller takes mm->mmap_sem.
  */
@@ -263,33 +197,7 @@ static void sh4_flush_cache_mm(void *arg)
        if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
                return;
 
-       /*
-        * If cache is only 4k-per-way, there are never any 'aliases'. Since
-        * the cache is physically tagged, the data can just be left in there.
-        */
-       if (boot_cpu_data.dcache.n_aliases == 0)
-               return;
-
-       /*
-        * Don't bother groveling around the dcache for the VMA ranges
-        * if there are too many PTEs to make it worthwhile.
-        */
-       if (mm->nr_ptes >= MAX_DCACHE_PAGES)
-               flush_dcache_all();
-       else {
-               struct vm_area_struct *vma;
-
-               /*
-                * In this case there are reasonably sized ranges to flush,
-                * iterate through the VMA list and take care of any aliases.
-                */
-               for (vma = mm->mmap; vma; vma = vma->vm_next)
-                       __flush_cache_mm(mm, vma->vm_start, vma->vm_end);
-       }
-
-       /* Only touch the icache if one of the VMAs has VM_EXEC set. */
-       if (mm->exec_vm)
-               flush_icache_all();
+       flush_dcache_all();
 }
 
 /*
@@ -302,44 +210,64 @@ static void sh4_flush_cache_page(void *args)
 {
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
+       struct page *page;
        unsigned long address, pfn, phys;
-       unsigned int alias_mask;
+       int map_coherent = 0;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       void *vaddr;
 
        vma = data->vma;
        address = data->addr1;
        pfn = data->addr2;
        phys = pfn << PAGE_SHIFT;
+       page = pfn_to_page(pfn);
 
        if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                return;
 
-       alias_mask = boot_cpu_data.dcache.alias_mask;
-
-       /* We only need to flush D-cache when we have alias */
-       if ((address^phys) & alias_mask) {
-               /* Loop 4K of the D-cache */
-               flush_cache_4096(
-                       CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
-                       phys);
-               /* Loop another 4K of the D-cache */
-               flush_cache_4096(
-                       CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
-                       phys);
-       }
+       address &= PAGE_MASK;
+       pgd = pgd_offset(vma->vm_mm, address);
+       pud = pud_offset(pgd, address);
+       pmd = pmd_offset(pud, address);
+       pte = pte_offset_kernel(pmd, address);
+
+       /* If the page isn't present, there is nothing to do here. */
+       if (!(pte_val(*pte) & _PAGE_PRESENT))
+               return;
 
-       alias_mask = boot_cpu_data.icache.alias_mask;
-       if (vma->vm_flags & VM_EXEC) {
+       if ((vma->vm_mm == current->active_mm))
+               vaddr = NULL;
+       else {
                /*
-                * Evict entries from the portion of the cache from which code
-                * may have been executed at this address (virtual). There's
-                * no need to evict from the portion corresponding to the
-                * physical address as for the D-cache, because we know the
-                * kernel has never executed the code through its identity
-                * translation.
+                * Use kmap_coherent or kmap_atomic to do flushes for
+                * another ASID than the current one.
                 */
-               flush_cache_4096(
-                       CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
-                       phys);
+               map_coherent = (current_cpu_data.dcache.n_aliases &&
+                               !test_bit(PG_dcache_dirty, &page->flags) &&
+                               page_mapped(page));
+               if (map_coherent)
+                       vaddr = kmap_coherent(page, address);
+               else
+                       vaddr = kmap_atomic(page, KM_USER0);
+
+               address = (unsigned long)vaddr;
+       }
+
+       if (pages_do_alias(address, phys))
+               flush_cache_4096(CACHE_OC_ADDRESS_ARRAY |
+                                (address & shm_align_mask), phys);
+
+       if (vma->vm_flags & VM_EXEC)
+               flush_icache_all();
+
+       if (vaddr) {
+               if (map_coherent)
+                       kunmap_coherent(vaddr);
+               else
+                       kunmap_atomic(vaddr, KM_USER0);
        }
 }
 
@@ -372,24 +300,10 @@ static void sh4_flush_cache_range(void *args)
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;
 
-       /*
-        * Don't bother with the lookup and alias check if we have a
-        * wide range to cover, just blow away the dcache in its
-        * entirety instead. -- PFM.
-        */
-       if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
-               flush_dcache_all();
-       else
-               __flush_cache_mm(vma->vm_mm, start, end);
+       flush_dcache_all();
 
-       if (vma->vm_flags & VM_EXEC) {
-               /*
-                * TODO: Is this required??? Need to look at how I-cache
-                * coherency is assured when new programs are loaded to see if
-                * this matters.
-                */
+       if (vma->vm_flags & VM_EXEC)
                flush_icache_all();
-       }
 }
 
 /**
@@ -443,7 +357,7 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
         * pointless nead-of-loop check for 0 iterations.
         */
        do {
-               ea = base_addr + PAGE_SIZE;
+               ea = base_addr + 4096;
                a = base_addr;
                p = phys;
 
@@ -463,245 +377,6 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
        } while (--way_count != 0);
 }
 
-/*
- * Break the 1, 2 and 4 way variants of this out into separate functions to
- * avoid nearly all the overhead of having the conditional stuff in the function
- * bodies (+ the 1 and 2 way cases avoid saving any registers too).
- *
- * We want to eliminate unnecessary bus transactions, so this code uses
- * a non-obvious technique.
- *
- * Loop over a cache way sized block of, one cache line at a time. For each
- * line, use movca.a to cause the current cache line contents to be written
- * back, but without reading anything from main memory. However this has the
- * side effect that the cache is now caching that memory location. So follow
- * this with a cache invalidate to mark the cache line invalid. And do all
- * this with interrupts disabled, to avoid the cache line being accidently
- * evicted while it is holding garbage.
- *
- * This also breaks in a number of circumstances:
- * - if there are modifications to the region of memory just above
- *   empty_zero_page (for example because a breakpoint has been placed
- *   there), then these can be lost.
- *
- *   This is because the the memory address which the cache temporarily
- *   caches in the above description is empty_zero_page. So the
- *   movca.l hits the cache (it is assumed that it misses, or at least
- *   isn't dirty), modifies the line and then invalidates it, losing the
- *   required change.
- *
- * - If caches are disabled or configured in write-through mode, then
- *   the movca.l writes garbage directly into memory.
- */
-static void __flush_dcache_segment_writethrough(unsigned long start,
-                                                unsigned long extent_per_way)
-{
-       unsigned long addr;
-       int i;
-
-       addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);
-
-       while (extent_per_way) {
-               for (i = 0; i < cpu_data->dcache.ways; i++)
-                       __raw_writel(0, addr + cpu_data->dcache.way_incr * i);
-
-               addr += cpu_data->dcache.linesz;
-               extent_per_way -= cpu_data->dcache.linesz;
-       }
-}
-
-static void __flush_dcache_segment_1way(unsigned long start,
-                                       unsigned long extent_per_way)
-{
-       unsigned long orig_sr, sr_with_bl;
-       unsigned long base_addr;
-       unsigned long way_incr, linesz, way_size;
-       struct cache_info *dcache;
-       register unsigned long a0, a0e;
-
-       asm volatile("stc sr, %0" : "=r" (orig_sr));
-       sr_with_bl = orig_sr | (1<<28);
-       base_addr = ((unsigned long)&empty_zero_page[0]);
-
-       /*
-        * The previous code aligned base_addr to 16k, i.e. the way_size of all
-        * existing SH-4 D-caches. Whilst I don't see a need to have this
-        * aligned to any better than the cache line size (which it will be
-        * anyway by construction), let's align it to at least the way_size of
-        * any existing or conceivable SH-4 D-cache. -- RPC
-        */
-       base_addr = ((base_addr >> 16) << 16);
-       base_addr |= start;
-
-       dcache = &boot_cpu_data.dcache;
-       linesz = dcache->linesz;
-       way_incr = dcache->way_incr;
-       way_size = dcache->way_size;
-
-       a0 = base_addr;
-       a0e = base_addr + extent_per_way;
-       do {
-               asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
-               asm volatile("movca.l r0, @%0\n\t"
-                            "ocbi @%0" : : "r" (a0));
-               a0 += linesz;
-               asm volatile("movca.l r0, @%0\n\t"
-                            "ocbi @%0" : : "r" (a0));
-               a0 += linesz;
-               asm volatile("movca.l r0, @%0\n\t"
-                            "ocbi @%0" : : "r" (a0));
-               a0 += linesz;
-               asm volatile("movca.l r0, @%0\n\t"
-                            "ocbi @%0" : : "r" (a0));
-               asm volatile("ldc %0, sr" : : "r" (orig_sr));
-               a0 += linesz;
-       } while (a0 < a0e);
-}
-
-static void __flush_dcache_segment_2way(unsigned long start,
-                                       unsigned long extent_per_way)
-{
-       unsigned long orig_sr, sr_with_bl;
-       unsigned long base_addr;
-       unsigned long way_incr, linesz, way_size;
-       struct cache_info *dcache;
-       register unsigned long a0, a1, a0e;
-
-       asm volatile("stc sr, %0" : "=r" (orig_sr));
-       sr_with_bl = orig_sr | (1<<28);
-       base_addr = ((unsigned long)&empty_zero_page[0]);
-
-       /* See comment under 1-way above */
-       base_addr = ((base_addr >> 16) << 16);
-       base_addr |= start;
-
-       dcache = &boot_cpu_data.dcache;
-       linesz = dcache->linesz;
-       way_incr = dcache->way_incr;
-       way_size = dcache->way_size;
-
-       a0 = base_addr;
-       a1 = a0 + way_incr;
-       a0e = base_addr + extent_per_way;
-       do {
-               asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
-               asm volatile("movca.l r0, @%0\n\t"
-                            "movca.l r0, @%1\n\t"
-                            "ocbi @%0\n\t"
-                            "ocbi @%1" : :
-                            "r" (a0), "r" (a1));
-               a0 += linesz;
-               a1 += linesz;
-               asm volatile("movca.l r0, @%0\n\t"
-                            "movca.l r0, @%1\n\t"
-                            "ocbi @%0\n\t"
-                            "ocbi @%1" : :
-                            "r" (a0), "r" (a1));
-               a0 += linesz;
-               a1 += linesz;
-               asm volatile("movca.l r0, @%0\n\t"
-                            "movca.l r0, @%1\n\t"
-                            "ocbi @%0\n\t"
-                            "ocbi @%1" : :
-                            "r" (a0), "r" (a1));
-               a0 += linesz;
-               a1 += linesz;
-               asm volatile("movca.l r0, @%0\n\t"
-                            "movca.l r0, @%1\n\t"
-                            "ocbi @%0\n\t"
-                            "ocbi @%1" : :
-                            "r" (a0), "r" (a1));
-               asm volatile("ldc %0, sr" : : "r" (orig_sr));
-               a0 += linesz;
-               a1 += linesz;
-       } while (a0 < a0e);
-}
-
-static void __flush_dcache_segment_4way(unsigned long start,
-                                       unsigned long extent_per_way)
-{
-       unsigned long orig_sr, sr_with_bl;
-       unsigned long base_addr;
-       unsigned long way_incr, linesz, way_size;
-       struct cache_info *dcache;
-       register unsigned long a0, a1, a2, a3, a0e;
-
-       asm volatile("stc sr, %0" : "=r" (orig_sr));
-       sr_with_bl = orig_sr | (1<<28);
-       base_addr = ((unsigned long)&empty_zero_page[0]);
-
-       /* See comment under 1-way above */
-       base_addr = ((base_addr >> 16) << 16);
-       base_addr |= start;
-
-       dcache = &boot_cpu_data.dcache;
-       linesz = dcache->linesz;
-       way_incr = dcache->way_incr;
-       way_size = dcache->way_size;
-
-       a0 = base_addr;
-       a1 = a0 + way_incr;
-       a2 = a1 + way_incr;
-       a3 = a2 + way_incr;
-       a0e = base_addr + extent_per_way;
-       do {
-               asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
-               asm volatile("movca.l r0, @%0\n\t"
-                            "movca.l r0, @%1\n\t"
-                            "movca.l r0, @%2\n\t"
-                            "movca.l r0, @%3\n\t"
-                            "ocbi @%0\n\t"
-                            "ocbi @%1\n\t"
-                            "ocbi @%2\n\t"
-                            "ocbi @%3\n\t" : :
-                            "r" (a0), "r" (a1), "r" (a2), "r" (a3));
-               a0 += linesz;
-               a1 += linesz;
-               a2 += linesz;
-               a3 += linesz;
-               asm volatile("movca.l r0, @%0\n\t"
-                            "movca.l r0, @%1\n\t"
-                            "movca.l r0, @%2\n\t"
-                            "movca.l r0, @%3\n\t"
-                            "ocbi @%0\n\t"
-                            "ocbi @%1\n\t"
-                            "ocbi @%2\n\t"
-                            "ocbi @%3\n\t" : :
-                            "r" (a0), "r" (a1), "r" (a2), "r" (a3));
-               a0 += linesz;
-               a1 += linesz;
-               a2 += linesz;
-               a3 += linesz;
-               asm volatile("movca.l r0, @%0\n\t"
-                            "movca.l r0, @%1\n\t"
-                            "movca.l r0, @%2\n\t"
-                            "movca.l r0, @%3\n\t"
-                            "ocbi @%0\n\t"
-                            "ocbi @%1\n\t"
-                            "ocbi @%2\n\t"
-                            "ocbi @%3\n\t" : :
-                            "r" (a0), "r" (a1), "r" (a2), "r" (a3));
-               a0 += linesz;
-               a1 += linesz;
-               a2 += linesz;
-               a3 += linesz;
-               asm volatile("movca.l r0, @%0\n\t"
-                            "movca.l r0, @%1\n\t"
-                            "movca.l r0, @%2\n\t"
-                            "movca.l r0, @%3\n\t"
-                            "ocbi @%0\n\t"
-                            "ocbi @%1\n\t"
-                            "ocbi @%2\n\t"
-                            "ocbi @%3\n\t" : :
-                            "r" (a0), "r" (a1), "r" (a2), "r" (a3));
-               asm volatile("ldc %0, sr" : : "r" (orig_sr));
-               a0 += linesz;
-               a1 += linesz;
-               a2 += linesz;
-               a3 += linesz;
-       } while (a0 < a0e);
-}
-
 extern void __weak sh4__flush_region_init(void);
 
 /*
@@ -709,32 +384,11 @@ extern void __weak sh4__flush_region_init(void);
  */
 void __init sh4_cache_init(void)
 {
-       unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);
-
        printk("PVR=%08x CVR=%08x PRR=%08x\n",
                ctrl_inl(CCN_PVR),
                ctrl_inl(CCN_CVR),
                ctrl_inl(CCN_PRR));
 
-       if (wt_enabled)
-               __flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
-       else {
-               switch (boot_cpu_data.dcache.ways) {
-               case 1:
-                       __flush_dcache_segment_fn = __flush_dcache_segment_1way;
-                       break;
-               case 2:
-                       __flush_dcache_segment_fn = __flush_dcache_segment_2way;
-                       break;
-               case 4:
-                       __flush_dcache_segment_fn = __flush_dcache_segment_4way;
-                       break;
-               default:
-                       panic("unknown number of cache ways\n");
-                       break;
-               }
-       }
-
        local_flush_icache_range        = sh4_flush_icache_range;
        local_flush_dcache_page         = sh4_flush_dcache_page;
        local_flush_cache_all           = sh4_flush_cache_all;
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 35c37b7f717a..4aa926054531 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -164,11 +164,17 @@ void flush_cache_all(void)
 
 void flush_cache_mm(struct mm_struct *mm)
 {
+       if (boot_cpu_data.dcache.n_aliases == 0)
+               return;
+
        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
 }
 
 void flush_cache_dup_mm(struct mm_struct *mm)
 {
+       if (boot_cpu_data.dcache.n_aliases == 0)
+               return;
+
        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
 }
 
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 16e01b5fed04..15d74ea42094 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -39,7 +39,9 @@ void *kmap_coherent(struct page *page, unsigned long addr)
        pagefault_disable();
 
        idx = FIX_CMAP_END -
-               ((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT);
+               (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
+                (FIX_N_COLOURS * smp_processor_id()));
+
        vaddr = __fix_to_virt(idx);
 
        BUG_ON(!pte_none(*(kmap_coherent_pte - idx)));
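The new index folds the page colour and the CPU number into the per-CPU CMAP
bank reserved in fixmap.h above, so two CPUs flushing the same user address no
longer race over one fixmap slot. A hedged, user-space rendering of the
arithmetic (PAGE_SHIFT, NR_CPUS, and the sample address are assumptions for
illustration):

    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define FIX_N_COLOURS  8
    #define FIX_CMAP_BEGIN 0
    #define NR_CPUS        2        /* illustrative */
    #define FIX_CMAP_END  (FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS))

    static unsigned long cmap_idx(unsigned long addr, int cpu)
    {
            /* Colour of the user address, offset into this CPU's bank. */
            return FIX_CMAP_END -
                    (((addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1)) +
                     (FIX_N_COLOURS * cpu));
    }

    int main(void)
    {
            /* Same user address on two CPUs: same colour, distinct slots. */
            printf("cpu0: idx=%lu\n", cmap_idx(0x00403000UL, 0));
            printf("cpu1: idx=%lu\n", cmap_idx(0x00403000UL, 1));
            return 0;
    }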