author		Paul Mundt <lethal@linux-sh.org>	2009-08-21 04:23:14 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-08-21 04:23:14 -0400
commit		f26b2a562b46ab186c8383993ab1332673ac4a47 (patch)
tree		5cf52089da5ca762c07cf6c1364a6aa411fb3038 /arch/sh
parent		f9bd71f255b4349c4f9f596863161fd5182f67fa (diff)
sh: Make cache flushers SMP-aware.
This does a bit of rework to make the cache flushers SMP-aware. The
function-pointer-based flushers are renamed to local variants, and the
exported interface is now implemented in common code, wrapping the local
flushers as necessary.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
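In practice each exported flusher now packs its arguments into a struct
flusher_data and invokes the CPU-local variant on every online CPU via
on_each_cpu(). A condensed sketch of that pattern, lifted from the
arch/sh/mm/cache.c hunk below (illustration only, not a standalone file):

struct flusher_data {
	struct vm_area_struct *vma;
	unsigned long addr1, addr2;
};

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	/* run the CPU-local flusher on all online CPUs and wait for completion */
	on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}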
Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/include/asm/cacheflush.h	 41
-rw-r--r--	arch/sh/mm/cache-sh2a.c			 10
-rw-r--r--	arch/sh/mm/cache-sh4.c			 54
-rw-r--r--	arch/sh/mm/cache-sh5.c			 64
-rw-r--r--	arch/sh/mm/cache-sh7705.c		 67
-rw-r--r--	arch/sh/mm/cache.c			137
6 files changed, 213 insertions(+), 160 deletions(-)
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 32299b7c2b48..11e416630585 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -19,23 +19,40 @@
  * - flush_icache_page(vma, pg) flushes(invalidates) a page for icache
  * - flush_cache_sigtramp(vaddr) flushes the signal trampoline
  */
-extern void (*flush_cache_all)(void);
-extern void (*flush_cache_mm)(struct mm_struct *mm);
-extern void (*flush_cache_dup_mm)(struct mm_struct *mm);
-extern void (*flush_cache_page)(struct vm_area_struct *vma,
-				unsigned long addr, unsigned long pfn);
-extern void (*flush_cache_range)(struct vm_area_struct *vma,
-				 unsigned long start, unsigned long end);
-extern void (*flush_dcache_page)(struct page *page);
-extern void (*flush_icache_range)(unsigned long start, unsigned long end);
-extern void (*flush_icache_page)(struct vm_area_struct *vma,
-				 struct page *page);
-extern void (*flush_cache_sigtramp)(unsigned long address);
+extern void (*local_flush_cache_all)(void *args);
+extern void (*local_flush_cache_mm)(void *args);
+extern void (*local_flush_cache_dup_mm)(void *args);
+extern void (*local_flush_cache_page)(void *args);
+extern void (*local_flush_cache_range)(void *args);
+extern void (*local_flush_dcache_page)(void *args);
+extern void (*local_flush_icache_range)(void *args);
+extern void (*local_flush_icache_page)(void *args);
+extern void (*local_flush_cache_sigtramp)(void *args);
+
+static inline void cache_noop(void *args) { }
 
 extern void (*__flush_wback_region)(void *start, int size);
 extern void (*__flush_purge_region)(void *start, int size);
 extern void (*__flush_invalidate_region)(void *start, int size);
 
+extern void flush_cache_all(void);
+extern void flush_cache_mm(struct mm_struct *mm);
+extern void flush_cache_dup_mm(struct mm_struct *mm);
+extern void flush_cache_page(struct vm_area_struct *vma,
+			     unsigned long addr, unsigned long pfn);
+extern void flush_cache_range(struct vm_area_struct *vma,
+			      unsigned long start, unsigned long end);
+extern void flush_dcache_page(struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+extern void flush_icache_page(struct vm_area_struct *vma,
+			      struct page *page);
+extern void flush_cache_sigtramp(unsigned long address);
+
+struct flusher_data {
+	struct vm_area_struct *vma;
+	unsigned long addr1, addr2;
+};
+
 #define ARCH_HAS_FLUSH_ANON_PAGE
 extern void __flush_anon_page(struct page *page, unsigned long);
 
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 96a41872dfd3..975899d83564 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -97,13 +97,15 @@ static void sh2a__flush_invalidate_region(void *start, int size)
 }
 
 /* WBack O-Cache and flush I-Cache */
-static void sh2a_flush_icache_range(unsigned long start, unsigned long end)
+static void sh2a_flush_icache_range(void *args)
 {
+	struct flusher_data *data = args;
+	unsigned long start, end;
 	unsigned long v;
 	unsigned long flags;
 
-	start = start & ~(L1_CACHE_BYTES-1);
-	end = (end + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);
+	start = data->addr1 & ~(L1_CACHE_BYTES-1);
+	end = (data->addr2 + L1_CACHE_BYTES-1) & ~(L1_CACHE_BYTES-1);
 
 	local_irq_save(flags);
 	jump_to_uncached();
@@ -130,7 +132,7 @@ static void sh2a_flush_icache_range(unsigned long start, unsigned long end)
 
 void __init sh2a_cache_init(void)
 {
-	flush_icache_range = sh2a_flush_icache_range;
+	local_flush_icache_range = sh2a_flush_icache_range;
 
 	__flush_wback_region = sh2a__flush_wback_region;
 	__flush_purge_region = sh2a__flush_purge_region;
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 6c2db1401080..9201b37c7cca 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -43,15 +43,20 @@ static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-static void sh4_flush_icache_range(unsigned long start, unsigned long end)
+static void sh4_flush_icache_range(void *args)
 {
+	struct flusher_data *data = args;
 	int icacheaddr;
+	unsigned long start, end;
 	unsigned long flags, v;
 	int i;
 
+	start = data->addr1;
+	end = data->addr2;
+
 	/* If there are too many pages then just blow the caches */
 	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
-		flush_cache_all();
+		local_flush_cache_all(args);
 	} else {
 		/* selectively flush d-cache then invalidate the i-cache */
 		/* this is inefficient, so only use for small ranges */
@@ -104,7 +109,7 @@ static inline void flush_cache_4096(unsigned long start,
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-static void sh4_flush_dcache_page(struct page *page)
+static void sh4_flush_dcache_page(void *page)
 {
 #ifndef CONFIG_SMP
 	struct address_space *mapping = page_mapping(page);
@@ -155,7 +160,7 @@ static inline void flush_dcache_all(void)
 	wmb();
 }
 
-static void sh4_flush_cache_all(void)
+static void sh4_flush_cache_all(void *unused)
 {
 	flush_dcache_all();
 	flush_icache_all();
@@ -247,8 +252,10 @@ loop_exit:
  *
  * Caller takes mm->mmap_sem.
  */
-static void sh4_flush_cache_mm(struct mm_struct *mm)
+static void sh4_flush_cache_mm(void *arg)
 {
+	struct mm_struct *mm = arg;
+
 	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
 		return;
 
@@ -287,12 +294,18 @@ static void sh4_flush_cache_mm(struct mm_struct *mm)
  * ADDR: Virtual Address (U0 address)
  * PFN: Physical page number
  */
-static void sh4_flush_cache_page(struct vm_area_struct *vma,
-				 unsigned long address, unsigned long pfn)
+static void sh4_flush_cache_page(void *args)
 {
-	unsigned long phys = pfn << PAGE_SHIFT;
+	struct flusher_data *data = args;
+	struct vm_area_struct *vma;
+	unsigned long address, pfn, phys;
 	unsigned int alias_mask;
 
+	vma = data->vma;
+	address = data->addr1;
+	pfn = data->addr2;
+	phys = pfn << PAGE_SHIFT;
+
 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
 		return;
 
@@ -335,9 +348,16 @@ static void sh4_flush_cache_page(struct vm_area_struct *vma,
  * Flushing the cache lines for U0 only isn't enough.
  * We need to flush for P1 too, which may contain aliases.
  */
-static void sh4_flush_cache_range(struct vm_area_struct *vma,
-				  unsigned long start, unsigned long end)
+static void sh4_flush_cache_range(void *args)
 {
+	struct flusher_data *data = args;
+	struct vm_area_struct *vma;
+	unsigned long start, end;
+
+	vma = data->vma;
+	start = data->addr1;
+	end = data->addr2;
+
 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
 		return;
 
@@ -663,13 +683,13 @@ void __init sh4_cache_init(void)
 		break;
 	}
 
-	flush_icache_range = sh4_flush_icache_range;
-	flush_dcache_page = sh4_flush_dcache_page;
-	flush_cache_all = sh4_flush_cache_all;
-	flush_cache_mm = sh4_flush_cache_mm;
-	flush_cache_dup_mm = sh4_flush_cache_mm;
-	flush_cache_page = sh4_flush_cache_page;
-	flush_cache_range = sh4_flush_cache_range;
+	local_flush_icache_range = sh4_flush_icache_range;
+	local_flush_dcache_page = sh4_flush_dcache_page;
+	local_flush_cache_all = sh4_flush_cache_all;
+	local_flush_cache_mm = sh4_flush_cache_mm;
+	local_flush_cache_dup_mm = sh4_flush_cache_mm;
+	local_flush_cache_page = sh4_flush_cache_page;
+	local_flush_cache_range = sh4_flush_cache_range;
 
 	sh4__flush_region_init();
 }
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index d4a445c865d7..467ff8e260f7 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -483,7 +483,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
  * Invalidate the entire contents of both caches, after writing back to
  * memory any dirty data from the D-cache.
  */
-static void sh5_flush_cache_all(void)
+static void sh5_flush_cache_all(void *unused)
 {
 	sh64_dcache_purge_all();
 	sh64_icache_inv_all();
@@ -510,7 +510,7 @@ static void sh5_flush_cache_all(void)
  * I-cache. This is similar to the lack of action needed in
  * flush_tlb_mm - see fault.c.
  */
-static void sh5_flush_cache_mm(struct mm_struct *mm)
+static void sh5_flush_cache_mm(void *unused)
 {
 	sh64_dcache_purge_all();
 }
@@ -522,13 +522,18 @@ static void sh5_flush_cache_mm(struct mm_struct *mm)
  *
  * Note, 'end' is 1 byte beyond the end of the range to flush.
  */
-static void sh5_flush_cache_range(struct vm_area_struct *vma,
-				  unsigned long start, unsigned long end)
+static void sh5_flush_cache_range(void *args)
 {
-	struct mm_struct *mm = vma->vm_mm;
+	struct flusher_data *data = args;
+	struct vm_area_struct *vma;
+	unsigned long start, end;
 
-	sh64_dcache_purge_user_range(mm, start, end);
-	sh64_icache_inv_user_page_range(mm, start, end);
+	vma = data->vma;
+	start = data->addr1;
+	end = data->addr2;
+
+	sh64_dcache_purge_user_range(vma->vm_mm, start, end);
+	sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
 }
 
 /*
@@ -540,16 +545,23 @@ static void sh5_flush_cache_range(struct vm_area_struct *vma,
  *
  * Note, this is called with pte lock held.
  */
-static void sh5_flush_cache_page(struct vm_area_struct *vma,
-				 unsigned long eaddr, unsigned long pfn)
+static void sh5_flush_cache_page(void *args)
 {
+	struct flusher_data *data = args;
+	struct vm_area_struct *vma;
+	unsigned long eaddr, pfn;
+
+	vma = data->vma;
+	eaddr = data->addr1;
+	pfn = data->addr2;
+
 	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
 
 	if (vma->vm_flags & VM_EXEC)
 		sh64_icache_inv_user_page(vma, eaddr);
 }
 
-static void sh5_flush_dcache_page(struct page *page)
+static void sh5_flush_dcache_page(void *page)
 {
 	sh64_dcache_purge_phy_page(page_to_phys(page));
 	wmb();
@@ -563,8 +575,14 @@ static void sh5_flush_dcache_page(struct page *page)
  * mapping, therefore it's guaranteed that there no cache entries for
  * the range in cache sets of the wrong colour.
  */
-static void sh5_flush_icache_range(unsigned long start, unsigned long end)
+static void sh5_flush_icache_range(void *args)
 {
+	struct flusher_data *data = args;
+	unsigned long start, end;
+
+	start = data->addr1;
+	end = data->addr2;
+
 	__flush_purge_region((void *)start, end);
 	wmb();
 	sh64_icache_inv_kernel_range(start, end);
@@ -576,25 +594,25 @@ static void sh5_flush_icache_range(unsigned long start, unsigned long end)
  * current process. Used to flush signal trampolines on the stack to
  * make them executable.
  */
-static void sh5_flush_cache_sigtramp(unsigned long vaddr)
+static void sh5_flush_cache_sigtramp(void *vaddr)
 {
-	unsigned long end = vaddr + L1_CACHE_BYTES;
+	unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;
 
-	__flush_wback_region((void *)vaddr, L1_CACHE_BYTES);
+	__flush_wback_region(vaddr, L1_CACHE_BYTES);
 	wmb();
-	sh64_icache_inv_current_user_range(vaddr, end);
+	sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
 }
 
 void __init sh5_cache_init(void)
 {
-	flush_cache_all = sh5_flush_cache_all;
-	flush_cache_mm = sh5_flush_cache_mm;
-	flush_cache_dup_mm = sh5_flush_cache_mm;
-	flush_cache_page = sh5_flush_cache_page;
-	flush_cache_range = sh5_flush_cache_range;
-	flush_dcache_page = sh5_flush_dcache_page;
-	flush_icache_range = sh5_flush_icache_range;
-	flush_cache_sigtramp = sh5_flush_cache_sigtramp;
+	local_flush_cache_all = sh5_flush_cache_all;
+	local_flush_cache_mm = sh5_flush_cache_mm;
+	local_flush_cache_dup_mm = sh5_flush_cache_mm;
+	local_flush_cache_page = sh5_flush_cache_page;
+	local_flush_cache_range = sh5_flush_cache_range;
+	local_flush_dcache_page = sh5_flush_dcache_page;
+	local_flush_icache_range = sh5_flush_icache_range;
+	local_flush_cache_sigtramp = sh5_flush_cache_sigtramp;
 
 	/* Reserve a slot for dcache colouring in the DTLB */
 	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index f1d5c803c04b..6293f57fa888 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -64,8 +64,14 @@ static inline void cache_wback_all(void)
  *
  * Called from kernel/module.c:sys_init_module and routine for a.out format.
  */
-static void sh7705_flush_icache_range(unsigned long start, unsigned long end)
+static void sh7705_flush_icache_range(void *args)
 {
+	struct flusher_data *data = args;
+	unsigned long start, end;
+
+	start = data->addr1;
+	end = data->addr2;
+
 	__flush_wback_region((void *)start, end - start);
 }
 
@@ -127,7 +133,7 @@ static void __flush_dcache_page(unsigned long phys)
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-static void sh7705_flush_dcache_page(struct page *page)
+static void sh7705_flush_dcache_page(void *page)
 {
 	struct address_space *mapping = page_mapping(page);
 
@@ -137,7 +143,7 @@ static void sh7705_flush_dcache_page(struct page *page)
 		__flush_dcache_page(PHYSADDR(page_address(page)));
 }
 
-static void sh7705_flush_cache_all(void)
+static void sh7705_flush_cache_all(void *args)
 {
 	unsigned long flags;
 
@@ -149,44 +155,16 @@ static void sh7705_flush_cache_all(void)
 	local_irq_restore(flags);
 }
 
-static void sh7705_flush_cache_mm(struct mm_struct *mm)
-{
-	/* Is there any good way? */
-	/* XXX: possibly call flush_cache_range for each vm area */
-	flush_cache_all();
-}
-
-/*
- * Write back and invalidate D-caches.
- *
- * START, END: Virtual Address (U0 address)
- *
- * NOTE: We need to flush the _physical_ page entry.
- * Flushing the cache lines for U0 only isn't enough.
- * We need to flush for P1 too, which may contain aliases.
- */
-static void sh7705_flush_cache_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end)
-{
-
-	/*
-	 * We could call flush_cache_page for the pages of these range,
-	 * but it's not efficient (scan the caches all the time...).
-	 *
-	 * We can't use A-bit magic, as there's the case we don't have
-	 * valid entry on TLB.
-	 */
-	flush_cache_all();
-}
-
 /*
  * Write back and invalidate I/D-caches for the page.
  *
  * ADDRESS: Virtual Address (U0 address)
  */
-static void sh7705_flush_cache_page(struct vm_area_struct *vma,
-				    unsigned long address, unsigned long pfn)
+static void sh7705_flush_cache_page(void *args)
 {
+	struct flusher_data *data = args;
+	unsigned long pfn = data->addr2;
+
 	__flush_dcache_page(pfn << PAGE_SHIFT);
 }
 
@@ -198,20 +176,19 @@ static void sh7705_flush_cache_page(struct vm_area_struct *vma,
  * Not entirely sure why this is necessary on SH3 with 32K cache but
  * without it we get occasional "Memory fault" when loading a program.
  */
-static void sh7705_flush_icache_page(struct vm_area_struct *vma,
-				     struct page *page)
+static void sh7705_flush_icache_page(void *page)
 {
 	__flush_purge_region(page_address(page), PAGE_SIZE);
 }
 
 void __init sh7705_cache_init(void)
 {
-	flush_icache_range = sh7705_flush_icache_range;
-	flush_dcache_page = sh7705_flush_dcache_page;
-	flush_cache_all = sh7705_flush_cache_all;
-	flush_cache_mm = sh7705_flush_cache_mm;
-	flush_cache_dup_mm = sh7705_flush_cache_mm;
-	flush_cache_range = sh7705_flush_cache_range;
-	flush_cache_page = sh7705_flush_cache_page;
-	flush_icache_page = sh7705_flush_icache_page;
+	local_flush_icache_range = sh7705_flush_icache_range;
+	local_flush_dcache_page = sh7705_flush_dcache_page;
+	local_flush_cache_all = sh7705_flush_cache_all;
+	local_flush_cache_mm = sh7705_flush_cache_all;
+	local_flush_cache_dup_mm = sh7705_flush_cache_all;
+	local_flush_cache_range = sh7705_flush_cache_all;
+	local_flush_cache_page = sh7705_flush_cache_page;
+	local_flush_icache_page = sh7705_flush_icache_page;
 }
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index d60239460436..411fe6058429 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -1,5 +1,5 @@
 /*
- * arch/sh/mm/pg-mmu.c
+ * arch/sh/mm/cache.c
  *
  * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
  * Copyright (C) 2002 - 2009 Paul Mundt
@@ -10,63 +10,26 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/fs.h>
+#include <linux/smp.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-void (*flush_cache_all)(void);
-void (*flush_cache_mm)(struct mm_struct *mm);
-void (*flush_cache_dup_mm)(struct mm_struct *mm);
-void (*flush_cache_page)(struct vm_area_struct *vma,
-			 unsigned long addr, unsigned long pfn);
-void (*flush_cache_range)(struct vm_area_struct *vma,
-			  unsigned long start, unsigned long end);
-void (*flush_dcache_page)(struct page *page);
-void (*flush_icache_range)(unsigned long start, unsigned long end);
-void (*flush_icache_page)(struct vm_area_struct *vma,
-			  struct page *page);
-void (*flush_cache_sigtramp)(unsigned long address);
+void (*local_flush_cache_all)(void *args) = cache_noop;
+void (*local_flush_cache_mm)(void *args) = cache_noop;
+void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
+void (*local_flush_cache_page)(void *args) = cache_noop;
+void (*local_flush_cache_range)(void *args) = cache_noop;
+void (*local_flush_dcache_page)(void *args) = cache_noop;
+void (*local_flush_icache_range)(void *args) = cache_noop;
+void (*local_flush_icache_page)(void *args) = cache_noop;
+void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
+
 void (*__flush_wback_region)(void *start, int size);
 void (*__flush_purge_region)(void *start, int size);
 void (*__flush_invalidate_region)(void *start, int size);
 
-static inline void noop_flush_cache_all(void)
-{
-}
-
-static inline void noop_flush_cache_mm(struct mm_struct *mm)
-{
-}
-
-static inline void noop_flush_cache_page(struct vm_area_struct *vma,
-					 unsigned long addr, unsigned long pfn)
-{
-}
-
-static inline void noop_flush_cache_range(struct vm_area_struct *vma,
-					  unsigned long start, unsigned long end)
-{
-}
-
-static inline void noop_flush_dcache_page(struct page *page)
-{
-}
-
-static inline void noop_flush_icache_range(unsigned long start,
-					   unsigned long end)
-{
-}
-
-static inline void noop_flush_icache_page(struct vm_area_struct *vma,
-					  struct page *page)
-{
-}
-
-static inline void noop_flush_cache_sigtramp(unsigned long address)
-{
-}
-
 static inline void noop__flush_region(void *start, int size)
 {
 }
@@ -184,6 +147,72 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 	}
 }
 
+void flush_cache_all(void)
+{
+	on_each_cpu(local_flush_cache_all, NULL, 1);
+}
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+	on_each_cpu(local_flush_cache_mm, mm, 1);
+}
+
+void flush_cache_dup_mm(struct mm_struct *mm)
+{
+	on_each_cpu(local_flush_cache_dup_mm, mm, 1);
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
+		      unsigned long pfn)
+{
+	struct flusher_data data;
+
+	data.vma = vma;
+	data.addr1 = addr;
+	data.addr2 = pfn;
+
+	on_each_cpu(local_flush_cache_page, (void *)&data, 1);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+		       unsigned long end)
+{
+	struct flusher_data data;
+
+	data.vma = vma;
+	data.addr1 = start;
+	data.addr2 = end;
+
+	on_each_cpu(local_flush_cache_range, (void *)&data, 1);
+}
+
+void flush_dcache_page(struct page *page)
+{
+	on_each_cpu(local_flush_dcache_page, page, 1);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+	struct flusher_data data;
+
+	data.vma = NULL;
+	data.addr1 = start;
+	data.addr2 = end;
+
+	on_each_cpu(local_flush_icache_range, (void *)&data, 1);
+}
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+	/* Nothing uses the VMA, so just pass the struct page along */
+	on_each_cpu(local_flush_icache_page, page, 1);
+}
+
+void flush_cache_sigtramp(unsigned long address)
+{
+	on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
+}
+
 static void compute_alias(struct cache_info *c)
 {
 	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
@@ -230,16 +259,6 @@ void __init cpu_cache_init(void)
 	compute_alias(&boot_cpu_data.dcache);
 	compute_alias(&boot_cpu_data.scache);
 
-	flush_cache_all = noop_flush_cache_all;
-	flush_cache_mm = noop_flush_cache_mm;
-	flush_cache_dup_mm = noop_flush_cache_mm;
-	flush_cache_page = noop_flush_cache_page;
-	flush_cache_range = noop_flush_cache_range;
-	flush_dcache_page = noop_flush_dcache_page;
-	flush_icache_range = noop_flush_icache_range;
-	flush_icache_page = noop_flush_icache_page;
-	flush_cache_sigtramp = noop_flush_cache_sigtramp;
-
 	__flush_wback_region = noop__flush_region;
 	__flush_purge_region = noop__flush_region;
 	__flush_invalidate_region = noop__flush_region;