aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sh/mm/cache-sh5.c
diff options
context:
space:
mode:
authorPaul Mundt <lethal@linux-sh.org>2009-08-21 04:23:14 -0400
committerPaul Mundt <lethal@linux-sh.org>2009-08-21 04:23:14 -0400
commitf26b2a562b46ab186c8383993ab1332673ac4a47 (patch)
tree5cf52089da5ca762c07cf6c1364a6aa411fb3038 /arch/sh/mm/cache-sh5.c
parentf9bd71f255b4349c4f9f596863161fd5182f67fa (diff)
sh: Make cache flushers SMP-aware.
This does a bit of rework for making the cache flushers SMP-aware. The function pointer-based flushers are renamed to local variants with the exported interface being commonly implemented and wrapping as necessary. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/cache-sh5.c')
-rw-r--r--arch/sh/mm/cache-sh5.c64
1 file changed, 41 insertions(+), 23 deletions(-)
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index d4a445c865d7..467ff8e260f7 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -483,7 +483,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
483 * Invalidate the entire contents of both caches, after writing back to 483 * Invalidate the entire contents of both caches, after writing back to
484 * memory any dirty data from the D-cache. 484 * memory any dirty data from the D-cache.
485 */ 485 */
486static void sh5_flush_cache_all(void) 486static void sh5_flush_cache_all(void *unused)
487{ 487{
488 sh64_dcache_purge_all(); 488 sh64_dcache_purge_all();
489 sh64_icache_inv_all(); 489 sh64_icache_inv_all();
@@ -510,7 +510,7 @@ static void sh5_flush_cache_all(void)
510 * I-cache. This is similar to the lack of action needed in 510 * I-cache. This is similar to the lack of action needed in
511 * flush_tlb_mm - see fault.c. 511 * flush_tlb_mm - see fault.c.
512 */ 512 */
513static void sh5_flush_cache_mm(struct mm_struct *mm) 513static void sh5_flush_cache_mm(void *unused)
514{ 514{
515 sh64_dcache_purge_all(); 515 sh64_dcache_purge_all();
516} 516}
@@ -522,13 +522,18 @@ static void sh5_flush_cache_mm(struct mm_struct *mm)
522 * 522 *
523 * Note, 'end' is 1 byte beyond the end of the range to flush. 523 * Note, 'end' is 1 byte beyond the end of the range to flush.
524 */ 524 */
525static void sh5_flush_cache_range(struct vm_area_struct *vma, 525static void sh5_flush_cache_range(void *args)
526 unsigned long start, unsigned long end)
527{ 526{
528 struct mm_struct *mm = vma->vm_mm; 527 struct flusher_data *data = args;
528 struct vm_area_struct *vma;
529 unsigned long start, end;
529 530
530 sh64_dcache_purge_user_range(mm, start, end); 531 vma = data->vma;
531 sh64_icache_inv_user_page_range(mm, start, end); 532 start = data->addr1;
533 end = data->addr2;
534
535 sh64_dcache_purge_user_range(vma->vm_mm, start, end);
536 sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
532} 537}
533 538
534/* 539/*
@@ -540,16 +545,23 @@ static void sh5_flush_cache_range(struct vm_area_struct *vma,
540 * 545 *
541 * Note, this is called with pte lock held. 546 * Note, this is called with pte lock held.
542 */ 547 */
543static void sh5_flush_cache_page(struct vm_area_struct *vma, 548static void sh5_flush_cache_page(void *args)
544 unsigned long eaddr, unsigned long pfn)
545{ 549{
550 struct flusher_data *data = args;
551 struct vm_area_struct *vma;
552 unsigned long eaddr, pfn;
553
554 vma = data->vma;
555 eaddr = data->addr1;
556 pfn = data->addr2;
557
546 sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT); 558 sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
547 559
548 if (vma->vm_flags & VM_EXEC) 560 if (vma->vm_flags & VM_EXEC)
549 sh64_icache_inv_user_page(vma, eaddr); 561 sh64_icache_inv_user_page(vma, eaddr);
550} 562}
551 563
552static void sh5_flush_dcache_page(struct page *page) 564static void sh5_flush_dcache_page(void *page)
553{ 565{
554 sh64_dcache_purge_phy_page(page_to_phys(page)); 566 sh64_dcache_purge_phy_page(page_to_phys(page));
555 wmb(); 567 wmb();
@@ -563,8 +575,14 @@ static void sh5_flush_dcache_page(struct page *page)
563 * mapping, therefore it's guaranteed that there no cache entries for 575 * mapping, therefore it's guaranteed that there no cache entries for
564 * the range in cache sets of the wrong colour. 576 * the range in cache sets of the wrong colour.
565 */ 577 */
566static void sh5_flush_icache_range(unsigned long start, unsigned long end) 578static void sh5_flush_icache_range(void *args)
567{ 579{
580 struct flusher_data *data = args;
581 unsigned long start, end;
582
583 start = data->addr1;
584 end = data->addr2;
585
568 __flush_purge_region((void *)start, end); 586 __flush_purge_region((void *)start, end);
569 wmb(); 587 wmb();
570 sh64_icache_inv_kernel_range(start, end); 588 sh64_icache_inv_kernel_range(start, end);
@@ -576,25 +594,25 @@ static void sh5_flush_icache_range(unsigned long start, unsigned long end)
576 * current process. Used to flush signal trampolines on the stack to 594 * current process. Used to flush signal trampolines on the stack to
577 * make them executable. 595 * make them executable.
578 */ 596 */
579static void sh5_flush_cache_sigtramp(unsigned long vaddr) 597static void sh5_flush_cache_sigtramp(void *vaddr)
580{ 598{
581 unsigned long end = vaddr + L1_CACHE_BYTES; 599 unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;
582 600
583 __flush_wback_region((void *)vaddr, L1_CACHE_BYTES); 601 __flush_wback_region(vaddr, L1_CACHE_BYTES);
584 wmb(); 602 wmb();
585 sh64_icache_inv_current_user_range(vaddr, end); 603 sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
586} 604}
587 605
588void __init sh5_cache_init(void) 606void __init sh5_cache_init(void)
589{ 607{
590 flush_cache_all = sh5_flush_cache_all; 608 local_flush_cache_all = sh5_flush_cache_all;
591 flush_cache_mm = sh5_flush_cache_mm; 609 local_flush_cache_mm = sh5_flush_cache_mm;
592 flush_cache_dup_mm = sh5_flush_cache_mm; 610 local_flush_cache_dup_mm = sh5_flush_cache_mm;
593 flush_cache_page = sh5_flush_cache_page; 611 local_flush_cache_page = sh5_flush_cache_page;
594 flush_cache_range = sh5_flush_cache_range; 612 local_flush_cache_range = sh5_flush_cache_range;
595 flush_dcache_page = sh5_flush_dcache_page; 613 local_flush_dcache_page = sh5_flush_dcache_page;
596 flush_icache_range = sh5_flush_icache_range; 614 local_flush_icache_range = sh5_flush_icache_range;
597 flush_cache_sigtramp = sh5_flush_cache_sigtramp; 615 local_flush_cache_sigtramp = sh5_flush_cache_sigtramp;
598 616
599 /* Reserve a slot for dcache colouring in the DTLB */ 617 /* Reserve a slot for dcache colouring in the DTLB */
600 dtlb_cache_slot = sh64_get_wired_dtlb_entry(); 618 dtlb_cache_slot = sh64_get_wired_dtlb_entry();