path: root/arch/sh/mm/cache-sh5.c
author	Russell King <rmk+kernel@arm.linux.org.uk>	2009-09-24 16:22:33 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-09-24 16:22:33 -0400
commit	baea7b946f00a291b166ccae7fcfed6c01530cc6 (patch)
tree	4aa275fbdbec9c7b9b4629e8bee2bbecd3c6a6af /arch/sh/mm/cache-sh5.c
parent	ae19ffbadc1b2100285a5b5b3d0a4e0a11390904 (diff)
parent	94e0fb086fc5663c38bbc0fe86d698be8314f82f (diff)
Merge branch 'origin' into for-linus
Conflicts:
	MAINTAINERS
Diffstat (limited to 'arch/sh/mm/cache-sh5.c')
-rw-r--r--	arch/sh/mm/cache-sh5.c	307
1 files changed, 47 insertions, 260 deletions
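
In this file, the merge brings in the conversion of the SH-5 cache maintenance entry points (flush_cache_all(), flush_cache_range(), flush_cache_page(), and so on) from globally visible functions into static sh5_* handlers that take a single void * argument and are installed at boot through local_flush_* function pointers. The sketch below illustrates that dispatch pattern only; it is not the actual arch/sh header or wrapper code. The struct name flusher_data and its vma/addr1/addr2 fields come from the diff itself, while the wrapper shown here is a hypothetical stand-in for the generic dispatch code.

/* Simplified sketch of the flusher_data dispatch pattern (assumed, not verbatim kernel code). */

struct vm_area_struct;			/* opaque here; the real definition lives in the mm headers */

struct flusher_data {			/* field names as consumed by the new sh5_* handlers */
	struct vm_area_struct *vma;
	unsigned long addr1;		/* start address (or pfn for the cache_page variant) */
	unsigned long addr2;		/* end address */
};

/* Hook filled in by sh5_cache_init(); same shape as sh5_flush_cache_range() below. */
static void (*local_flush_cache_range)(void *args);

/* Hypothetical generic wrapper: pack the arguments and call through the hook. */
static void flush_cache_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	if (local_flush_cache_range)
		local_flush_cache_range(&data);
}
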
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index 86762092508c..467ff8e260f7 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -20,23 +20,11 @@
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 
+extern void __weak sh4__flush_region_init(void);
+
 /* Wired TLB entry for the D-cache */
 static unsigned long long dtlb_cache_slot;
 
-void __init p3_cache_init(void)
-{
-	/* Reserve a slot for dcache colouring in the DTLB */
-	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
-}
-
-#ifdef CONFIG_DCACHE_DISABLED
-#define sh64_dcache_purge_all()					do { } while (0)
-#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
-#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
-#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
-#define sh64_dcache_purge_virt_page(mm, eaddr)			do { } while (0)
-#endif
-
 /*
  * The following group of functions deal with mapping and unmapping a
  * temporary page into a DTLB slot that has been set aside for exclusive
@@ -56,7 +44,6 @@ static inline void sh64_teardown_dtlb_cache_slot(void)
 	local_irq_enable();
 }
 
-#ifndef CONFIG_ICACHE_DISABLED
 static inline void sh64_icache_inv_all(void)
 {
 	unsigned long long addr, flag, data;
@@ -214,52 +201,6 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 	}
 }
 
-/*
- * Invalidate a small range of user context I-cache, not necessarily page
- * (or even cache-line) aligned.
- *
- * Since this is used inside ptrace, the ASID in the mm context typically
- * won't match current_asid. We'll have to switch ASID to do this. For
- * safety, and given that the range will be small, do all this under cli.
- *
- * Note, there is a hazard that the ASID in mm->context is no longer
- * actually associated with mm, i.e. if the mm->context has started a new
- * cycle since mm was last active. However, this is just a performance
- * issue: all that happens is that we invalidate lines belonging to
- * another mm, so the owning process has to refill them when that mm goes
- * live again. mm itself can't have any cache entries because there will
- * have been a flush_cache_all when the new mm->context cycle started.
- */
-static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
-						unsigned long start, int len)
-{
-	unsigned long long eaddr = start;
-	unsigned long long eaddr_end = start + len;
-	unsigned long current_asid, mm_asid;
-	unsigned long flags;
-	unsigned long long epage_start;
-
-	/*
-	 * Align to start of cache line. Otherwise, suppose len==8 and
-	 * start was at 32N+28 : the last 4 bytes wouldn't get invalidated.
-	 */
-	eaddr = L1_CACHE_ALIGN(start);
-	eaddr_end = start + len;
-
-	mm_asid = cpu_asid(smp_processor_id(), mm);
-	local_irq_save(flags);
-	current_asid = switch_and_save_asid(mm_asid);
-
-	epage_start = eaddr & PAGE_MASK;
-
-	while (eaddr < eaddr_end) {
-		__asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
-		eaddr += L1_CACHE_BYTES;
-	}
-	switch_and_save_asid(current_asid);
-	local_irq_restore(flags);
-}
-
 static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
 {
 	/* The icbi instruction never raises ITLBMISS. i.e. if there's not a
@@ -287,9 +228,7 @@ static void sh64_icache_inv_current_user_range(unsigned long start, unsigned lon
 		addr += L1_CACHE_BYTES;
 	}
 }
-#endif /* !CONFIG_ICACHE_DISABLED */
 
-#ifndef CONFIG_DCACHE_DISABLED
 /* Buffer used as the target of alloco instructions to purge data from cache
    sets by natural eviction. -- RPC */
 #define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
@@ -541,59 +480,10 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
 }
 
 /*
- * Purge the range of addresses from the D-cache.
- *
- * The addresses lie in the superpage mapping. There's no harm if we
- * overpurge at either end - just a small performance loss.
- */
-void __flush_purge_region(void *start, int size)
-{
-	unsigned long long ullend, addr, aligned_start;
-
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-	while (addr <= ullend) {
-		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-void __flush_wback_region(void *start, int size)
-{
-	unsigned long long ullend, addr, aligned_start;
-
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-	while (addr < ullend) {
-		__asm__ __volatile__ ("ocbwb %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-void __flush_invalidate_region(void *start, int size)
-{
-	unsigned long long ullend, addr, aligned_start;
-
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) start + size;
-
-	while (addr < ullend) {
-		__asm__ __volatile__ ("ocbi %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-#endif /* !CONFIG_DCACHE_DISABLED */
-
-/*
  * Invalidate the entire contents of both caches, after writing back to
  * memory any dirty data from the D-cache.
  */
-void flush_cache_all(void)
+static void sh5_flush_cache_all(void *unused)
 {
 	sh64_dcache_purge_all();
 	sh64_icache_inv_all();
@@ -620,7 +510,7 @@ void flush_cache_all(void)
  * I-cache. This is similar to the lack of action needed in
  * flush_tlb_mm - see fault.c.
  */
-void flush_cache_mm(struct mm_struct *mm)
+static void sh5_flush_cache_mm(void *unused)
 {
 	sh64_dcache_purge_all();
 }
@@ -632,13 +522,18 @@ void flush_cache_mm(struct mm_struct *mm)
  *
  * Note, 'end' is 1 byte beyond the end of the range to flush.
  */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
+static void sh5_flush_cache_range(void *args)
 {
-	struct mm_struct *mm = vma->vm_mm;
+	struct flusher_data *data = args;
+	struct vm_area_struct *vma;
+	unsigned long start, end;
+
+	vma = data->vma;
+	start = data->addr1;
+	end = data->addr2;
 
-	sh64_dcache_purge_user_range(mm, start, end);
-	sh64_icache_inv_user_page_range(mm, start, end);
+	sh64_dcache_purge_user_range(vma->vm_mm, start, end);
+	sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
 }
 
 /*
@@ -650,16 +545,23 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
  *
  * Note, this is called with pte lock held.
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
-		      unsigned long pfn)
+static void sh5_flush_cache_page(void *args)
 {
+	struct flusher_data *data = args;
+	struct vm_area_struct *vma;
+	unsigned long eaddr, pfn;
+
+	vma = data->vma;
+	eaddr = data->addr1;
+	pfn = data->addr2;
+
 	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
 
 	if (vma->vm_flags & VM_EXEC)
 		sh64_icache_inv_user_page(vma, eaddr);
 }
 
-void flush_dcache_page(struct page *page)
+static void sh5_flush_dcache_page(void *page)
 {
 	sh64_dcache_purge_phy_page(page_to_phys(page));
 	wmb();
@@ -673,162 +575,47 @@ void flush_dcache_page(struct page *page)
  * mapping, therefore it's guaranteed that there no cache entries for
  * the range in cache sets of the wrong colour.
  */
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh5_flush_icache_range(void *args)
 {
+	struct flusher_data *data = args;
+	unsigned long start, end;
+
+	start = data->addr1;
+	end = data->addr2;
+
 	__flush_purge_region((void *)start, end);
 	wmb();
 	sh64_icache_inv_kernel_range(start, end);
 }
 
 /*
- * Flush the range of user (defined by vma->vm_mm) address space starting
- * at 'addr' for 'len' bytes from the cache. The range does not straddle
- * a page boundary, the unique physical page containing the range is
- * 'page'. This seems to be used mainly for invalidating an address
- * range following a poke into the program text through the ptrace() call
- * from another process (e.g. for BRK instruction insertion).
- */
-void flush_icache_user_range(struct vm_area_struct *vma,
-			struct page *page, unsigned long addr, int len)
-{
-
-	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
-	mb();
-
-	if (vma->vm_flags & VM_EXEC)
-		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
-}
-
-/*
  * For the address range [start,end), write back the data from the
  * D-cache and invalidate the corresponding region of the I-cache for the
  * current process. Used to flush signal trampolines on the stack to
  * make them executable.
  */
-void flush_cache_sigtramp(unsigned long vaddr)
+static void sh5_flush_cache_sigtramp(void *vaddr)
 {
-	unsigned long end = vaddr + L1_CACHE_BYTES;
+	unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;
 
-	__flush_wback_region((void *)vaddr, L1_CACHE_BYTES);
+	__flush_wback_region(vaddr, L1_CACHE_BYTES);
 	wmb();
-	sh64_icache_inv_current_user_range(vaddr, end);
+	sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
 }
 
-#ifdef CONFIG_MMU
-/*
- * These *MUST* lie in an area of virtual address space that's otherwise
- * unused.
- */
-#define UNIQUE_EADDR_START 0xe0000000UL
-#define UNIQUE_EADDR_END   0xe8000000UL
-
-/*
- * Given a physical address paddr, and a user virtual address user_eaddr
- * which will eventually be mapped to it, create a one-off kernel-private
- * eaddr mapped to the same paddr. This is used for creating special
- * destination pages for copy_user_page and clear_user_page.
- */
-static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
-					    unsigned long paddr)
-{
-	static unsigned long current_pointer = UNIQUE_EADDR_START;
-	unsigned long coloured_pointer;
-
-	if (current_pointer == UNIQUE_EADDR_END) {
-		sh64_dcache_purge_all();
-		current_pointer = UNIQUE_EADDR_START;
-	}
-
-	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
-		(user_eaddr & CACHE_OC_SYN_MASK);
-	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);
-
-	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);
-
-	return coloured_pointer;
-}
-
-static void sh64_copy_user_page_coloured(void *to, void *from,
-					 unsigned long address)
+void __init sh5_cache_init(void)
 {
-	void *coloured_to;
+	local_flush_cache_all		= sh5_flush_cache_all;
+	local_flush_cache_mm		= sh5_flush_cache_mm;
+	local_flush_cache_dup_mm	= sh5_flush_cache_mm;
+	local_flush_cache_page		= sh5_flush_cache_page;
+	local_flush_cache_range		= sh5_flush_cache_range;
+	local_flush_dcache_page		= sh5_flush_dcache_page;
+	local_flush_icache_range	= sh5_flush_icache_range;
+	local_flush_cache_sigtramp	= sh5_flush_cache_sigtramp;
 
-	/*
-	 * Discard any existing cache entries of the wrong colour. These are
-	 * present quite often, if the kernel has recently used the page
-	 * internally, then given it up, then it's been allocated to the user.
-	 */
-	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);
-
-	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
-	copy_page(from, coloured_to);
-
-	sh64_teardown_dtlb_cache_slot();
-}
-
-static void sh64_clear_user_page_coloured(void *to, unsigned long address)
-{
-	void *coloured_to;
-
-	/*
-	 * Discard any existing kernel-originated lines of the wrong
-	 * colour (as above)
-	 */
-	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);
-
-	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
-	clear_page(coloured_to);
-
-	sh64_teardown_dtlb_cache_slot();
-}
-
-/*
- * 'from' and 'to' are kernel virtual addresses (within the superpage
- * mapping of the physical RAM). 'address' is the user virtual address
- * where the copy 'to' will be mapped after. This allows a custom
- * mapping to be used to ensure that the new copy is placed in the
- * right cache sets for the user to see it without having to bounce it
- * out via memory. Note however : the call to flush_page_to_ram in
- * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
- * very important case!
- *
- * TBD : can we guarantee that on every call, any cache entries for
- * 'from' are in the same colour sets as 'address' also? i.e. is this
- * always used just to deal with COW? (I suspect not).
- *
- * There are two possibilities here for when the page 'from' was last accessed:
- * - by the kernel : this is OK, no purge required.
- * - by the/a user (e.g. for break_COW) : need to purge.
- *
- * If the potential user mapping at 'address' is the same colour as
- * 'from' there is no need to purge any cache lines from the 'from'
- * page mapped into cache sets of colour 'address'. (The copy will be
- * accessing the page through 'from').
- */
-void copy_user_page(void *to, void *from, unsigned long address,
-		    struct page *page)
-{
-	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0)
-		sh64_dcache_purge_coloured_phy_page(__pa(from), address);
-
-	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
-		copy_page(to, from);
-	else
-		sh64_copy_user_page_coloured(to, from, address);
-}
+	/* Reserve a slot for dcache colouring in the DTLB */
+	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
 
-/*
- * 'to' is a kernel virtual address (within the superpage mapping of the
- * physical RAM). 'address' is the user virtual address where the 'to'
- * page will be mapped after. This allows a custom mapping to be used to
- * ensure that the new copy is placed in the right cache sets for the
- * user to see it without having to bounce it out via memory.
- */
-void clear_user_page(void *to, unsigned long address, struct page *page)
-{
-	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
-		clear_page(to);
-	else
-		sh64_clear_user_page_coloured(to, address);
+	sh4__flush_region_init();
 }
-#endif
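
A side effect of the conversion visible above: the old compile-time stubs (the removed CONFIG_DCACHE_DISABLED and CONFIG_ICACHE_DISABLED blocks) are gone, and the choice of behaviour now happens at runtime through whichever handlers sh5_cache_init() installs. Below is a minimal sketch of that registration style, with assumed hook declarations; the real declarations live elsewhere in arch/sh, so treat this as an illustration rather than the kernel's actual code.

/* Minimal sketch of init-time handler registration (assumed declarations, not verbatim kernel code). */

static void (*local_flush_cache_all)(void *unused);	/* hook; the real declaration lives in a shared header */

static void sh5_flush_cache_all(void *unused)
{
	/* on SH-5 this would purge the D-cache and invalidate the I-cache */
}

void sh5_cache_init(void)
{
	/* runtime selection replaces the old compile-time no-op macros */
	local_flush_cache_all = sh5_flush_cache_all;
}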