author	Paul Mundt <lethal@linux-sh.org>	2009-08-15 12:50:17 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-08-15 12:50:17 -0400
commit	94ecd224c940830e2f2724c3860eb7fb74c15d31 (patch)
tree	b3940834bc26796af862acf1a24810a2d0d865c9
parent	1ee4ab09f38b77b3a5750429d456d6606b237924 (diff)
sh: Fix up the SH-5 build with caches enabled.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
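
This patch reworks the SH cacheop interface so the SH-5 parts build with caches enabled: __icbi() loses its hard-coded operand and takes the target address (ctrl_barrier() now passes 0xa8000000 explicitly), the icbi/ocbp/ocbi/ocbwb wrappers move into the 32-bit and 64-bit system headers, and the SH-5 flush routines become static sh5_*() functions that sh5_cache_init() installs into the flush_* entry points. Condensed from the hunks below, the SH-4A side of the new interface looks roughly like this (whitespace approximated):

    /* system_32.h */
    #define __icbi(addr)	__asm__ __volatile__ ("icbi @%0\n\t" : : "r" (addr))
    #define __ocbwb(addr)	__asm__ __volatile__ ("ocbwb @%0\n\t" : : "r" (addr))

    /* system.h */
    #define ctrl_barrier()	__icbi(0xa8000000)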
-rw-r--r--	arch/sh/include/asm/system.h	14
-rw-r--r--	arch/sh/include/asm/system_32.h	10
-rw-r--r--	arch/sh/include/asm/system_64.h	5
-rw-r--r--	arch/sh/kernel/sh_ksyms_64.c	8
-rw-r--r--	arch/sh/mm/cache-sh5.c	249
-rw-r--r--	arch/sh/mm/flush-sh4.c	81
6 files changed, 64 insertions, 303 deletions
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index ab79e1f4fbe0..bf7c4cbde372 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -14,18 +14,6 @@
 
 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
 
-#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
-#define __icbi()			\
-{					\
-	unsigned long __addr;		\
-	__addr = 0xa8000000;		\
-	__asm__ __volatile__(		\
-		"icbi %0\n\t"		\
-		: /* no output */	\
-		: "m" (__m(__addr)));	\
-}
-#endif
-
 /*
  * A brief note on ctrl_barrier(), the control register write barrier.
  *
@@ -44,7 +32,7 @@
 #define mb()		__asm__ __volatile__ ("synco": : :"memory")
 #define rmb()		mb()
 #define wmb()		__asm__ __volatile__ ("synco": : :"memory")
-#define ctrl_barrier()	__icbi()
+#define ctrl_barrier()	__icbi(0xa8000000)
 #define read_barrier_depends()	do { } while(0)
 #else
 #define mb()		__asm__ __volatile__ ("": : :"memory")
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index d7299d69ff79..5ddd2359f3ef 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -63,6 +63,16 @@ do { \
 #define __restore_dsp(tsk)	do { } while (0)
 #endif
 
+#if defined(CONFIG_CPU_SH4A)
+#define __icbi(addr)	__asm__ __volatile__ ( "icbi @%0\n\t" : : "r" (addr))
+#else
+#define __icbi(addr)	mb()
+#endif
+
+#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp @%0\n\t" : : "r" (addr))
+#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi @%0\n\t" : : "r" (addr))
+#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb @%0\n\t" : : "r" (addr))
+
 struct task_struct *__switch_to(struct task_struct *prev,
 				struct task_struct *next);
 
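
The wrappers added above are meant to be applied one cache line at a time across a region, which is exactly how flush-sh4.c is converted further down. As a minimal illustration only (a hypothetical helper, not part of this patch), a write-back walk over a region would look like:

    static inline void wback_range(unsigned long start, unsigned long end)
    {
    	unsigned long v;

    	/* round down to the start of the first cache line in the range */
    	for (v = start & ~(L1_CACHE_BYTES - 1); v < end; v += L1_CACHE_BYTES)
    		__ocbwb(v);	/* write back one operand-cache block */
    }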
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 218b54d9d660..8e4a03e7966c 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -37,6 +37,11 @@ do { \
 #define jump_to_uncached()	do { } while (0)
 #define back_to_cached()	do { } while (0)
 
+#define __icbi(addr)	__asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
+#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
+#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
+#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
+
 static inline reg_size_t register_align(void *val)
 {
 	return (unsigned long long)(signed long long)(signed long)val;
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index f96c95c07c4b..d008e17eb257 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -30,14 +30,6 @@ extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
 EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(kernel_thread);
 
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU)
-EXPORT_SYMBOL(clear_user_page);
-#endif
-
-#ifndef CONFIG_CACHE_OFF
-EXPORT_SYMBOL(flush_dcache_page);
-#endif
-
 #ifdef CONFIG_VT
 EXPORT_SYMBOL(screen_info);
 #endif
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index a8f5142dc2cf..819e0f9e8dba 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -25,29 +25,6 @@ extern void __weak sh4__flush_region_init(void);
 /* Wired TLB entry for the D-cache */
 static unsigned long long dtlb_cache_slot;
 
-void __init cpu_cache_init(void)
-{
-	/* Reserve a slot for dcache colouring in the DTLB */
-	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
-
-	sh4__flush_region_init();
-}
-
-void __init kmap_coherent_init(void)
-{
-	/* XXX ... */
-}
-
-void *kmap_coherent(struct page *page, unsigned long addr)
-{
-	/* XXX ... */
-	return NULL;
-}
-
-void kunmap_coherent(void)
-{
-}
-
 #ifdef CONFIG_DCACHE_DISABLED
 #define sh64_dcache_purge_all()					do { } while (0)
 #define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
@@ -233,52 +210,6 @@ static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
 	}
 }
 
-/*
- * Invalidate a small range of user context I-cache, not necessarily page
- * (or even cache-line) aligned.
- *
- * Since this is used inside ptrace, the ASID in the mm context typically
- * won't match current_asid. We'll have to switch ASID to do this. For
- * safety, and given that the range will be small, do all this under cli.
- *
- * Note, there is a hazard that the ASID in mm->context is no longer
- * actually associated with mm, i.e. if the mm->context has started a new
- * cycle since mm was last active. However, this is just a performance
- * issue: all that happens is that we invalidate lines belonging to
- * another mm, so the owning process has to refill them when that mm goes
- * live again. mm itself can't have any cache entries because there will
- * have been a flush_cache_all when the new mm->context cycle started.
- */
-static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
-						unsigned long start, int len)
-{
-	unsigned long long eaddr = start;
-	unsigned long long eaddr_end = start + len;
-	unsigned long current_asid, mm_asid;
-	unsigned long flags;
-	unsigned long long epage_start;
-
-	/*
-	 * Align to start of cache line. Otherwise, suppose len==8 and
-	 * start was at 32N+28 : the last 4 bytes wouldn't get invalidated.
-	 */
-	eaddr = L1_CACHE_ALIGN(start);
-	eaddr_end = start + len;
-
-	mm_asid = cpu_asid(smp_processor_id(), mm);
-	local_irq_save(flags);
-	current_asid = switch_and_save_asid(mm_asid);
-
-	epage_start = eaddr & PAGE_MASK;
-
-	while (eaddr < eaddr_end) {
-		__asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
-		eaddr += L1_CACHE_BYTES;
-	}
-	switch_and_save_asid(current_asid);
-	local_irq_restore(flags);
-}
-
 static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
 {
 	/* The icbi instruction never raises ITLBMISS. i.e. if there's not a
@@ -564,7 +495,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
  * Invalidate the entire contents of both caches, after writing back to
  * memory any dirty data from the D-cache.
  */
-void flush_cache_all(void)
+static void sh5_flush_cache_all(void)
 {
 	sh64_dcache_purge_all();
 	sh64_icache_inv_all();
@@ -591,7 +522,7 @@ void flush_cache_all(void)
  * I-cache. This is similar to the lack of action needed in
  * flush_tlb_mm - see fault.c.
  */
-void flush_cache_mm(struct mm_struct *mm)
+static void sh5_flush_cache_mm(struct mm_struct *mm)
 {
 	sh64_dcache_purge_all();
 }
@@ -603,8 +534,8 @@ void flush_cache_mm(struct mm_struct *mm)
  *
  * Note, 'end' is 1 byte beyond the end of the range to flush.
  */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
+static void sh5_flush_cache_range(struct vm_area_struct *vma,
+				  unsigned long start, unsigned long end)
 {
 	struct mm_struct *mm = vma->vm_mm;
 
@@ -621,8 +552,8 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
  *
  * Note, this is called with pte lock held.
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
-		      unsigned long pfn)
+static void sh5_flush_cache_page(struct vm_area_struct *vma,
+				 unsigned long eaddr, unsigned long pfn)
 {
 	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
 
@@ -630,7 +561,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
 		sh64_icache_inv_user_page(vma, eaddr);
 }
 
-void flush_dcache_page(struct page *page)
+static void sh5_flush_dcache_page(struct page *page)
 {
 	sh64_dcache_purge_phy_page(page_to_phys(page));
 	wmb();
@@ -644,7 +575,7 @@ void flush_dcache_page(struct page *page)
  * mapping, therefore it's guaranteed that there no cache entries for
  * the range in cache sets of the wrong colour.
  */
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh5_flush_icache_range(unsigned long start, unsigned long end)
 {
 	__flush_purge_region((void *)start, end);
 	wmb();
@@ -652,31 +583,12 @@ void flush_icache_range(unsigned long start, unsigned long end)
 }
 
 /*
- * Flush the range of user (defined by vma->vm_mm) address space starting
- * at 'addr' for 'len' bytes from the cache. The range does not straddle
- * a page boundary, the unique physical page containing the range is
- * 'page'. This seems to be used mainly for invalidating an address
- * range following a poke into the program text through the ptrace() call
- * from another process (e.g. for BRK instruction insertion).
- */
-static void flush_icache_user_range(struct vm_area_struct *vma,
-			struct page *page, unsigned long addr, int len)
-{
-
-	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
-	mb();
-
-	if (vma->vm_flags & VM_EXEC)
-		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
-}
-
-/*
  * For the address range [start,end), write back the data from the
  * D-cache and invalidate the corresponding region of the I-cache for the
  * current process. Used to flush signal trampolines on the stack to
  * make them executable.
  */
-void flush_cache_sigtramp(unsigned long vaddr)
+static void sh5_flush_cache_sigtramp(unsigned long vaddr)
 {
 	unsigned long end = vaddr + L1_CACHE_BYTES;
 
@@ -685,138 +597,19 @@ void flush_cache_sigtramp(unsigned long vaddr)
 	sh64_icache_inv_current_user_range(vaddr, end);
 }
 
-#ifdef CONFIG_MMU
-/*
- * These *MUST* lie in an area of virtual address space that's otherwise
- * unused.
- */
-#define UNIQUE_EADDR_START 0xe0000000UL
-#define UNIQUE_EADDR_END   0xe8000000UL
-
-/*
- * Given a physical address paddr, and a user virtual address user_eaddr
- * which will eventually be mapped to it, create a one-off kernel-private
- * eaddr mapped to the same paddr. This is used for creating special
- * destination pages for copy_user_page and clear_user_page.
- */
-static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
-					    unsigned long paddr)
-{
-	static unsigned long current_pointer = UNIQUE_EADDR_START;
-	unsigned long coloured_pointer;
-
-	if (current_pointer == UNIQUE_EADDR_END) {
-		sh64_dcache_purge_all();
-		current_pointer = UNIQUE_EADDR_START;
-	}
-
-	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
-				(user_eaddr & CACHE_OC_SYN_MASK);
-	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);
-
-	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);
-
-	return coloured_pointer;
-}
-
-static void sh64_copy_user_page_coloured(void *to, void *from,
-					 unsigned long address)
-{
-	void *coloured_to;
-
-	/*
-	 * Discard any existing cache entries of the wrong colour. These are
-	 * present quite often, if the kernel has recently used the page
-	 * internally, then given it up, then it's been allocated to the user.
-	 */
-	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);
-
-	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
-	copy_page(from, coloured_to);
-
-	sh64_teardown_dtlb_cache_slot();
-}
-
-static void sh64_clear_user_page_coloured(void *to, unsigned long address)
-{
-	void *coloured_to;
-
-	/*
-	 * Discard any existing kernel-originated lines of the wrong
-	 * colour (as above)
-	 */
-	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);
-
-	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
-	clear_page(coloured_to);
-
-	sh64_teardown_dtlb_cache_slot();
-}
-
-/*
- * 'from' and 'to' are kernel virtual addresses (within the superpage
- * mapping of the physical RAM). 'address' is the user virtual address
- * where the copy 'to' will be mapped after. This allows a custom
- * mapping to be used to ensure that the new copy is placed in the
- * right cache sets for the user to see it without having to bounce it
- * out via memory. Note however : the call to flush_page_to_ram in
- * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
- * very important case!
- *
- * TBD : can we guarantee that on every call, any cache entries for
- * 'from' are in the same colour sets as 'address' also? i.e. is this
- * always used just to deal with COW? (I suspect not).
- *
- * There are two possibilities here for when the page 'from' was last accessed:
- * - by the kernel : this is OK, no purge required.
- * - by the/a user (e.g. for break_COW) : need to purge.
- *
- * If the potential user mapping at 'address' is the same colour as
- * 'from' there is no need to purge any cache lines from the 'from'
- * page mapped into cache sets of colour 'address'. (The copy will be
- * accessing the page through 'from').
- */
-void copy_user_page(void *to, void *from, unsigned long address,
-		    struct page *page)
+void __init sh5_cache_init(void)
 {
-	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0)
-		sh64_dcache_purge_coloured_phy_page(__pa(from), address);
+	flush_cache_all		= sh5_flush_cache_all;
+	flush_cache_mm		= sh5_flush_cache_mm;
+	flush_cache_dup_mm	= sh5_flush_cache_mm;
+	flush_cache_page	= sh5_flush_cache_page;
+	flush_cache_range	= sh5_flush_cache_range;
+	flush_dcache_page	= sh5_flush_dcache_page;
+	flush_icache_range	= sh5_flush_icache_range;
+	flush_cache_sigtramp	= sh5_flush_cache_sigtramp;
 
-	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
-		copy_page(to, from);
-	else
-		sh64_copy_user_page_coloured(to, from, address);
-}
-
-/*
- * 'to' is a kernel virtual address (within the superpage mapping of the
- * physical RAM). 'address' is the user virtual address where the 'to'
- * page will be mapped after. This allows a custom mapping to be used to
- * ensure that the new copy is placed in the right cache sets for the
- * user to see it without having to bounce it out via memory.
- */
-void clear_user_page(void *to, unsigned long address, struct page *page)
-{
-	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
-		clear_page(to);
-	else
-		sh64_clear_user_page_coloured(to, address);
-}
-
-void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
-		       unsigned long vaddr, void *dst, const void *src,
-		       unsigned long len)
-{
-	flush_cache_page(vma, vaddr, page_to_pfn(page));
-	memcpy(dst, src, len);
-	flush_icache_user_range(vma, page, vaddr, len);
-}
+	/* Reserve a slot for dcache colouring in the DTLB */
+	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
 
-void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
-			 unsigned long vaddr, void *dst, const void *src,
-			 unsigned long len)
-{
-	flush_cache_page(vma, vaddr, page_to_pfn(page));
-	memcpy(dst, src, len);
+	sh4__flush_region_init();
 }
-#endif
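
The sh5_* routines above are wired up through plain function pointers assigned in sh5_cache_init(); callers keep invoking flush_cache_all(), flush_dcache_page() and friends and get the SH-5 implementations at run time. A small self-contained sketch of that dispatch pattern (illustrative only; the kernel's own pointer declarations are not shown in this diff):

    #include <stdio.h>

    /* The generic entry point is a pointer; the CPU-specific init fills it in. */
    static void sh5_flush_cache_all(void)
    {
    	puts("sh5: purge the D-cache, invalidate the I-cache");
    }

    static void (*flush_cache_all)(void);

    static void sh5_cache_init(void)
    {
    	flush_cache_all = sh5_flush_cache_all;
    }

    int main(void)
    {
    	sh5_cache_init();
    	flush_cache_all();	/* dispatches to the SH-5 routine */
    	return 0;
    }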
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c
index 99c50dc7551e..cef402678f42 100644
--- a/arch/sh/mm/flush-sh4.c
+++ b/arch/sh/mm/flush-sh4.c
@@ -19,28 +19,19 @@ static void sh4__flush_wback_region(void *start, int size)
 	cnt = (end - v) / L1_CACHE_BYTES;
 
 	while (cnt >= 8) {
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
 		cnt -= 8;
 	}
 
 	while (cnt) {
-		asm volatile("ocbwb @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbwb(v); v += L1_CACHE_BYTES;
 		cnt--;
 	}
 }
@@ -62,27 +53,18 @@ static void sh4__flush_purge_region(void *start, int size)
 	cnt = (end - v) / L1_CACHE_BYTES;
 
 	while (cnt >= 8) {
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
 		cnt -= 8;
 	}
 	while (cnt) {
-		asm volatile("ocbp @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbp(v); v += L1_CACHE_BYTES;
 		cnt--;
 	}
 }
@@ -101,28 +83,19 @@ static void sh4__flush_invalidate_region(void *start, int size)
 	cnt = (end - v) / L1_CACHE_BYTES;
 
 	while (cnt >= 8) {
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
 		cnt -= 8;
 	}
 
 	while (cnt) {
-		asm volatile("ocbi @%0" : : "r" (v));
-		v += L1_CACHE_BYTES;
+		__ocbi(v); v += L1_CACHE_BYTES;
 		cnt--;
 	}
 }