From 2277ab4a1df50e05bc732fe9488d4e902bb8399a Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 22 Jul 2009 19:20:49 +0900
Subject: sh: Migrate from PG_mapped to PG_dcache_dirty.

This inverts the delayed dcache flush a bit to be more in line with other
platforms. At the same time this also gives us the ability to do some more
optimizations and cleanup. Now that the update_mmu_cache() callsite only
tests for the bit, the implementation can gradually be split out and made
generic, rather than relying on special implementations for each of the
peculiar CPU types.

SH7705 in 32kB mode and SH-4 still need slightly different handling, but
this is something that can remain isolated in the varying page copy/clear
routines. On top of that, SH-X3 is dcache coherent, so there is no need to
bother with any of these tests in the PTEAEX version of update_mmu_cache(),
so we kill that off too.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache-sh7705.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
(limited to 'arch/sh/mm/cache-sh7705.c')

diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 22dacc778823..fa37bff306b9 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -128,7 +129,11 @@ static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
  */
 void flush_dcache_page(struct page *page)
 {
-	if (test_bit(PG_mapped, &page->flags))
+	struct address_space *mapping = page_mapping(page);
+
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else
 		__flush_dcache_page(PHYSADDR(page_address(page)));
 }

--
cgit v1.2.2
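The other half of this deferred-flush scheme sits in update_mmu_cache(): once the
PTE is actually installed, the bit that flush_dcache_page() set here is tested and,
if set, the kernel-side alias is written back. A rough sketch of that consumer side,
using only helpers already present in this file (simplified for illustration, not
the literal update_mmu_cache() from this series; the function name is invented):

	/*
	 * Consumer side of the deferred flush, in sketch form: if
	 * flush_dcache_page() marked the page's D-cache as dirty,
	 * write back and invalidate the kernel alias now that a
	 * user mapping is being established.
	 */
	static void sh7705_update_cache_sketch(struct page *page)
	{
		if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
			__flush_dcache_page(PHYSADDR(page_address(page)));
	}
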
From 0d051d90bb08b516b9d6c30d25f83d3c6b5b1c1d Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sat, 15 Aug 2009 12:53:39 +0900
Subject: sh: Convert SH7705 extended mode to new cacheflush interface.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache-sh7705.c | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)
(limited to 'arch/sh/mm/cache-sh7705.c')

diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index fa37bff306b9..f1d5c803c04b 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -64,7 +64,7 @@ static inline void cache_wback_all(void)
  *
  * Called from kernel/module.c:sys_init_module and routine for a.out format.
  */
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh7705_flush_icache_range(unsigned long start, unsigned long end)
 {
 	__flush_wback_region((void *)start, end - start);
 }
@@ -72,7 +72,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
 /*
  * Writeback&Invalidate the D-cache of the page
  */
-static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
+static void __flush_dcache_page(unsigned long phys)
 {
 	unsigned long ways, waysize, addrstart;
 	unsigned long flags;
@@ -127,7 +127,7 @@ static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-void flush_dcache_page(struct page *page)
+static void sh7705_flush_dcache_page(struct page *page)
 {
 	struct address_space *mapping = page_mapping(page);

@@ -137,7 +137,7 @@ void flush_dcache_page(struct page *page)
 		__flush_dcache_page(PHYSADDR(page_address(page)));
 }

-void __uses_jump_to_uncached flush_cache_all(void)
+static void sh7705_flush_cache_all(void)
 {
 	unsigned long flags;

@@ -149,7 +149,7 @@ void __uses_jump_to_uncached flush_cache_all(void)
 	local_irq_restore(flags);
 }

-void flush_cache_mm(struct mm_struct *mm)
+static void sh7705_flush_cache_mm(struct mm_struct *mm)
 {
 	/* Is there any good way? */
 	/* XXX: possibly call flush_cache_range for each vm area */
@@ -165,8 +165,8 @@ void flush_cache_mm(struct mm_struct *mm)
  * Flushing the cache lines for U0 only isn't enough.
  * We need to flush for P1 too, which may contain aliases.
  */
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-		       unsigned long end)
+static void sh7705_flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end)
 {

 	/*
@@ -184,8 +184,8 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
  *
  * ADDRESS: Virtual Address (U0 address)
  */
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
-		      unsigned long pfn)
+static void sh7705_flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long address, unsigned long pfn)
 {
 	__flush_dcache_page(pfn << PAGE_SHIFT);
 }
@@ -198,7 +198,20 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
  *
  * Not entirely sure why this is necessary on SH3 with 32K cache but
  * without it we get occasional "Memory fault" when loading a program.
  */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+static void sh7705_flush_icache_page(struct vm_area_struct *vma,
+				     struct page *page)
 {
 	__flush_purge_region(page_address(page), PAGE_SIZE);
 }
+
+void __init sh7705_cache_init(void)
+{
+	flush_icache_range = sh7705_flush_icache_range;
+	flush_dcache_page = sh7705_flush_dcache_page;
+	flush_cache_all = sh7705_flush_cache_all;
+	flush_cache_mm = sh7705_flush_cache_mm;
+	flush_cache_dup_mm = sh7705_flush_cache_mm;
+	flush_cache_range = sh7705_flush_cache_range;
+	flush_cache_page = sh7705_flush_cache_page;
+	flush_icache_page = sh7705_flush_icache_page;
+}

--
cgit v1.2.2
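The pattern here is that the CPU-specific file no longer defines the globally
visible flush_*() entry points directly; it provides static implementations and
registers them with the shared function pointers at boot time via
sh7705_cache_init(). Any other CPU family hooks in the same way. A hypothetical
example of another variant doing so (the sh9999 names are invented purely for
illustration):

	/*
	 * Hypothetical CPU family hooking into the same interface that
	 * sh7705_cache_init() uses above: static local implementations,
	 * assigned to the shared function pointers during boot.
	 */
	static void sh9999_flush_dcache_page(struct page *page)
	{
		/* CPU-specific write-back + invalidate for this page */
	}

	void __init sh9999_cache_init(void)
	{
		flush_dcache_page = sh9999_flush_dcache_page;
		/* assign the remaining hooks as needed */
	}
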
From f26b2a562b46ab186c8383993ab1332673ac4a47 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 21 Aug 2009 17:23:14 +0900
Subject: sh: Make cache flushers SMP-aware.

This does a bit of rework for making the cache flushers SMP-aware. The
function pointer-based flushers are renamed to local variants with the
exported interface being commonly implemented and wrapping as necessary.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache-sh7705.c | 67 ++++++++++++++++-------------------------
 1 file changed, 22 insertions(+), 45 deletions(-)
(limited to 'arch/sh/mm/cache-sh7705.c')

diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index f1d5c803c04b..6293f57fa888 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -64,8 +64,14 @@ static inline void cache_wback_all(void)
  *
  * Called from kernel/module.c:sys_init_module and routine for a.out format.
  */
-static void sh7705_flush_icache_range(unsigned long start, unsigned long end)
+static void sh7705_flush_icache_range(void *args)
 {
+	struct flusher_data *data = args;
+	unsigned long start, end;
+
+	start = data->addr1;
+	end = data->addr2;
+
 	__flush_wback_region((void *)start, end - start);
 }

@@ -127,7 +133,7 @@ static void __flush_dcache_page(unsigned long phys)
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-static void sh7705_flush_dcache_page(struct page *page)
+static void sh7705_flush_dcache_page(void *page)
 {
 	struct address_space *mapping = page_mapping(page);

@@ -137,7 +143,7 @@ static void sh7705_flush_dcache_page(struct page *page)
 		__flush_dcache_page(PHYSADDR(page_address(page)));
 }

-static void sh7705_flush_cache_all(void)
+static void sh7705_flush_cache_all(void *args)
 {
 	unsigned long flags;

@@ -149,44 +155,16 @@ static void sh7705_flush_cache_all(void)
 	local_irq_restore(flags);
 }

-static void sh7705_flush_cache_mm(struct mm_struct *mm)
-{
-	/* Is there any good way? */
-	/* XXX: possibly call flush_cache_range for each vm area */
-	flush_cache_all();
-}
-
-/*
- * Write back and invalidate D-caches.
- *
- * START, END: Virtual Address (U0 address)
- *
- * NOTE: We need to flush the _physical_ page entry.
- * Flushing the cache lines for U0 only isn't enough.
- * We need to flush for P1 too, which may contain aliases.
- */
-static void sh7705_flush_cache_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end)
-{
-
-	/*
-	 * We could call flush_cache_page for the pages of these range,
-	 * but it's not efficient (scan the caches all the time...).
-	 *
-	 * We can't use A-bit magic, as there's the case we don't have
-	 * valid entry on TLB.
-	 */
-	flush_cache_all();
-}
-
 /*
  * Write back and invalidate I/D-caches for the page.
  *
  * ADDRESS: Virtual Address (U0 address)
  */
-static void sh7705_flush_cache_page(struct vm_area_struct *vma,
-				    unsigned long address, unsigned long pfn)
+static void sh7705_flush_cache_page(void *args)
 {
+	struct flusher_data *data = args;
+	unsigned long pfn = data->addr2;
+
 	__flush_dcache_page(pfn << PAGE_SHIFT);
 }

@@ -198,20 +176,19 @@ static void sh7705_flush_cache_page(struct vm_area_struct *vma,
  *
  * Not entirely sure why this is necessary on SH3 with 32K cache but
  * without it we get occasional "Memory fault" when loading a program.
  */
-static void sh7705_flush_icache_page(struct vm_area_struct *vma,
-				     struct page *page)
+static void sh7705_flush_icache_page(void *page)
 {
 	__flush_purge_region(page_address(page), PAGE_SIZE);
 }

 void __init sh7705_cache_init(void)
 {
-	flush_icache_range = sh7705_flush_icache_range;
-	flush_dcache_page = sh7705_flush_dcache_page;
-	flush_cache_all = sh7705_flush_cache_all;
-	flush_cache_mm = sh7705_flush_cache_mm;
-	flush_cache_dup_mm = sh7705_flush_cache_mm;
-	flush_cache_range = sh7705_flush_cache_range;
-	flush_cache_page = sh7705_flush_cache_page;
-	flush_icache_page = sh7705_flush_icache_page;
+	local_flush_icache_range = sh7705_flush_icache_range;
+	local_flush_dcache_page = sh7705_flush_dcache_page;
+	local_flush_cache_all = sh7705_flush_cache_all;
+	local_flush_cache_mm = sh7705_flush_cache_all;
+	local_flush_cache_dup_mm = sh7705_flush_cache_all;
+	local_flush_cache_range = sh7705_flush_cache_all;
+	local_flush_cache_page = sh7705_flush_cache_page;
+	local_flush_icache_page = sh7705_flush_icache_page;
 }

--
cgit v1.2.2
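With the local_* hooks taking a single void *, the exported wrappers are expected
to marshal their arguments through struct flusher_data (which carries a vma pointer
and two addresses), so the same callback can be handed to on_each_cpu() and run on
every processor. A simplified sketch of what such a wrapper might look like
(illustrative only; the real common wrappers live in the generic sh cache code):

	/*
	 * Sketch of an SMP-aware wrapper around the local hook: pack the
	 * arguments into a flusher_data, since on_each_cpu() only passes
	 * a single void * to the callback on each CPU.
	 */
	void flush_icache_range(unsigned long start, unsigned long end)
	{
		struct flusher_data data;

		data.vma = NULL;
		data.addr1 = start;
		data.addr2 = end;

		on_each_cpu(local_flush_icache_range, &data, 1);
	}
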
From 64a6d72213dd810dd55bd0a503c36150af41c3c3 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 21 Aug 2009 18:21:07 +0900
Subject: sh: Kill off now redundant local irq disabling.

on_each_cpu() takes care of IRQ and preempt handling, the localized
handling in each of the called functions can be killed off.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache-sh7705.c | 8 --------
 1 file changed, 8 deletions(-)
(limited to 'arch/sh/mm/cache-sh7705.c')

diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 6293f57fa888..9dc38660e3de 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -81,7 +81,6 @@ static void sh7705_flush_icache_range(void *args)
 static void __flush_dcache_page(unsigned long phys)
 {
 	unsigned long ways, waysize, addrstart;
-	unsigned long flags;

 	phys |= SH_CACHE_VALID;

@@ -98,7 +97,6 @@ static void __flush_dcache_page(unsigned long phys)
 	 * potential cache aliasing, therefore the optimisation is probably not
 	 * possible.
 	 */
-	local_irq_save(flags);
 	jump_to_uncached();

 	ways = current_cpu_data.dcache.ways;
@@ -126,7 +124,6 @@ static void __flush_dcache_page(unsigned long phys)
 	} while (--ways);

 	back_to_cached();
-	local_irq_restore(flags);
 }

 /*
@@ -145,14 +142,9 @@ static void sh7705_flush_dcache_page(void *page)

 static void sh7705_flush_cache_all(void *args)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
 	jump_to_uncached();
-
 	cache_wback_all();
 	back_to_cached();
-	local_irq_restore(flags);
 }

 /*

--
cgit v1.2.2
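The reasoning is that on_each_cpu() already provides the required exclusion: it
disables preemption, runs the callback on the other CPUs from IPI (interrupt)
context, and invokes it on the local CPU with interrupts disabled. In rough,
simplified form (a sketch of the semantics, not the actual generic kernel
implementation, which carries more error handling):

	/*
	 * Rough sketch of on_each_cpu() semantics: remote CPUs run the
	 * callback from IPI context, the local CPU runs it with IRQs off,
	 * so the callback itself needs no local_irq_save()/restore().
	 */
	static void on_each_cpu_sketch(void (*func)(void *), void *info, int wait)
	{
		preempt_disable();
		smp_call_function(func, info, wait);	/* remote CPUs */
		local_irq_disable();
		func(info);				/* this CPU, IRQs off */
		local_irq_enable();
		preempt_enable();
	}

The revert that follows shows the catch: not all of these ops may be driven
through on_each_cpu(), since some only require preemption to be disabled, and
calling it from IRQ context can deadlock.
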
From 983f4c514c4c9ddac1077a2c805fd16cbe3f7487 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 1 Sep 2009 21:12:55 +0900
Subject: Revert "sh: Kill off now redundant local irq disabling."

This reverts commit 64a6d72213dd810dd55bd0a503c36150af41c3c3.

Unfortunately we can't use on_each_cpu() for all of the cache ops, as
some of them only require preempt disabling. This seems to be the same
issue that impacts the mips r4k caches, where this code was based on.

This fixes up a deadlock that showed up in some IRQ context cases.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache-sh7705.c | 8 ++++++++
 1 file changed, 8 insertions(+)
(limited to 'arch/sh/mm/cache-sh7705.c')

diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 9dc38660e3de..6293f57fa888 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -81,6 +81,7 @@ static void sh7705_flush_icache_range(void *args)
 static void __flush_dcache_page(unsigned long phys)
 {
 	unsigned long ways, waysize, addrstart;
+	unsigned long flags;

 	phys |= SH_CACHE_VALID;

@@ -97,6 +98,7 @@ static void __flush_dcache_page(unsigned long phys)
 	 * potential cache aliasing, therefore the optimisation is probably not
 	 * possible.
 	 */
+	local_irq_save(flags);
 	jump_to_uncached();

 	ways = current_cpu_data.dcache.ways;
@@ -124,6 +126,7 @@ static void __flush_dcache_page(unsigned long phys)
 	} while (--ways);

 	back_to_cached();
+	local_irq_restore(flags);
 }

 /*
@@ -142,9 +145,14 @@ static void sh7705_flush_dcache_page(void *page)

 static void sh7705_flush_cache_all(void *args)
 {
+	unsigned long flags;
+
+	local_irq_save(flags);
 	jump_to_uncached();
+
 	cache_wback_all();
 	back_to_cached();
+	local_irq_restore(flags);
 }

 /*

--
cgit v1.2.2

From c8c2df9055074197ba12902c6d7e840667fb56d6 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 15 Sep 2009 09:47:35 +0900
Subject: sh: Fix up sh7705 flush_dcache_page() build.

Type mismatch caused the page deref to blow up, fix it up as per the sh4
change.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache-sh7705.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
(limited to 'arch/sh/mm/cache-sh7705.c')

diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 6293f57fa888..2cadee2037ac 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -133,8 +133,9 @@ static void __flush_dcache_page(unsigned long phys)
  * Write back & invalidate the D-cache of the page.
  * (To avoid "alias" issues)
  */
-static void sh7705_flush_dcache_page(void *page)
+static void sh7705_flush_dcache_page(void *arg)
 {
+	struct page *page = arg;
 	struct address_space *mapping = page_mapping(page);

 	if (mapping && !mapping_mapped(mapping))

--
cgit v1.2.2
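The build fix is a plain C issue: once the hook's parameter became void * (so it
can double as an on_each_cpu() callback), a member access such as &page->flags no
longer compiles, because members cannot be reached through a void pointer. The
callback therefore recovers the typed pointer first, exactly as the patch above
does. A hypothetical before/after of the same idiom (function names invented):

	/* Hypothetical illustration of the type problem fixed above. */
	static void broken_hook(void *page)
	{
		/* would not compile: no member access through a void * */
		/* set_bit(PG_dcache_dirty, &page->flags); */
	}

	static void fixed_hook(void *arg)
	{
		struct page *page = arg;	/* void * converts without a cast */

		set_bit(PG_dcache_dirty, &page->flags);
	}
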