Diffstat (limited to 'arch/sh/mm/cache.c')
 arch/sh/mm/cache.c | 39 ++++++++++++++++++++++++++++++---------
 1 file changed, 30 insertions(+), 9 deletions(-)
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index a2dc7f9ecc51..0f4095d7ac8b 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -2,7 +2,7 @@
  * arch/sh/mm/cache.c
  *
  * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
- * Copyright (C) 2002 - 2009 Paul Mundt
+ * Copyright (C) 2002 - 2010 Paul Mundt
  *
  * Released under the terms of the GNU GPL v2.0.
  */
@@ -27,8 +27,11 @@ void (*local_flush_icache_page)(void *args) = cache_noop;
 void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
 
 void (*__flush_wback_region)(void *start, int size);
+EXPORT_SYMBOL(__flush_wback_region);
 void (*__flush_purge_region)(void *start, int size);
+EXPORT_SYMBOL(__flush_purge_region);
 void (*__flush_invalidate_region)(void *start, int size);
+EXPORT_SYMBOL(__flush_invalidate_region);
 
 static inline void noop__flush_region(void *start, int size)
 {
@@ -38,8 +41,17 @@ static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                        int wait)
 {
         preempt_disable();
-        smp_call_function(func, info, wait);
+
+        /*
+         * It's possible that this gets called early on when IRQs are
+         * still disabled due to ioremapping by the boot CPU, so don't
+         * even attempt IPIs unless there are other CPUs online.
+         */
+        if (num_online_cpus() > 1)
+                smp_call_function(func, info, wait);
+
         func(info);
+
         preempt_enable();
 }
 
@@ -130,12 +142,8 @@ void __update_cache(struct vm_area_struct *vma,
         page = pfn_to_page(pfn);
         if (pfn_valid(pfn)) {
                 int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-                if (dirty) {
-                        unsigned long addr = (unsigned long)page_address(page);
-
-                        if (pages_do_alias(addr, address & PAGE_MASK))
-                                __flush_purge_region((void *)addr, PAGE_SIZE);
-                }
+                if (dirty)
+                        __flush_purge_region(page_address(page), PAGE_SIZE);
         }
 }
 
@@ -161,14 +169,21 @@ void flush_cache_all(void)
 {
         cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
 }
+EXPORT_SYMBOL(flush_cache_all);
 
 void flush_cache_mm(struct mm_struct *mm)
 {
+        if (boot_cpu_data.dcache.n_aliases == 0)
+                return;
+
         cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
 }
 
 void flush_cache_dup_mm(struct mm_struct *mm)
 {
+        if (boot_cpu_data.dcache.n_aliases == 0)
+                return;
+
         cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
 }
 
@@ -195,11 +210,13 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 
         cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
 }
+EXPORT_SYMBOL(flush_cache_range);
 
 void flush_dcache_page(struct page *page)
 {
         cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
 }
+EXPORT_SYMBOL(flush_dcache_page);
 
 void flush_icache_range(unsigned long start, unsigned long end)
 {
@@ -265,7 +282,11 @@ static void __init emit_cache_params(void)
 
 void __init cpu_cache_init(void)
 {
-        unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+        unsigned int cache_disabled = 0;
+
+#ifdef CCR
+        cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+#endif
 
         compute_alias(&boot_cpu_data.icache);
         compute_alias(&boot_cpu_data.dcache);
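
Note: the EXPORT_SYMBOL() additions above make the SH cache maintenance hooks reachable from loadable modules. As a rough illustration of what that enables, here is a minimal sketch of a module that allocates a page and pushes it out of the data cache through two of the exported entry points. The module itself (its name, the single-page buffer, the init/exit flow) is hypothetical and not part of this change.

/*
 * Illustrative only: a throwaway module exercising the cache flush
 * helpers exported by this patch. "cacheflush_demo" is a made-up name.
 */
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

static struct page *demo_page;

static int __init cacheflush_demo_init(void)
{
        void *vaddr;

        demo_page = alloc_page(GFP_KERNEL);
        if (!demo_page)
                return -ENOMEM;

        vaddr = page_address(demo_page);
        memset(vaddr, 0, PAGE_SIZE);

        /* Write back + invalidate the buffer via the exported hooks. */
        __flush_purge_region(vaddr, PAGE_SIZE);
        flush_dcache_page(demo_page);

        return 0;
}

static void __exit cacheflush_demo_exit(void)
{
        __free_page(demo_page);
}

module_init(cacheflush_demo_init);
module_exit(cacheflush_demo_exit);
MODULE_LICENSE("GPL");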