about summary refs log tree commit diff stats
path: root/arch/sh/mm/cache-sh4.c
diff options
context:
space:
mode:
author: Paul Mundt <lethal@linux-sh.org> 2009-08-21 04:23:14 -0400
committer: Paul Mundt <lethal@linux-sh.org> 2009-08-21 04:23:14 -0400
commitf26b2a562b46ab186c8383993ab1332673ac4a47 (patch)
tree5cf52089da5ca762c07cf6c1364a6aa411fb3038 /arch/sh/mm/cache-sh4.c
parentf9bd71f255b4349c4f9f596863161fd5182f67fa (diff)
sh: Make cache flushers SMP-aware.
This does a bit of rework for making the cache flushers SMP-aware. The function pointer-based flushers are renamed to local variants with the exported interface being commonly implemented and wrapping as necessary. Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/cache-sh4.c')
-rw-r--r--arch/sh/mm/cache-sh4.c54
1 file changed, 37 insertions, 17 deletions
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 6c2db1401080..9201b37c7cca 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -43,15 +43,20 @@ static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
43 * Called from kernel/module.c:sys_init_module and routine for a.out format, 43 * Called from kernel/module.c:sys_init_module and routine for a.out format,
44 * signal handler code and kprobes code 44 * signal handler code and kprobes code
45 */ 45 */
46static void sh4_flush_icache_range(unsigned long start, unsigned long end) 46static void sh4_flush_icache_range(void *args)
47{ 47{
48 struct flusher_data *data = args;
48 int icacheaddr; 49 int icacheaddr;
50 unsigned long start, end;
49 unsigned long flags, v; 51 unsigned long flags, v;
50 int i; 52 int i;
51 53
54 start = data->addr1;
55 end = data->addr2;
56
52 /* If there are too many pages then just blow the caches */ 57 /* If there are too many pages then just blow the caches */
53 if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { 58 if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
54 flush_cache_all(); 59 local_flush_cache_all(args);
55 } else { 60 } else {
56 /* selectively flush d-cache then invalidate the i-cache */ 61 /* selectively flush d-cache then invalidate the i-cache */
57 /* this is inefficient, so only use for small ranges */ 62 /* this is inefficient, so only use for small ranges */
@@ -104,7 +109,7 @@ static inline void flush_cache_4096(unsigned long start,
104 * Write back & invalidate the D-cache of the page. 109 * Write back & invalidate the D-cache of the page.
105 * (To avoid "alias" issues) 110 * (To avoid "alias" issues)
106 */ 111 */
107static void sh4_flush_dcache_page(struct page *page) 112static void sh4_flush_dcache_page(void *page)
108{ 113{
109#ifndef CONFIG_SMP 114#ifndef CONFIG_SMP
110 struct address_space *mapping = page_mapping(page); 115 struct address_space *mapping = page_mapping(page);
@@ -155,7 +160,7 @@ static inline void flush_dcache_all(void)
155 wmb(); 160 wmb();
156} 161}
157 162
158static void sh4_flush_cache_all(void) 163static void sh4_flush_cache_all(void *unused)
159{ 164{
160 flush_dcache_all(); 165 flush_dcache_all();
161 flush_icache_all(); 166 flush_icache_all();
@@ -247,8 +252,10 @@ loop_exit:
247 * 252 *
248 * Caller takes mm->mmap_sem. 253 * Caller takes mm->mmap_sem.
249 */ 254 */
250static void sh4_flush_cache_mm(struct mm_struct *mm) 255static void sh4_flush_cache_mm(void *arg)
251{ 256{
257 struct mm_struct *mm = arg;
258
252 if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT) 259 if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
253 return; 260 return;
254 261
@@ -287,12 +294,18 @@ static void sh4_flush_cache_mm(struct mm_struct *mm)
287 * ADDR: Virtual Address (U0 address) 294 * ADDR: Virtual Address (U0 address)
288 * PFN: Physical page number 295 * PFN: Physical page number
289 */ 296 */
290static void sh4_flush_cache_page(struct vm_area_struct *vma, 297static void sh4_flush_cache_page(void *args)
291 unsigned long address, unsigned long pfn)
292{ 298{
293 unsigned long phys = pfn << PAGE_SHIFT; 299 struct flusher_data *data = args;
300 struct vm_area_struct *vma;
301 unsigned long address, pfn, phys;
294 unsigned int alias_mask; 302 unsigned int alias_mask;
295 303
304 vma = data->vma;
305 address = data->addr1;
306 pfn = data->addr2;
307 phys = pfn << PAGE_SHIFT;
308
296 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) 309 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
297 return; 310 return;
298 311
@@ -335,9 +348,16 @@ static void sh4_flush_cache_page(struct vm_area_struct *vma,
335 * Flushing the cache lines for U0 only isn't enough. 348 * Flushing the cache lines for U0 only isn't enough.
336 * We need to flush for P1 too, which may contain aliases. 349 * We need to flush for P1 too, which may contain aliases.
337 */ 350 */
338static void sh4_flush_cache_range(struct vm_area_struct *vma, 351static void sh4_flush_cache_range(void *args)
339 unsigned long start, unsigned long end)
340{ 352{
353 struct flusher_data *data = args;
354 struct vm_area_struct *vma;
355 unsigned long start, end;
356
357 vma = data->vma;
358 start = data->addr1;
359 end = data->addr2;
360
341 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT) 361 if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
342 return; 362 return;
343 363
@@ -663,13 +683,13 @@ void __init sh4_cache_init(void)
663 break; 683 break;
664 } 684 }
665 685
666 flush_icache_range = sh4_flush_icache_range; 686 local_flush_icache_range = sh4_flush_icache_range;
667 flush_dcache_page = sh4_flush_dcache_page; 687 local_flush_dcache_page = sh4_flush_dcache_page;
668 flush_cache_all = sh4_flush_cache_all; 688 local_flush_cache_all = sh4_flush_cache_all;
669 flush_cache_mm = sh4_flush_cache_mm; 689 local_flush_cache_mm = sh4_flush_cache_mm;
670 flush_cache_dup_mm = sh4_flush_cache_mm; 690 local_flush_cache_dup_mm = sh4_flush_cache_mm;
671 flush_cache_page = sh4_flush_cache_page; 691 local_flush_cache_page = sh4_flush_cache_page;
672 flush_cache_range = sh4_flush_cache_range; 692 local_flush_cache_range = sh4_flush_cache_range;
673 693
674 sh4__flush_region_init(); 694 sh4__flush_region_init();
675} 695}