aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSam Ravnborg <sam@ravnborg.org>2012-05-20 11:27:53 -0400
committerDavid S. Miller <davem@davemloft.net>2012-05-20 16:33:36 -0400
commit1edc17832d8f49a0263d364c453ea35da0e4e2a6 (patch)
tree02c460fef4abb53a1b1ebe493b1e731ef36f6f26
parent9cd5f82246b724aae402959bffe0441b45a01a1c (diff)
sparc32: use flushi when run-time patching in per_cpu_patch
David S. Miller wrote: " The way we do that now is overkill. We only needed to use the MMU cache ops when we had sun4c around because sun4c lacked support for the "flush" instruction. But all sun4m and later chips have it so we can use it unconditionally. So in the per_cpu_patch() code, get rid of the cache ops invocation, and instead execute a "flush %reg" after each of the instruction patch assignments, where %reg is set to the address of the instruction that was stored into. Perhaps take the flushi() definition from asm/cacheflush_64.h and place it into asm/cacheflush.h, then you can simply use that. " Implemented as per suggestion. Moved run-time patching before we call paging_init(), so helper methods in paging_init() may utilise run-time patching too. Signed-off-by: Sam Ravnborg <sam@ravnborg.org> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--arch/sparc/include/asm/cacheflush.h4
-rw-r--r--arch/sparc/include/asm/cacheflush_64.h3
-rw-r--r--arch/sparc/kernel/setup_32.c16
3 files changed, 10 insertions, 13 deletions
diff --git a/arch/sparc/include/asm/cacheflush.h b/arch/sparc/include/asm/cacheflush.h
index 049168087b19..f6c4839b8388 100644
--- a/arch/sparc/include/asm/cacheflush.h
+++ b/arch/sparc/include/asm/cacheflush.h
@@ -1,5 +1,9 @@
1#ifndef ___ASM_SPARC_CACHEFLUSH_H 1#ifndef ___ASM_SPARC_CACHEFLUSH_H
2#define ___ASM_SPARC_CACHEFLUSH_H 2#define ___ASM_SPARC_CACHEFLUSH_H
3
4/* flush addr - to allow use of self-modifying code */
5#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
6
3#if defined(__sparc__) && defined(__arch64__) 7#if defined(__sparc__) && defined(__arch64__)
4#include <asm/cacheflush_64.h> 8#include <asm/cacheflush_64.h>
5#else 9#else
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index 2efea2ff88b7..301736d9e7a1 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -8,9 +8,6 @@
8#include <linux/mm.h> 8#include <linux/mm.h>
9 9
10/* Cache flush operations. */ 10/* Cache flush operations. */
11
12
13#define flushi(addr) __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")
14#define flushw_all() __asm__ __volatile__("flushw") 11#define flushw_all() __asm__ __volatile__("flushw")
15 12
16extern void __flushw_user(void); 13extern void __flushw_user(void);
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index d65b5a1c2209..c052313f4dc5 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -227,16 +227,14 @@ static void __init per_cpu_patch(void)
227 prom_halt(); 227 prom_halt();
228 } 228 }
229 *(unsigned int *) (addr + 0) = insns[0]; 229 *(unsigned int *) (addr + 0) = insns[0];
230 flushi(addr + 0);
230 *(unsigned int *) (addr + 4) = insns[1]; 231 *(unsigned int *) (addr + 4) = insns[1];
232 flushi(addr + 4);
231 *(unsigned int *) (addr + 8) = insns[2]; 233 *(unsigned int *) (addr + 8) = insns[2];
234 flushi(addr + 8);
232 235
233 p++; 236 p++;
234 } 237 }
235#ifdef CONFIG_SMP
236 local_ops->cache_all();
237#else
238 sparc32_cachetlb_ops->cache_all();
239#endif
240} 238}
241 239
242enum sparc_cpu sparc_cpu_model; 240enum sparc_cpu sparc_cpu_model;
@@ -340,13 +338,11 @@ void __init setup_arch(char **cmdline_p)
340 init_mm.context = (unsigned long) NO_CONTEXT; 338 init_mm.context = (unsigned long) NO_CONTEXT;
341 init_task.thread.kregs = &fake_swapper_regs; 339 init_task.thread.kregs = &fake_swapper_regs;
342 340
343 paging_init(); 341 /* Run-time patch instructions to match the cpu model */
344
345 /* Now that we have the cache ops hooked up, we can patch
346 * instructions.
347 */
348 per_cpu_patch(); 342 per_cpu_patch();
349 343
344 paging_init();
345
350 smp_setup_cpu_possible_map(); 346 smp_setup_cpu_possible_map();
351} 347}
352 348