author		Anton Blanchard <anton@samba.org>	2007-03-20 15:29:54 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-03-22 00:01:43 -0400
commit		4f5fa2fb1259f506d20e8af447117ec3ec426a53 (patch)
tree		71de5f42287ddb2899d1ec0a1bec54dcc346bdbb
parent		b4aea36b7956eeebfc56314ce0944db1441255ce (diff)
[POWERPC] Bypass hcall stats until cpu features have run
I noticed that we execute hcalls before cpu feature code has run (eg for
setting up the bolted kernel region).  This means that we may be executing
code that is not appropriate for the processor we have.

Create an unconditional branch that we nop out all the time to fix this.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--	arch/powerpc/platforms/pseries/hvCall.S	7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 1501b0a9e749..c1427b3634ec 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -30,9 +30,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_PURR);
 
 /*
  * postcall is performed immediately before function return which
- * allows liberal use of volatile registers.
+ * allows liberal use of volatile registers. We branch around this
+ * in early init (eg when populating the MMU hashtable) by using an
+ * unconditional cpu feature.
  */
 #define HCALL_INST_POSTCALL					\
+BEGIN_FTR_SECTION;						\
+	b	1f;						\
+END_FTR_SECTION(0, 1);						\
 	ld	r4,STK_PARM(r3)(r1);	/* validate opcode */	\
 	cmpldi	cr7,r4,MAX_HCALL_OPCODE;			\
 	bgt-	cr7,1f;						\
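
The patch leans on the kernel's CPU feature sections: the code between
BEGIN_FTR_SECTION and END_FTR_SECTION(mask, value) is kept only if the
detected CPU features satisfy (features & mask) == value, and is overwritten
with nops otherwise when the feature-fixup pass runs.  Since (features & 0)
can never equal 1, the "b 1f" added above is always nopped out once fixups
have run, so hcall stats are bypassed only in early init.  Below is a minimal,
illustrative C model of that fixup decision; the struct layout, function name
and main() driver are invented for this sketch and are not the kernel's
actual fixup code.

#include <stdint.h>
#include <stdio.h>

#define PPC_NOP 0x60000000u	/* "ori r0,r0,0" -- the PowerPC no-op */

/* Hypothetical feature-section descriptor, for illustration only. */
struct ftr_fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	uint32_t	*start;	/* first instruction of the section */
	uint32_t	*end;	/* one past the last instruction */
};

/* Keep the section if (features & mask) == value; otherwise nop it out. */
static void apply_feature_section(unsigned long cpu_features,
				  struct ftr_fixup_entry *e)
{
	if ((cpu_features & e->mask) == e->value)
		return;			/* feature test passed: keep the code */

	for (uint32_t *insn = e->start; insn < e->end; insn++)
		*insn = PPC_NOP;	/* test failed: replace with nops */
}

int main(void)
{
	uint32_t text[1] = { 0x48000010 };	/* stand-in for "b 1f" */
	struct ftr_fixup_entry e = {
		.mask = 0, .value = 1,		/* as in END_FTR_SECTION(0, 1) */
		.start = text, .end = text + 1,
	};

	/* (features & 0) == 1 never holds, so the branch is always nopped
	 * once the fixup pass runs, i.e. after CPU features are known. */
	apply_feature_section(0xffffffffUL, &e);
	printf("insn after fixup: 0x%08x\n", text[0]);
	return 0;
}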