path: root/arch/i386/power
author	Zachary Amsden <zach@vmware.com>	2005-09-03 18:56:36 -0400
committer	Linus Torvalds <torvalds@evo.osdl.org>	2005-09-05 03:06:11 -0400
commit	4bb0d3ec3e5b1e9e2399cdc641b3b6521ac9cdaa (patch)
tree	5e8d7646f5c6a2cec990b6d591f230d496b20664 /arch/i386/power
parent	2a0694d15d55d0deed928786a6393d5e45e37d76 (diff)
[PATCH] i386: inline asm cleanup
i386 inline asm cleanup. Use cr/dr accessor functions.

Also, a potential bugfix. Also, some CR accessors really should be volatile. Reads from CR0 (numeric state may change in an exception handler), writes to CR4 (flipping CR4.TSD) and reads from CR2 (page fault) prevent instruction re-ordering.

I did not add memory clobber to CR3 / CR4 / CR0 updates, as it was not there to begin with, and in no case should kernel memory be clobbered, except when doing a TLB flush, which already has memory clobber.

I noticed that page invalidation does not have a memory clobber. I can't find a bug as a result, but there is definitely a potential for a bug here:

	#define __flush_tlb_single(addr) \
		__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))

Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
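For context, the accessor style this patch switches to looks roughly like the sketch below. This is illustrative only: the in-tree versions of this era are macros in include/asm-i386/system.h, not the standalone inline functions shown here. The comments restate the volatility argument from the message above.

	/* Illustrative sketch of the cr accessor style; not the kernel's
	 * exact code, which defines these as macros in system.h. */
	static inline unsigned long read_cr0(void)
	{
		unsigned long val;
		/* volatile: CR0 state (e.g. the TS bit) may change in an
		 * exception handler, so the read must not be reordered
		 * or elided by the compiler. */
		asm volatile("movl %%cr0, %0" : "=r" (val));
		return val;
	}

	static inline unsigned long read_cr2(void)
	{
		unsigned long val;
		/* volatile: CR2 holds the faulting address after a page
		 * fault, so stale or hoisted reads would be wrong. */
		asm volatile("movl %%cr2, %0" : "=r" (val));
		return val;
	}

	static inline void write_cr4(unsigned long val)
	{
		/* volatile: flipping bits such as CR4.TSD must stay
		 * ordered with the surrounding code. */
		asm volatile("movl %0, %%cr4" : : "r" (val));
	}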
Diffstat (limited to 'arch/i386/power')
-rw-r--r--	arch/i386/power/cpu.c	| 16
1 file changed, 8 insertions, 8 deletions
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index c547c1af6fa1..4e19c43e0954 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -57,10 +57,10 @@ void __save_processor_state(struct saved_context *ctxt)
 	/*
 	 * control registers
 	 */
-	asm volatile ("movl %%cr0, %0" : "=r" (ctxt->cr0));
-	asm volatile ("movl %%cr2, %0" : "=r" (ctxt->cr2));
-	asm volatile ("movl %%cr3, %0" : "=r" (ctxt->cr3));
-	asm volatile ("movl %%cr4, %0" : "=r" (ctxt->cr4));
+	ctxt->cr0 = read_cr0();
+	ctxt->cr2 = read_cr2();
+	ctxt->cr3 = read_cr3();
+	ctxt->cr4 = read_cr4();
 }
 
 void save_processor_state(void)
@@ -109,10 +109,10 @@ void __restore_processor_state(struct saved_context *ctxt)
 	/*
 	 * control registers
 	 */
-	asm volatile ("movl %0, %%cr4" :: "r" (ctxt->cr4));
-	asm volatile ("movl %0, %%cr3" :: "r" (ctxt->cr3));
-	asm volatile ("movl %0, %%cr2" :: "r" (ctxt->cr2));
-	asm volatile ("movl %0, %%cr0" :: "r" (ctxt->cr0));
+	write_cr4(ctxt->cr4);
+	write_cr3(ctxt->cr3);
+	write_cr2(ctxt->cr2);
+	write_cr0(ctxt->cr0);
 
 	/*
 	 * now restore the descriptor tables to their proper values
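As a footnote to the clobber discussion in the commit message above: a hardened variant of the __flush_tlb_single macro quoted there might look like the sketch below. The added "memory" clobber is an assumption about one way to close the window described in the message; it is not part of this patch.

	/* Hypothetical variant of __flush_tlb_single with a "memory"
	 * clobber, so the compiler cannot move pending stores or loads
	 * across the invlpg. NOT part of this commit. */
	#define __flush_tlb_single(addr) \
		__asm__ __volatile__("invlpg %0" : : "m" (*(char *)(addr)) : "memory")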