author:    Zachary Amsden <zach@vmware.com>    2005-09-03 18:56:36 -0400
committer: Linus Torvalds <torvalds@evo.osdl.org>    2005-09-05 03:06:11 -0400
commit:    4bb0d3ec3e5b1e9e2399cdc641b3b6521ac9cdaa
tree:      5e8d7646f5c6a2cec990b6d591f230d496b20664 /include/asm-i386/system.h
parent:    2a0694d15d55d0deed928786a6393d5e45e37d76
[PATCH] i386: inline asm cleanup
i386 inline asm cleanup: use cr/dr accessor functions.

Also a potential bugfix: some CR accessors really should be volatile.
Reads from CR0 (the numeric state may change in an exception handler),
writes to CR4 (flipping CR4.TSD) and reads from CR2 (the page-fault
address) must not be subject to instruction re-ordering.  I did not add
a memory clobber to the CR3 / CR4 / CR0 updates, as it was not there to
begin with, and in no case should kernel memory be clobbered, except
when doing a TLB flush, which already has a memory clobber.

I noticed that page invalidation does not have a memory clobber.  I
can't find a bug as a result, but there is definitely a potential for a
bug here:

#define __flush_tlb_single(addr) \
	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))

Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
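Why the volatile matters is easiest to demonstrate outside ring 0.  The
sketch below is an illustration, not part of the patch: rdtsc stands in
for the privileged control-register reads, since an asm statement that
has output operands but no __volatile__, and that appears twice with
identical operands, is a legitimate target for common-subexpression
elimination.

/* Illustration only: compile with gcc -O2 on x86.  The two non-volatile
 * rdtsc statements have identical templates, outputs and (no) inputs,
 * so GCC may merge them and print a delta of 0; the volatile pair is
 * guaranteed to execute twice, in program order. */
#include <stdio.h>

static inline unsigned long long tsc_nonvolatile(void)
{
	unsigned int lo, hi;
	__asm__("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long)hi << 32) | lo;
}

static inline unsigned long long tsc_volatile(void)
{
	unsigned int lo, hi;
	__asm__ __volatile__("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
	unsigned long long a = tsc_nonvolatile();
	unsigned long long b = tsc_nonvolatile();
	printf("non-volatile delta: %llu\n", b - a);	/* may be 0 */

	a = tsc_volatile();
	b = tsc_volatile();
	printf("volatile delta:     %llu\n", b - a);	/* never merged */
	return 0;
}

The fix implied for __flush_tlb_single would presumably be a "memory"
clobber on the invlpg, along these lines (again an illustration, not
something this patch changes):

#define __flush_tlb_single(addr) \
	__asm__ __volatile__("invlpg %0": :"m" (*(char *) addr) : "memory")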
Diffstat (limited to 'include/asm-i386/system.h')
-rw-r--r--  include/asm-i386/system.h | 28
1 file changed, 25 insertions(+), 3 deletions(-)
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 3db717a244f0..8048a5e018cd 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -107,13 +107,33 @@ static inline unsigned long _get_base(char * addr)
 #define clts() __asm__ __volatile__ ("clts")
 #define read_cr0() ({ \
 	unsigned int __dummy; \
-	__asm__( \
+	__asm__ __volatile__( \
 		"movl %%cr0,%0\n\t" \
 		:"=r" (__dummy)); \
 	__dummy; \
 })
 #define write_cr0(x) \
-	__asm__("movl %0,%%cr0": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr0": :"r" (x));
+
+#define read_cr2() ({ \
+	unsigned int __dummy; \
+	__asm__ __volatile__( \
+		"movl %%cr2,%0\n\t" \
+		:"=r" (__dummy)); \
+	__dummy; \
+})
+#define write_cr2(x) \
+	__asm__ __volatile__("movl %0,%%cr2": :"r" (x));
+
+#define read_cr3() ({ \
+	unsigned int __dummy; \
+	__asm__ ( \
+		"movl %%cr3,%0\n\t" \
+		:"=r" (__dummy)); \
+	__dummy; \
+})
+#define write_cr3(x) \
+	__asm__ __volatile__("movl %0,%%cr3": :"r" (x));
 
 #define read_cr4() ({ \
 	unsigned int __dummy; \
@@ -123,7 +143,7 @@ static inline unsigned long _get_base(char * addr)
 	__dummy; \
 })
 #define write_cr4(x) \
-	__asm__("movl %0,%%cr4": :"r" (x));
+	__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
 #define stts() write_cr0(8 | read_cr0())
 
 #endif /* __KERNEL__ */
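For illustration (not part of the commit): the CR4.TSD flip the
changelog mentions composes directly from the accessors above.  The
helper name and the spelled-out bit value below are assumptions for the
sketch; this is a ring-0 read-modify-write:

#define X86_CR4_TSD 0x0004	/* CR4 bit 2: RDTSC faults outside CPL 0 */

/* Hypothetical helper: restrict rdtsc to the kernel.  The volatile
 * write_cr4() keeps the store from being re-ordered past code that
 * assumes TSD is already in effect. */
static inline void disable_user_rdtsc(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}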
@@ -447,6 +467,8 @@ struct alt_instr {
 #define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
 /* used in the idle loop; sti takes one instruction cycle to complete */
 #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shutdown the processor */
+#define halt() __asm__ __volatile__("hlt": : :"memory")
 
 #define irqs_disabled() \
 ({ \
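For context on the new halt() (an annotation, not part of the diff):
the sti in safe_halt() matters because sti delays interrupt delivery
until after the next instruction, so an interrupt that becomes pending
after the idle check is serviced only once the CPU is halted and will
wake it.  A sketch of the usage pattern, with need_resched() as the
wakeup condition, roughly the shape of this era's default_idle():

/* Sketch of an idle loop using the two flavours.  Work is tested with
 * interrupts off; safe_halt() then re-enables them and halts with no
 * window for a lost wakeup.  Plain halt() is for callers that already
 * run with interrupts enabled, or that are stopping the CPU for good. */
static void idle_sketch(void)
{
	while (1) {
		local_irq_disable();
		if (!need_resched())
			safe_halt();	/* wakes with interrupts enabled */
		else
			local_irq_enable();
	}
}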