path: root/include/asm-i386/processor.h
author     Zachary Amsden <zach@vmware.com>          2005-09-03 18:56:36 -0400
committer  Linus Torvalds <torvalds@evo.osdl.org>    2005-09-05 03:06:11 -0400
commit     4bb0d3ec3e5b1e9e2399cdc641b3b6521ac9cdaa
tree       5e8d7646f5c6a2cec990b6d591f230d496b20664    /include/asm-i386/processor.h
parent     2a0694d15d55d0deed928786a6393d5e45e37d76
[PATCH] i386: inline asm cleanup
i386 inline asm cleanup: use the cr/dr accessor functions.

This also carries a potential bugfix: some CR accessors really should be volatile, so that the compiler cannot re-order or elide them. That applies to reads from CR0 (numeric state may change in an exception handler), writes to CR4 (flipping CR4.TSD) and reads from CR2 (the page fault address).

I did not add a memory clobber to the CR3 / CR4 / CR0 updates, as none was there to begin with, and in no case should kernel memory be clobbered, except when doing a TLB flush, which already has a memory clobber. I did notice that single-page invalidation has no memory clobber. I can't find a bug as a result, but there is definite potential for one here:

    #define __flush_tlb_single(addr) \
            __asm__ __volatile__("invlpg %0": :"m" (*(char *) addr))

Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
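For reference, the accessor pattern the patch switches to looks roughly like the following. This is a minimal sketch assuming GCC extended asm; the real definitions live in include/asm-i386/system.h and are not reproduced verbatim here:

    /* Sketch of the CR4 accessors (illustrative, not the header's
     * literal code).  The asm is volatile so the compiler can neither
     * cache the read nor re-order the write, per the reasoning above. */
    static inline unsigned long read_cr4(void)
    {
            unsigned long val;
            asm volatile("movl %%cr4,%0" : "=r" (val));
            return val;
    }

    static inline void write_cr4(unsigned long val)
    {
            asm volatile("movl %0,%%cr4" : : "r" (val));
    }

Note that neither accessor has a "memory" clobber, matching the point above that the CR0/CR3/CR4 updates never had one.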
Diffstat (limited to 'include/asm-i386/processor.h')
-rw-r--r--    include/asm-i386/processor.h | 22 +++++++++-------------
1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index d0d8b0160090..7e17d3b4f65a 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -203,9 +203,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
 	return edx;
 }
 
-#define load_cr3(pgdir) \
-	asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))
-
+#define load_cr3(pgdir) write_cr3(__pa(pgdir))
 
 /*
  * Intel CPU features in CR4
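The new-side macro forwards to write_cr3(), which under the same assumptions as the sketch above would look like this (illustrative only; an asm statement with no outputs is implicitly volatile in GCC, so the write cannot be optimized away):

    /* Sketch of write_cr3() (not the header's literal code): load a
     * new page directory base.  No memory clobber, as noted in the
     * commit message. */
    static inline void write_cr3(unsigned long val)
    {
            asm volatile("movl %0,%%cr3" : : "r" (val));
    }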
@@ -232,22 +230,20 @@ extern unsigned long mmu_cr4_features;
 
 static inline void set_in_cr4 (unsigned long mask)
 {
+	unsigned cr4;
 	mmu_cr4_features |= mask;
-	__asm__("movl %%cr4,%%eax\n\t"
-		"orl %0,%%eax\n\t"
-		"movl %%eax,%%cr4\n"
-		: : "irg" (mask)
-		:"ax");
+	cr4 = read_cr4();
+	cr4 |= mask;
+	write_cr4(cr4);
 }
 
 static inline void clear_in_cr4 (unsigned long mask)
 {
+	unsigned cr4;
 	mmu_cr4_features &= ~mask;
-	__asm__("movl %%cr4,%%eax\n\t"
-		"andl %0,%%eax\n\t"
-		"movl %%eax,%%cr4\n"
-		: : "irg" (~mask)
-		:"ax");
+	cr4 = read_cr4();
+	cr4 &= ~mask;
+	write_cr4(cr4);
 }
 
 /*
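On the __flush_tlb_single() concern from the commit message: a hypothetical hardening, not part of this patch, would add a "memory" clobber so pending stores reach memory before the invalidation, mirroring what the full TLB flush already does:

    /* Hypothetical variant (not in this patch): the "memory" clobber
     * forbids the compiler from moving memory accesses across the
     * single-page invalidation. */
    #define __flush_tlb_single(addr) \
            __asm__ __volatile__("invlpg %0" : : "m" (*(char *)(addr)) : "memory")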