author		Glauber de Oliveira Costa <gcosta@redhat.com>	2008-01-30 07:31:08 -0500
committer	Ingo Molnar <mingo@elte.hu>			2008-01-30 07:31:08 -0500
commit		d89542229b657bdcce6a6f76168f9098ee3e9344
tree		5beb503b43c24a3d666898ef68b656caae77e4da	/include/asm-x86/system_32.h
parent		e34907ae180f4fe6c28bb4516c679c2f81b0c9ed
x86: put together equal pieces of system.h
This patch puts together the pieces of system_{32,64}.h that look the same. It is the first step towards the integration of these files.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
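For readers skimming only this file's half of the patch: the deletions below are not dead-code removal; the same definitions are hoisted into the shared header so they exist exactly once. A minimal sketch of the resulting include/asm-x86/system.h shape (illustrative and abbreviated, assuming the shared pieces land there verbatim and the usual CONFIG_X86_32 dispatch of the asm-x86 headers of that era):

	#ifndef _ASM_X86_SYSTEM_H_
	#define _ASM_X86_SYSTEM_H_

	#ifdef CONFIG_X86_32
	# include "system_32.h"	/* what stays 32-bit only */
	#else
	# include "system_64.h"	/* what stays 64-bit only */
	#endif

	/* Definitions that were byte-for-byte identical in both files
	 * now live here once, e.g. two of the moved helpers: */
	#define savesegment(seg, value) \
		asm volatile("mov %%" #seg ",%0":"=rm" (value))

	#define nop() __asm__ __volatile__ ("nop")

	#endif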
Diffstat (limited to 'include/asm-x86/system_32.h')
-rw-r--r--	include/asm-x86/system_32.h	58
1 file changed, 0 insertions(+), 58 deletions(-)
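Reading aid for the first hunk below: _set_base() and _set_limit() scatter a segment base and limit across the split fields of an 8-byte i386 descriptor via inline asm. Here is the same byte layout in plain C, as a hypothetical standalone sketch (set_desc_base/set_desc_limit are made-up names, not kernel API):

	/* Descriptor byte layout:
	 *  bytes 0-1: limit[15:0]    byte 6, low nibble:  limit[19:16]
	 *  bytes 2-4: base[23:0]     byte 6, high nibble: flags (preserved)
	 *  byte  7  : base[31:24]
	 */
	static void set_desc_base(unsigned char *d, unsigned long base)
	{
		d[2] = base & 0xff;		/* the movw %%dx,%1 pair */
		d[3] = (base >> 8) & 0xff;
		d[4] = (base >> 16) & 0xff;	/* movb %%dl,%2 */
		d[7] = (base >> 24) & 0xff;	/* movb %%dh,%3 */
	}

	static void set_desc_limit(unsigned char *d, unsigned long limit)
	{
		d[0] = limit & 0xff;
		d[1] = (limit >> 8) & 0xff;
		/* keep the flags nibble, as "andb $0xf0,%%dh" does */
		d[6] = (d[6] & 0xf0) | ((limit >> 16) & 0x0f);
	}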
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index 28978b17b07a..fb457642ac58 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -34,34 +34,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
34 "2" (prev), "d" (next)); \ 34 "2" (prev), "d" (next)); \
35} while (0) 35} while (0)
36 36
37#define _set_base(addr,base) do { unsigned long __pr; \
38__asm__ __volatile__ ("movw %%dx,%1\n\t" \
39 "rorl $16,%%edx\n\t" \
40 "movb %%dl,%2\n\t" \
41 "movb %%dh,%3" \
42 :"=&d" (__pr) \
43 :"m" (*((addr)+2)), \
44 "m" (*((addr)+4)), \
45 "m" (*((addr)+7)), \
46 "0" (base) \
47 ); } while(0)
48
49#define _set_limit(addr,limit) do { unsigned long __lr; \
50__asm__ __volatile__ ("movw %%dx,%1\n\t" \
51 "rorl $16,%%edx\n\t" \
52 "movb %2,%%dh\n\t" \
53 "andb $0xf0,%%dh\n\t" \
54 "orb %%dh,%%dl\n\t" \
55 "movb %%dl,%2" \
56 :"=&d" (__lr) \
57 :"m" (*(addr)), \
58 "m" (*((addr)+6)), \
59 "0" (limit) \
60 ); } while(0)
61
62#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
63#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
64
65/* 37/*
66 * Load a segment. Fall back on loading the zero 38 * Load a segment. Fall back on loading the zero
67 * segment if something goes wrong.. 39 * segment if something goes wrong..
@@ -83,12 +55,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
83 ".previous" \ 55 ".previous" \
84 : :"rm" (value)) 56 : :"rm" (value))
85 57
86/*
87 * Save a segment register away
88 */
89#define savesegment(seg, value) \
90 asm volatile("mov %%" #seg ",%0":"=rm" (value))
91
92 58
93static inline void native_clts(void) 59static inline void native_clts(void)
94{ 60{
@@ -161,11 +127,6 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
-static inline void clflush(void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -190,15 +151,6 @@ static inline void clflush(void *__p)
 
 #endif /* __KERNEL__ */
 
-static inline unsigned long get_limit(unsigned long segment)
-{
-	unsigned long __limit;
-	__asm__("lsll %1,%0"
-		:"=r" (__limit):"r" (segment));
-	return __limit+1;
-}
-
-#define nop() __asm__ __volatile__ ("nop")
 
 /*
  * Force strict CPU ordering.
@@ -305,15 +257,5 @@ static inline unsigned long get_limit(unsigned long segment)
  * disable hlt during certain critical i/o operations
  */
 #define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
-extern int es7000_plat;
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
-void default_idle(void);
 
 #endif
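Appendix: the hoisted one-liners behave identically wherever they live. A hedged user-space approximation of two of them (hypothetical test program, GCC or Clang on x86 assumed; the kernel versions differ only by the __force sparse annotation):

	#include <stdio.h>

	/* user-space copies of two helpers the patch hoists */
	#define savesegment(seg, value) \
		asm volatile("mov %%" #seg ",%0" : "=rm" (value))

	static inline void clflush(void *p)
	{
		asm volatile("clflush %0" : "+m" (*(char *)p));
	}

	int main(void)
	{
		unsigned int cs = 0;	/* pre-zero: a memory destination gets only a 16-bit store */
		char line[64] = "dirty";

		savesegment(cs, cs);	/* read %cs; mov does not touch flags */
		clflush(line);		/* write back + invalidate that cache line */
		printf("cs=%#x, line=\"%s\"\n", cs, line);
		return 0;
	}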