author     Glauber de Oliveira Costa <gcosta@redhat.com>  2008-01-30 07:31:08 -0500
committer  Ingo Molnar <mingo@elte.hu>                    2008-01-30 07:31:08 -0500
commit     d89542229b657bdcce6a6f76168f9098ee3e9344 (patch)
tree       5beb503b43c24a3d666898ef68b656caae77e4da /include
parent     e34907ae180f4fe6c28bb4516c679c2f81b0c9ed (diff)
x86: put together equal pieces of system.h
This patch puts together the pieces of system_{32,64}.h that look the same.
It is the first step towards the integration of this file.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/system.h     69
-rw-r--r--  include/asm-x86/system_32.h  58
-rw-r--r--  include/asm-x86/system_64.h  12
3 files changed, 69 insertions(+), 70 deletions(-)
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 692562b48f2a..d0803f8c70c4 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -1,5 +1,74 @@
+#ifndef _ASM_X86_SYSTEM_H_
+#define _ASM_X86_SYSTEM_H_
+
+#include <asm/asm.h>
+
 #ifdef CONFIG_X86_32
 # include "system_32.h"
 #else
 # include "system_64.h"
 #endif
+
+#ifdef __KERNEL__
+#define _set_base(addr, base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %%dl,%2\n\t" \
+	"movb %%dh,%3" \
+	:"=&d" (__pr) \
+	:"m" (*((addr)+2)), \
+	 "m" (*((addr)+4)), \
+	 "m" (*((addr)+7)), \
+	 "0" (base) \
+	); } while (0)
+
+#define _set_limit(addr, limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %2,%%dh\n\t" \
+	"andb $0xf0,%%dh\n\t" \
+	"orb %%dh,%%dl\n\t" \
+	"movb %%dl,%2" \
+	:"=&d" (__lr) \
+	:"m" (*(addr)), \
+	 "m" (*((addr)+6)), \
+	 "0" (limit) \
+	); } while (0)
+
+#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
+#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value) \
+	asm volatile("mov %%" #seg ",%0":"=rm" (value))
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+	unsigned long __limit;
+	__asm__("lsll %1,%0"
+		:"=r" (__limit):"r" (segment));
+	return __limit+1;
+}
+#endif /* __KERNEL__ */
+
+static inline void clflush(void *__p)
+{
+	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+void disable_hlt(void);
+void enable_hlt(void);
+
+extern int es7000_plat;
+void cpu_idle_wait(void);
+
+extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+void default_idle(void);
+
+#endif
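
For reference, the descriptor surgery performed by _set_base()/_set_limit() above can be written as plain C. The sketch below assumes the standard 8-byte x86 segment descriptor layout; the helper names are illustrative and do not exist in the kernel:

#include <stdint.h>

/* Plain-C sketch of _set_base(): scatter a 32-bit base across
 * descriptor bytes 2-4 and 7, as the movw/rorl/movb sequence in the
 * inline asm above does. Illustrative only; not kernel code. */
static void sketch_set_base(unsigned char *desc, uint32_t base)
{
	desc[2] = base & 0xff;		/* base bits  7:0  */
	desc[3] = (base >> 8) & 0xff;	/* base bits 15:8  */
	desc[4] = (base >> 16) & 0xff;	/* base bits 23:16 */
	desc[7] = (base >> 24) & 0xff;	/* base bits 31:24 */
}

/* Plain-C sketch of _set_limit(): store limit bits 15:0 in bytes 0-1
 * and merge bits 19:16 into the low nibble of byte 6, preserving the
 * flags nibble (G, D/B, L, AVL) that the "andb $0xf0" keeps. */
static void sketch_set_limit(unsigned char *desc, uint32_t limit)
{
	desc[0] = limit & 0xff;		/* limit bits  7:0 */
	desc[1] = (limit >> 8) & 0xff;	/* limit bits 15:8 */
	desc[6] = (desc[6] & 0xf0) | ((limit >> 16) & 0x0f);
}

Note that the set_limit() wrapper passes (limit)-1, which mirrors get_limit() returning the lsll result plus one.
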
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index 28978b17b07a..fb457642ac58 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -34,34 +34,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		     "2" (prev), "d" (next)); \
 } while (0)
 
-#define _set_base(addr,base) do { unsigned long __pr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-	"rorl $16,%%edx\n\t" \
-	"movb %%dl,%2\n\t" \
-	"movb %%dh,%3" \
-	:"=&d" (__pr) \
-	:"m" (*((addr)+2)), \
-	 "m" (*((addr)+4)), \
-	 "m" (*((addr)+7)), \
-	 "0" (base) \
-	); } while(0)
-
-#define _set_limit(addr,limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-	"rorl $16,%%edx\n\t" \
-	"movb %2,%%dh\n\t" \
-	"andb $0xf0,%%dh\n\t" \
-	"orb %%dh,%%dl\n\t" \
-	"movb %%dl,%2" \
-	:"=&d" (__lr) \
-	:"m" (*(addr)), \
-	 "m" (*((addr)+6)), \
-	 "0" (limit) \
-	); } while(0)
-
-#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
-
 /*
  * Load a segment. Fall back on loading the zero
  * segment if something goes wrong..
@@ -83,12 +55,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 		".previous" \
 	: :"rm" (value))
 
-/*
- * Save a segment register away
- */
-#define savesegment(seg, value) \
-	asm volatile("mov %%" #seg ",%0":"=rm" (value))
-
 
 static inline void native_clts(void)
 {
@@ -161,11 +127,6 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
-static inline void clflush(void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -190,15 +151,6 @@ static inline void clflush(void *__p)
 
 #endif /* __KERNEL__ */
 
-static inline unsigned long get_limit(unsigned long segment)
-{
-	unsigned long __limit;
-	__asm__("lsll %1,%0"
-		:"=r" (__limit):"r" (segment));
-	return __limit+1;
-}
-
-#define nop() __asm__ __volatile__ ("nop")
-
 
 /*
  * Force strict CPU ordering.
@@ -305,15 +257,5 @@ static inline unsigned long get_limit(unsigned long segment)
  * disable hlt during certain critical i/o operations
  */
 #define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
-extern int es7000_plat;
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
-void default_idle(void);
-
 
 #endif
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 3dcb217a7202..cc5b2666a044 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -141,13 +141,6 @@ static inline void write_cr8(unsigned long val)
 
 #endif /* __KERNEL__ */
 
-static inline void clflush(volatile void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-
-#define nop() __asm__ __volatile__ ("nop")
-
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() barrier()
@@ -177,9 +170,4 @@ static inline void clflush(volatile void *__p)
 
 #include <linux/irqflags.h>
 
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
 #endif
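
Finally, a minimal sketch of how the savesegment() and get_limit() helpers that this patch moves into the shared header fit together; the function below and the choice of %ds are assumptions for illustration, not code from this patch:

/* Illustrative only: read the current %ds selector with savesegment(),
 * then query its size with get_limit(), which wraps lsll and returns
 * the hardware limit plus one (i.e. the segment size in bytes). */
static unsigned long sketch_ds_segment_size(void)
{
	unsigned long seg;

	savesegment(ds, seg);
	return get_limit(seg);
}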