author		Glauber de Oliveira Costa <gcosta@redhat.com>	2008-01-30 07:31:08 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:31:08 -0500
commit		d89542229b657bdcce6a6f76168f9098ee3e9344 (patch)
tree		5beb503b43c24a3d666898ef68b656caae77e4da
parent		e34907ae180f4fe6c28bb4516c679c2f81b0c9ed (diff)
x86: put together equal pieces of system.h
This patch puts together the pieces of system_{32,64}.h that
look the same. It is the first step towards the integration
of these headers.
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--	arch/x86/kernel/process_64.c	 2
-rw-r--r--	include/asm-x86/system.h	69
-rw-r--r--	include/asm-x86/system_32.h	58
-rw-r--r--	include/asm-x86/system_64.h	12
4 files changed, 70 insertions, 71 deletions
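Before the diff itself, a minimal sketch of the layout the commit message describes: the definitions that are identical on both word sizes move into the common system.h, while anything still architecture-specific stays behind the CONFIG_X86_32 switch. This is an illustration of the pattern only, not the literal file contents; the shared items shown are a small sample taken from the hunks below.

/*
 * Illustration only -- the shape of the merged header after this step.
 * Shared definitions live in the common part; the per-architecture
 * headers keep the rest.
 */
#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#include <asm/asm.h>

#ifdef CONFIG_X86_32
# include "system_32.h"		/* 32-bit-only definitions */
#else
# include "system_64.h"		/* 64-bit-only definitions */
#endif

/* definitions shared by both architectures follow, for example: */
#define nop() __asm__ __volatile__ ("nop")

void cpu_idle_wait(void);
void default_idle(void);

#endif /* _ASM_X86_SYSTEM_H_ */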
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 4c4d8b3f046..057b5442ffd 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -99,7 +99,7 @@ void exit_idle(void)
  * We use this if we don't have any better
  * idle routine..
  */
-static void default_idle(void)
+void default_idle(void)
 {
 	current_thread_info()->status &= ~TS_POLLING;
 	/*
diff --git a/include/asm-x86/system.h b/include/asm-x86/system.h
index 692562b48f2..d0803f8c70c 100644
--- a/include/asm-x86/system.h
+++ b/include/asm-x86/system.h
@@ -1,5 +1,74 @@
+#ifndef _ASM_X86_SYSTEM_H_
+#define _ASM_X86_SYSTEM_H_
+
+#include <asm/asm.h>
+
 #ifdef CONFIG_X86_32
 # include "system_32.h"
 #else
 # include "system_64.h"
 #endif
+
+#ifdef __KERNEL__
+#define _set_base(addr, base) do { unsigned long __pr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %%dl,%2\n\t" \
+	"movb %%dh,%3" \
+	:"=&d" (__pr) \
+	:"m" (*((addr)+2)), \
+	 "m" (*((addr)+4)), \
+	 "m" (*((addr)+7)), \
+	 "0" (base) \
+	); } while (0)
+
+#define _set_limit(addr, limit) do { unsigned long __lr; \
+__asm__ __volatile__ ("movw %%dx,%1\n\t" \
+	"rorl $16,%%edx\n\t" \
+	"movb %2,%%dh\n\t" \
+	"andb $0xf0,%%dh\n\t" \
+	"orb %%dh,%%dl\n\t" \
+	"movb %%dl,%2" \
+	:"=&d" (__lr) \
+	:"m" (*(addr)), \
+	 "m" (*((addr)+6)), \
+	 "0" (limit) \
+	); } while (0)
+
+#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
+#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
+
+/*
+ * Save a segment register away
+ */
+#define savesegment(seg, value) \
+	asm volatile("mov %%" #seg ",%0":"=rm" (value))
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+	unsigned long __limit;
+	__asm__("lsll %1,%0"
+		:"=r" (__limit):"r" (segment));
+	return __limit+1;
+}
+#endif /* __KERNEL__ */
+
+static inline void clflush(void *__p)
+{
+	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+void disable_hlt(void);
+void enable_hlt(void);
+
+extern int es7000_plat;
+void cpu_idle_wait(void);
+
+extern unsigned long arch_align_stack(unsigned long sp);
+extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
+
+void default_idle(void);
+
+#endif
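With the hunk above applied, the segment helpers are reachable through the common header on either architecture. A hedged usage sketch follows; it is not part of the patch, dump_fs_limit() is a hypothetical caller, and it assumes the usual 2.6.24-era printk API.

/*
 * Hypothetical caller, for illustration only: read the %fs selector
 * with savesegment() and query its limit with get_limit(), both of
 * which now come from the shared <asm/system.h>.
 */
#include <linux/kernel.h>
#include <asm/system.h>

static void dump_fs_limit(void)
{
	unsigned long sel, limit;

	savesegment(fs, sel);		/* mov %fs, sel */
	limit = get_limit(sel);		/* lsll-based limit, plus one */
	printk(KERN_DEBUG "fs selector %#lx, limit %lu\n", sel, limit);
}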
diff --git a/include/asm-x86/system_32.h b/include/asm-x86/system_32.h
index 28978b17b07..fb457642ac5 100644
--- a/include/asm-x86/system_32.h
+++ b/include/asm-x86/system_32.h
@@ -34,34 +34,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 		      "2" (prev), "d" (next));		\
 } while (0)
 
-#define _set_base(addr,base) do { unsigned long __pr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-	"rorl $16,%%edx\n\t" \
-	"movb %%dl,%2\n\t" \
-	"movb %%dh,%3" \
-	:"=&d" (__pr) \
-	:"m" (*((addr)+2)), \
-	 "m" (*((addr)+4)), \
-	 "m" (*((addr)+7)), \
-	 "0" (base) \
-	); } while(0)
-
-#define _set_limit(addr,limit) do { unsigned long __lr; \
-__asm__ __volatile__ ("movw %%dx,%1\n\t" \
-	"rorl $16,%%edx\n\t" \
-	"movb %2,%%dh\n\t" \
-	"andb $0xf0,%%dh\n\t" \
-	"orb %%dh,%%dl\n\t" \
-	"movb %%dl,%2" \
-	:"=&d" (__lr) \
-	:"m" (*(addr)), \
-	 "m" (*((addr)+6)), \
-	 "0" (limit) \
-	); } while(0)
-
-#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
-
 /*
  * Load a segment. Fall back on loading the zero
  * segment if something goes wrong..
@@ -83,12 +55,6 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
 	".previous"			\
 	: :"rm" (value))
 
-/*
- * Save a segment register away
- */
-#define savesegment(seg, value) \
-	asm volatile("mov %%" #seg ",%0":"=rm" (value))
-
 
 static inline void native_clts(void)
 {
@@ -161,11 +127,6 @@ static inline void native_wbinvd(void)
 	asm volatile("wbinvd": : :"memory");
 }
 
-static inline void clflush(void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -190,15 +151,6 @@ static inline void clflush(void *__p)
 
 #endif /* __KERNEL__ */
 
-static inline unsigned long get_limit(unsigned long segment)
-{
-	unsigned long __limit;
-	__asm__("lsll %1,%0"
-		:"=r" (__limit):"r" (segment));
-	return __limit+1;
-}
-
-#define nop() __asm__ __volatile__ ("nop")
 
 /*
  * Force strict CPU ordering.
@@ -305,15 +257,5 @@ static inline unsigned long get_limit(unsigned long segment)
  * disable hlt during certain critical i/o operations
  */
 #define HAVE_DISABLE_HLT
-void disable_hlt(void);
-void enable_hlt(void);
-
-extern int es7000_plat;
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
-void default_idle(void);
 
 #endif
diff --git a/include/asm-x86/system_64.h b/include/asm-x86/system_64.h
index 3dcb217a720..cc5b2666a04 100644
--- a/include/asm-x86/system_64.h
+++ b/include/asm-x86/system_64.h
@@ -141,13 +141,6 @@ static inline void write_cr8(unsigned long val)
 
 #endif /* __KERNEL__ */
 
-static inline void clflush(volatile void *__p)
-{
-	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
-}
-
-#define nop() __asm__ __volatile__ ("nop")
-
 #ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() barrier()
@@ -177,9 +170,4 @@ static inline void clflush(volatile void *__p)
 
 #include <linux/irqflags.h>
 
-void cpu_idle_wait(void);
-
-extern unsigned long arch_align_stack(unsigned long sp);
-extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
-
 #endif