author		Glauber de Oliveira Costa <gcosta@redhat.com>	2008-01-30 07:31:27 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:31:27 -0500
commit		683e0253dbd12554b2ee969b15e68105252bff57 (patch)
tree		b1b2df43f7fcdf48bc69789d81c437be0cdd639b /include
parent		62d7d7ed11760a0fea40e4fc6f0553e721d00443 (diff)
x86: unify common parts of processor.h
This patch moves the parts of processor_32.h and processor_64.h that are
identical into processor.h. Only what is exactly the same is moved; the rest
is left untouched.
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include')
-rw-r--r--	include/asm-x86/processor.h	| 120
-rw-r--r--	include/asm-x86/processor_32.h	| 111
-rw-r--r--	include/asm-x86/processor_64.h	| 116
3 files changed, 120 insertions, 227 deletions
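
For orientation before the diff itself: after this patch, processor.h pulls in the word-size-specific header and then carries the declarations that used to be duplicated. A minimal sketch of the resulting layout, pieced together from the hunks below (abbreviated; the include-guard name and the CONFIG_X86_32 dispatch are assumptions for illustration, not quoted from the file):

	/* Sketch only: abbreviated shape of include/asm-x86/processor.h after this patch. */
	#ifndef __ASM_X86_PROCESSOR_H		/* guard name assumed for illustration */
	#define __ASM_X86_PROCESSOR_H

	#include <asm/processor-flags.h>

	/* Forward declaration, a strange C thing */
	struct task_struct;
	struct mm_struct;

	#include <asm/page.h>
	#include <asm/system.h>

	#ifdef CONFIG_X86_32			/* dispatch assumed from the context lines below */
	# include "processor_32.h"		/* 32-bit-only material stays here */
	#else
	# include "processor_64.h"		/* 64-bit-only material stays here */
	#endif

	/* Declarations that were identical in both headers now appear once, e.g.: */
	extern void print_cpu_info(struct cpuinfo_x86 *);
	extern unsigned short num_cache_leaves;
	extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
	#define cpu_relax()	rep_nop()

	#endif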
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 8b7794766884..52e3637ef59e 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -3,6 +3,10 @@
 
 #include <asm/processor-flags.h>
 
+/* Forward declaration, a strange C thing */
+struct task_struct;
+struct mm_struct;
+
 #include <asm/page.h>
 #include <asm/system.h>
 
@@ -29,6 +33,11 @@ static inline void load_cr3(pgd_t *pgdir)
 # include "processor_64.h"
 #endif
 
+extern void print_cpu_info(struct cpuinfo_x86 *);
+extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
+extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
+extern unsigned short num_cache_leaves;
+
 static inline unsigned long native_get_debugreg(int regno)
 {
 	unsigned long val = 0;	/* Damn you, gcc! */
@@ -138,7 +147,53 @@ static inline void clear_in_cr4(unsigned long mask)
 	write_cr4(cr4);
 }
 
+struct microcode_header {
+	unsigned int hdrver;
+	unsigned int rev;
+	unsigned int date;
+	unsigned int sig;
+	unsigned int cksum;
+	unsigned int ldrver;
+	unsigned int pf;
+	unsigned int datasize;
+	unsigned int totalsize;
+	unsigned int reserved[3];
+};
+
+struct microcode {
+	struct microcode_header hdr;
+	unsigned int bits[0];
+};
+
+typedef struct microcode microcode_t;
+typedef struct microcode_header microcode_header_t;
+
+/* microcode format is extended from prescott processors */
+struct extended_signature {
+	unsigned int sig;
+	unsigned int pf;
+	unsigned int cksum;
+};
+
+struct extended_sigtable {
+	unsigned int count;
+	unsigned int cksum;
+	unsigned int reserved[3];
+	struct extended_signature sigs[0];
+};
+
+/*
+ * create a kernel thread without removing it from tasklists
+ */
+extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
+
+/* Free all resources held by a thread. */
+extern void release_thread(struct task_struct *);
+
+/* Prepare to copy thread state - unlazy all lazy status */
+extern void prepare_to_copy(struct task_struct *tsk);
 
+unsigned long get_wchan(struct task_struct *p);
 
 /*
  * Generic CPUID function
@@ -196,4 +251,69 @@ static inline unsigned int cpuid_edx(unsigned int op)
 	return edx;
 }
 
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static inline void rep_nop(void)
+{
+	__asm__ __volatile__("rep;nop": : :"memory");
+}
+
+/* Stop speculative execution */
+static inline void sync_core(void)
+{
+	int tmp;
+	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
+		     : "ebx", "ecx", "edx", "memory");
+}
+
+#define cpu_relax()	rep_nop()
+
+static inline void __monitor(const void *eax, unsigned long ecx,
+			     unsigned long edx)
+{
+	/* "monitor %eax,%ecx,%edx;" */
+	asm volatile(
+		".byte 0x0f,0x01,0xc8;"
+		: :"a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax,%ecx;" */
+	asm volatile(
+		".byte 0x0f,0x01,0xc9;"
+		: :"a" (eax), "c" (ecx));
+}
+
+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+{
+	/* "mwait %eax,%ecx;" */
+	asm volatile(
+		"sti; .byte 0x0f,0x01,0xc9;"
+		: :"a" (eax), "c" (ecx));
+}
+
+extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
+
+extern int force_mwait;
+
+extern void select_idle_routine(const struct cpuinfo_x86 *c);
+
+extern unsigned long boot_option_idle_override;
+
+/* Boot loader type from the setup header */
+extern int bootloader_type;
+#define cache_line_size()	(boot_cpu_data.x86_cache_alignment)
+
+#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
+#define ARCH_HAS_PREFETCHW
+#define ARCH_HAS_SPINLOCK_PREFETCH
+
+#define spin_lock_prefetch(x)	prefetchw(x)
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
+
+#define KSTK_EIP(task) (task_pt_regs(task)->ip)
+
 #endif
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
index 516a38ad2a7a..9e119d3789b4 100644
--- a/include/asm-x86/processor_32.h
+++ b/include/asm-x86/processor_32.h
@@ -109,10 +109,6 @@ void __init cpu_detect(struct cpuinfo_x86 *c);
 
 extern void identify_boot_cpu(void);
 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
-extern void print_cpu_info(struct cpuinfo_x86 *);
-extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
-extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-extern unsigned short num_cache_leaves;
 
 #ifdef CONFIG_X86_HT
 extern void detect_ht(struct cpuinfo_x86 *c);
@@ -120,32 +116,6 @@ extern void detect_ht(struct cpuinfo_x86 *c);
 static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #endif
 
-/* Stop speculative execution */
-static inline void sync_core(void)
-{
-	int tmp;
-	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
-}
-
-static inline void __monitor(const void *eax, unsigned long ecx,
-		unsigned long edx)
-{
-	/* "monitor %eax,%ecx,%edx;" */
-	asm volatile(
-		".byte 0x0f,0x01,0xc8;"
-		: :"a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
-	/* "mwait %eax,%ecx;" */
-	asm volatile(
-		".byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
-}
-
-extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
-
 /* from system description table in BIOS. Mostly for MCA use, but
    others may find it useful. */
 extern unsigned int machine_id;
@@ -153,20 +123,11 @@ extern unsigned int machine_submodel_id
 extern unsigned int BIOS_revision;
 extern unsigned int mca_pentium_flag;
 
-/* Boot loader type from the setup header */
-extern int bootloader_type;
-
 /*
  * User space process size: 3GB (default).
  */
 #define TASK_SIZE	(PAGE_OFFSET)
 
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
-
-#define HAVE_ARCH_PICK_MMAP_LAYOUT
 
 /*
  * Size of io_bitmap.
@@ -356,25 +317,9 @@ struct thread_struct {
 	regs->sp = new_esp;					\
 } while (0)
 
-/* Forward declaration, a strange C thing */
-struct task_struct;
-struct mm_struct;
-
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct *tsk);
-
-/*
- * create a kernel thread without removing it from tasklists
- */
-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
 
-unsigned long get_wchan(struct task_struct *p);
-
 #define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
 #define KSTK_TOP(info)						\
 ({								\
@@ -399,53 +344,8 @@ unsigned long get_wchan(struct task_struct *p);
 	__regs__ - 1;						\
 })
 
-#define KSTK_EIP(task) (task_pt_regs(task)->ip)
 #define KSTK_ESP(task) (task_pt_regs(task)->sp)
 
-
-struct microcode_header {
-	unsigned int hdrver;
-	unsigned int rev;
-	unsigned int date;
-	unsigned int sig;
-	unsigned int cksum;
-	unsigned int ldrver;
-	unsigned int pf;
-	unsigned int datasize;
-	unsigned int totalsize;
-	unsigned int reserved[3];
-};
-
-struct microcode {
-	struct microcode_header hdr;
-	unsigned int bits[0];
-};
-
-typedef struct microcode microcode_t;
-typedef struct microcode_header microcode_header_t;
-
-/* microcode format is extended from prescott processors */
-struct extended_signature {
-	unsigned int sig;
-	unsigned int pf;
-	unsigned int cksum;
-};
-
-struct extended_sigtable {
-	unsigned int count;
-	unsigned int cksum;
-	unsigned int reserved[3];
-	struct extended_signature sigs[0];
-};
-
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
-	__asm__ __volatile__("rep;nop": : :"memory");
-}
-
-#define cpu_relax()	rep_nop()
-
 static inline void native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
 {
 	tss->x86_tss.sp0 = thread->sp0;
@@ -555,7 +455,6 @@ static inline void load_sp0(struct tss_struct *tss, struct thread_struct *thread
 	because they are microcoded there and very slow.
 	However we don't do prefetches for pre XP Athlons currently
 	That should be fixed. */
-#define ARCH_HAS_PREFETCH
 static inline void prefetch(const void *x)
 {
 	alternative_input(ASM_NOP4,
@@ -565,8 +464,6 @@ static inline void prefetch(const void *x)
 }
 
 #define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH
 
 /* 3dnow! prefetch to get an exclusive cache line. Useful for
    spinlocks to avoid one state transition in the cache coherency protocol. */
@@ -577,13 +474,7 @@ static inline void prefetchw(const void *x)
 			  X86_FEATURE_3DNOW,
 			  "r" (x));
 }
-#define spin_lock_prefetch(x)	prefetchw(x)
-
-extern void select_idle_routine(const struct cpuinfo_x86 *c);
 
-#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
-
-extern unsigned long boot_option_idle_override;
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
@@ -595,6 +486,4 @@ extern void switch_to_new_gdt(void);
 extern void cpu_init(void);
 extern void init_gdt(int cpu);
 
-extern int force_mwait;
-
 #endif /* __ASM_I386_PROCESSOR_H */
diff --git a/include/asm-x86/processor_64.h b/include/asm-x86/processor_64.h
index 6abe1ba30fe0..5f5c7fc63797 100644
--- a/include/asm-x86/processor_64.h
+++ b/include/asm-x86/processor_64.h
@@ -83,11 +83,6 @@ DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
 extern char ignore_irq13;
 
 extern void identify_cpu(struct cpuinfo_x86 *);
-extern void print_cpu_info(struct cpuinfo_x86 *);
-extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
-extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-extern unsigned short num_cache_leaves;
-
 
 /*
  * User space process size. 47bits minus one guard page.
@@ -102,8 +97,6 @@ extern unsigned short num_cache_leaves;
 #define TASK_SIZE	(test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
 #define TASK_SIZE_OF(child)	((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
 
-#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE/3)
-
 /*
  * Size of io_bitmap.
  */
@@ -226,68 +219,16 @@ struct thread_struct {
 	set_fs(USER_DS);						 \
 } while(0)
 
-struct task_struct;
-struct mm_struct;
-
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct *tsk);
-
-/*
- * create a kernel thread without removing it from tasklists
- */
-extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
-
 /*
  * Return saved PC of a blocked thread.
  * What is this good for? it will be always the scheduler or ret_from_fork.
  */
 #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
 
-extern unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
-#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ip)
 #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
 
 
-struct microcode_header {
-	unsigned int hdrver;
-	unsigned int rev;
-	unsigned int date;
-	unsigned int sig;
-	unsigned int cksum;
-	unsigned int ldrver;
-	unsigned int pf;
-	unsigned int datasize;
-	unsigned int totalsize;
-	unsigned int reserved[3];
-};
-
-struct microcode {
-	struct microcode_header hdr;
-	unsigned int bits[0];
-};
-
-typedef struct microcode microcode_t;
-typedef struct microcode_header microcode_header_t;
-
-/* microcode format is extended from prescott processors */
-struct extended_signature {
-	unsigned int sig;
-	unsigned int pf;
-	unsigned int cksum;
-};
-
-struct extended_sigtable {
-	unsigned int count;
-	unsigned int cksum;
-	unsigned int reserved[3];
-	struct extended_signature sigs[0];
-};
-
-
 #if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2)
 #define ASM_NOP1 P6_NOP1
 #define ASM_NOP2 P6_NOP2
@@ -331,20 +272,6 @@ struct extended_sigtable {
 
 #define ASM_NOP_MAX 8
 
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
-	__asm__ __volatile__("rep;nop": : :"memory");
-}
-
-/* Stop speculative execution */
-static inline void sync_core(void)
-{
-	int tmp;
-	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
-}
-
-#define ARCH_HAS_PREFETCHW 1
 static inline void prefetchw(void *x)
 {
 	alternative_input("prefetcht0 (%1)",
@@ -353,42 +280,6 @@ static inline void prefetchw(void *x)
 			  "r" (x));
 }
 
-#define ARCH_HAS_SPINLOCK_PREFETCH 1
-
-#define spin_lock_prefetch(x)	prefetchw(x)
-
-#define cpu_relax()	rep_nop()
-
-static inline void __monitor(const void *eax, unsigned long ecx,
-		unsigned long edx)
-{
-	/* "monitor %eax,%ecx,%edx;" */
-	asm volatile(
-		".byte 0x0f,0x01,0xc8;"
-		: :"a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
-	/* "mwait %eax,%ecx;" */
-	asm volatile(
-		".byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
-}
-
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-{
-	/* "mwait %eax,%ecx;" */
-	asm volatile(
-		"sti; .byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
-}
-
-extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
-
-extern int force_mwait;
-
-extern void select_idle_routine(const struct cpuinfo_x86 *c);
 
 #define stack_current() \
 ({								\
@@ -397,12 +288,5 @@ extern void select_idle_routine(const struct cpuinfo_x86 *c);
 	ti->task;					\
 })
 
-#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
-
-extern unsigned long boot_option_idle_override;
-/* Boot loader type from the setup header */
-extern int bootloader_type;
-
-#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
 
 #endif /* __ASM_X86_64_PROCESSOR_H */
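
As a side note on one of the consolidated helpers: the comment kept in processor.h says REP NOP (PAUSE) belongs in busy-wait loops, and that is exactly how cpu_relax() is meant to be used. A small, self-contained sketch follows; only rep_nop() and cpu_relax() are taken from the patch, the wait_for_flag() caller is hypothetical:

	/* Sketch: using the cpu_relax()/rep_nop() pair moved into processor.h. */
	static inline void rep_nop(void)
	{
		__asm__ __volatile__("rep;nop" : : : "memory");
	}

	#define cpu_relax()	rep_nop()

	/* Hypothetical caller: spin until another CPU sets *flag. */
	static void wait_for_flag(volatile int *flag)
	{
		while (!*flag)
			cpu_relax();	/* PAUSE eases the spin for SMT siblings and saves power */
	}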