author    Glauber de Oliveira Costa <gcosta@redhat.com>  2008-01-30 07:31:27 -0500
committer Ingo Molnar <mingo@elte.hu>  2008-01-30 07:31:27 -0500
commit    683e0253dbd12554b2ee969b15e68105252bff57 (patch)
tree      b1b2df43f7fcdf48bc69789d81c437be0cdd639b /include/asm-x86/processor_32.h
parent    62d7d7ed11760a0fea40e4fc6f0553e721d00443 (diff)
x86: unify common parts of processor.h
This patch moves the pieces of processor_32.h and processor_64.h that are identical into processor.h. Only what is exactly the same is moved; the rest is left untouched.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
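For reference, the block below re-reads a representative slice of the deleted code as it would appear once hoisted into the shared include/asm-x86/processor.h. The code is taken verbatim from the removals in this diff; its exact position inside the unified header is an assumption for illustration and is not shown by this patch.

/* Sketch: removed helpers relocated into the shared
 * include/asm-x86/processor.h (verbatim from the deletions below;
 * placement in that header is assumed). */

/* Stop speculative execution */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
}

static inline void __monitor(const void *eax, unsigned long ecx,
			     unsigned long edx)
{
	/* "monitor %eax,%ecx,%edx;" */
	asm volatile(".byte 0x0f,0x01,0xc8;"
		     : : "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __mwait(unsigned long eax, unsigned long ecx)
{
	/* "mwait %eax,%ecx;" */
	asm volatile(".byte 0x0f,0x01,0xc9;"
		     : : "a" (eax), "c" (ecx));
}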
Diffstat (limited to 'include/asm-x86/processor_32.h')
-rw-r--r--  include/asm-x86/processor_32.h  111
1 file changed, 0 insertions(+), 111 deletions(-)
diff --git a/include/asm-x86/processor_32.h b/include/asm-x86/processor_32.h
index 516a38ad2a7a..9e119d3789b4 100644
--- a/include/asm-x86/processor_32.h
+++ b/include/asm-x86/processor_32.h
@@ -109,10 +109,6 @@ void __init cpu_detect(struct cpuinfo_x86 *c);

 extern void identify_boot_cpu(void);
 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
-extern void print_cpu_info(struct cpuinfo_x86 *);
-extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
-extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
-extern unsigned short num_cache_leaves;

 #ifdef CONFIG_X86_HT
 extern void detect_ht(struct cpuinfo_x86 *c);
@@ -120,32 +116,6 @@ extern void detect_ht(struct cpuinfo_x86 *c);
 static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #endif

-/* Stop speculative execution */
-static inline void sync_core(void)
-{
-	int tmp;
-	asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
-}
-
-static inline void __monitor(const void *eax, unsigned long ecx,
-		unsigned long edx)
-{
-	/* "monitor %eax,%ecx,%edx;" */
-	asm volatile(
-		".byte 0x0f,0x01,0xc8;"
-		: :"a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
-	/* "mwait %eax,%ecx;" */
-	asm volatile(
-		".byte 0x0f,0x01,0xc9;"
-		: :"a" (eax), "c" (ecx));
-}
-
-extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
-
 /* from system description table in BIOS. Mostly for MCA use, but
    others may find it useful. */
 extern unsigned int machine_id;
@@ -153,20 +123,11 @@ extern unsigned int machine_submodel_id;
 extern unsigned int BIOS_revision;
 extern unsigned int mca_pentium_flag;

-/* Boot loader type from the setup header */
-extern int bootloader_type;
-
 /*
  * User space process size: 3GB (default).
  */
 #define TASK_SIZE	(PAGE_OFFSET)

-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 3))
-
-#define HAVE_ARCH_PICK_MMAP_LAYOUT

 /*
  * Size of io_bitmap.
@@ -356,25 +317,9 @@ struct thread_struct {
 	regs->sp = new_esp;					\
 } while (0)

-/* Forward declaration, a strange C thing */
-struct task_struct;
-struct mm_struct;
-
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct *tsk);
-
-/*
- * create a kernel thread without removing it from tasklists
- */
-extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

 extern unsigned long thread_saved_pc(struct task_struct *tsk);

-unsigned long get_wchan(struct task_struct *p);
-
 #define THREAD_SIZE_LONGS	(THREAD_SIZE/sizeof(unsigned long))
 #define KSTK_TOP(info)						\
 ({								\
@@ -399,53 +344,8 @@ unsigned long get_wchan(struct task_struct *p);
 	__regs__ - 1;						\
 })

-#define KSTK_EIP(task) (task_pt_regs(task)->ip)
 #define KSTK_ESP(task) (task_pt_regs(task)->sp)

-
-struct microcode_header {
-	unsigned int hdrver;
-	unsigned int rev;
-	unsigned int date;
-	unsigned int sig;
-	unsigned int cksum;
-	unsigned int ldrver;
-	unsigned int pf;
-	unsigned int datasize;
-	unsigned int totalsize;
-	unsigned int reserved[3];
-};
-
-struct microcode {
-	struct microcode_header hdr;
-	unsigned int bits[0];
-};
-
-typedef struct microcode	microcode_t;
-typedef struct microcode_header	microcode_header_t;
-
-/* microcode format is extended from prescott processors */
-struct extended_signature {
-	unsigned int sig;
-	unsigned int pf;
-	unsigned int cksum;
-};
-
-struct extended_sigtable {
-	unsigned int count;
-	unsigned int cksum;
-	unsigned int reserved[3];
-	struct extended_signature sigs[0];
-};
-
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static inline void rep_nop(void)
-{
-	__asm__ __volatile__("rep;nop": : :"memory");
-}
-
-#define cpu_relax()	rep_nop()
-
 static inline void native_load_sp0(struct tss_struct *tss, struct thread_struct *thread)
 {
 	tss->x86_tss.sp0 = thread->sp0;
@@ -555,7 +455,6 @@ static inline void load_sp0(struct tss_struct *tss, struct thread_struct *thread
    because they are microcoded there and very slow.
    However we don't do prefetches for pre XP Athlons currently
    That should be fixed. */
-#define ARCH_HAS_PREFETCH
 static inline void prefetch(const void *x)
 {
 	alternative_input(ASM_NOP4,
@@ -565,8 +464,6 @@ static inline void prefetch(const void *x)
 }

 #define ARCH_HAS_PREFETCH
-#define ARCH_HAS_PREFETCHW
-#define ARCH_HAS_SPINLOCK_PREFETCH

 /* 3dnow! prefetch to get an exclusive cache line. Useful for
    spinlocks to avoid one state transition in the cache coherency protocol. */
@@ -577,13 +474,7 @@ static inline void prefetchw(const void *x)
 		       X86_FEATURE_3DNOW,
 		       "r" (x));
 }
-#define spin_lock_prefetch(x)	prefetchw(x)
-
-extern void select_idle_routine(const struct cpuinfo_x86 *c);

-#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
-
-extern unsigned long boot_option_idle_override;
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);

@@ -595,6 +486,4 @@ extern void switch_to_new_gdt(void);
 extern void cpu_init(void);
 extern void init_gdt(int cpu);

-extern int force_mwait;
-
 #endif /* __ASM_I386_PROCESSOR_H */
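As a closing note, a minimal sketch of the transitional layout this unification series works toward, assuming the shared header keeps the common declarations and defers the remaining word-size specific pieces to the old files. The guard name, the comment contents, and the include switch below are assumptions for illustration; they are not shown by this diff.

/* include/asm-x86/processor.h -- illustrative structure (assumed) */
#ifndef __ASM_X86_PROCESSOR_H
#define __ASM_X86_PROCESSOR_H

/* Declarations common to 32- and 64-bit would live here, e.g. the
 * print_cpu_info()/init_intel_cacheinfo() prototypes and the
 * monitor/mwait helpers removed from processor_32.h by this patch. */

#ifdef CONFIG_X86_32
# include "processor_32.h"	/* remaining 32-bit specific parts */
#else
# include "processor_64.h"	/* remaining 64-bit specific parts */
#endif

#endif /* __ASM_X86_PROCESSOR_H */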