Diffstat (limited to 'include/asm-i386/processor.h')
-rw-r--r--  include/asm-i386/processor.h | 187
1 file changed, 98 insertions(+), 89 deletions(-)
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 11bf899de8aa..70f3515c3db0 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -21,6 +21,7 @@
 #include <asm/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
+#include <asm/processor-flags.h>
 
 /* flag for disabling the tsc */
 extern int tsc_disable;
@@ -115,7 +116,8 @@ extern char ignore_fpu_irq;
 
 void __init cpu_detect(struct cpuinfo_x86 *c);
 
-extern void identify_cpu(struct cpuinfo_x86 *);
+extern void identify_boot_cpu(void);
+extern void identify_secondary_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);
 extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
 extern unsigned short num_cache_leaves;
@@ -126,28 +128,7 @@ extern void detect_ht(struct cpuinfo_x86 *c);
 static inline void detect_ht(struct cpuinfo_x86 *c) {}
 #endif
 
-/*
- * EFLAGS bits
- */
-#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
-#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
-#define X86_EFLAGS_AF 0x00000010 /* Auxillary carry Flag */
-#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
-#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
-#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
-#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
-#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
-#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
-#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
-#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
-#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
-#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
-#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
-#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
-#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
-#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
-
-static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
+static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
 				unsigned int *ecx, unsigned int *edx)
 {
 	/* ecx is often an input as well as an output. */
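Note: the X86_EFLAGS_* constants are not dropped, they move behind the new <asm/processor-flags.h> include added above (native_set_iopl_mask below still uses X86_EFLAGS_IOPL), and native_cpuid() loses its fastcall annotation, presumably so it matches the plain C calling convention the paravirt hooks expect. Purely as an illustration, not part of the patch (the function and variable names below are invented), a caller of native_cpuid() looks roughly like this:

    /* Illustration only: query CPUID leaf 0; eax selects the leaf on input,
     * and all four registers come back as outputs. */
    static void example_read_vendor(unsigned int vendor[3])
    {
        unsigned int eax = 0, ebx, ecx, edx;

        native_cpuid(&eax, &ebx, &ecx, &edx);
        vendor[0] = ebx;    /* vendor string is returned in ebx, edx, ecx */
        vendor[1] = edx;
        vendor[2] = ecx;
    }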
@@ -162,21 +143,6 @@ static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx,
 #define load_cr3(pgdir) write_cr3(__pa(pgdir))
 
 /*
- * Intel CPU features in CR4
- */
-#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
-#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
-#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
-#define X86_CR4_DE 0x0008 /* enable debugging extensions */
-#define X86_CR4_PSE 0x0010 /* enable page size extensions */
-#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
-#define X86_CR4_MCE 0x0040 /* Machine check enable */
-#define X86_CR4_PGE 0x0080 /* enable global pages */
-#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
-#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
-#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
-
-/*
  * Save the cr4 feature set we're using (ie
  * Pentium 4MB enable and PPro Global page
  * enable), so that any CPU's that boot up
@@ -203,26 +169,6 @@ static inline void clear_in_cr4 (unsigned long mask)
 }
 
 /*
- * NSC/Cyrix CPU configuration register indexes
- */
-
-#define CX86_PCR0 0x20
-#define CX86_GCR  0xb8
-#define CX86_CCR0 0xc0
-#define CX86_CCR1 0xc1
-#define CX86_CCR2 0xc2
-#define CX86_CCR3 0xc3
-#define CX86_CCR4 0xe8
-#define CX86_CCR5 0xe9
-#define CX86_CCR6 0xea
-#define CX86_CCR7 0xeb
-#define CX86_PCR1 0xf0
-#define CX86_DIR0 0xfe
-#define CX86_DIR1 0xff
-#define CX86_ARR_BASE 0xc4
-#define CX86_RCR_BASE 0xdc
-
-/*
  * NSC/Cyrix CPU indexed register access macros
  */
 
@@ -345,7 +291,8 @@ typedef struct {
 
 struct thread_struct;
 
-struct tss_struct {
+/* This is the TSS defined by the hardware. */
+struct i386_hw_tss {
 	unsigned short back_link,__blh;
 	unsigned long esp0;
 	unsigned short ss0,__ss0h;
@@ -369,6 +316,11 @@ struct tss_struct {
 	unsigned short gs, __gsh;
 	unsigned short ldt, __ldth;
 	unsigned short trace, io_bitmap_base;
+} __attribute__((packed));
+
+struct tss_struct {
+	struct i386_hw_tss x86_tss;
+
 	/*
 	 * The extra 1 is there because the CPU will access an
 	 * additional byte beyond the end of the IO permission
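Note: with the hardware-defined fields split out into struct i386_hw_tss, code that touches them now has to reach through the embedded x86_tss member (the load_esp0 hunk further down does exactly this), while software-only state such as the I/O bitmap remains a direct member of struct tss_struct. A minimal sketch, with an invented helper name:

    /* Illustration only: the hardware esp0 field now sits behind x86_tss. */
    static inline void example_set_ring0_stack(struct tss_struct *tss,
                                               struct thread_struct *thread)
    {
        tss->x86_tss.esp0 = thread->esp0;   /* hardware-defined field */
        tss->io_bitmap[0] = ~0UL;           /* software-only field, path unchanged */
    }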
@@ -421,10 +373,11 @@ struct thread_struct {
 };
 
 #define INIT_THREAD { \
+	.esp0 = sizeof(init_stack) + (long)&init_stack, \
 	.vm86_info = NULL, \
 	.sysenter_cs = __KERNEL_CS, \
 	.io_bitmap_ptr = NULL, \
-	.fs = __KERNEL_PDA, \
+	.fs = __KERNEL_PERCPU, \
 }
 
 /*
@@ -434,10 +387,12 @@ struct thread_struct {
  * be within the limit.
  */
 #define INIT_TSS { \
-	.esp0 = sizeof(init_stack) + (long)&init_stack, \
-	.ss0 = __KERNEL_DS, \
-	.ss1 = __KERNEL_CS, \
-	.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+	.x86_tss = { \
+		.esp0 = sizeof(init_stack) + (long)&init_stack, \
+		.ss0 = __KERNEL_DS, \
+		.ss1 = __KERNEL_CS, \
+		.io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
+	}, \
 	.io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
 }
 
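Note: INIT_TSS mirrors the new nesting, hardware fields inside .x86_tss and the I/O bitmap at the top level. As a sketch of how such an initializer is typically consumed (the per-CPU variable shown here is an assumption, it is not part of this patch):

    /* Illustration only: statically initializing a per-CPU TSS. */
    DEFINE_PER_CPU(struct tss_struct, init_tss) = INIT_TSS;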
@@ -544,40 +499,70 @@ static inline void rep_nop(void)
 
 #define cpu_relax() rep_nop()
 
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#define paravirt_enabled() 0
-#define __cpuid native_cpuid
-
-static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+static inline void native_load_esp0(struct tss_struct *tss, struct thread_struct *thread)
 {
-	tss->esp0 = thread->esp0;
+	tss->x86_tss.esp0 = thread->esp0;
 	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
-	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
-		tss->ss1 = thread->sysenter_cs;
+	if (unlikely(tss->x86_tss.ss1 != thread->sysenter_cs)) {
+		tss->x86_tss.ss1 = thread->sysenter_cs;
 		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 	}
 }
 
-/*
- * These special macros can be used to get or set a debugging register
- */
-#define get_debugreg(var, register) \
-		__asm__("movl %%db" #register ", %0" \
-			:"=r" (var))
-#define set_debugreg(value, register) \
-		__asm__("movl %0,%%db" #register \
-			: /* no output */ \
-			:"r" (value))
 
-#define set_iopl_mask native_set_iopl_mask
-#endif /* CONFIG_PARAVIRT */
+static inline unsigned long native_get_debugreg(int regno)
+{
+	unsigned long val = 0; /* Damn you, gcc! */
+
+	switch (regno) {
+	case 0:
+		asm("movl %%db0, %0" :"=r" (val)); break;
+	case 1:
+		asm("movl %%db1, %0" :"=r" (val)); break;
+	case 2:
+		asm("movl %%db2, %0" :"=r" (val)); break;
+	case 3:
+		asm("movl %%db3, %0" :"=r" (val)); break;
+	case 6:
+		asm("movl %%db6, %0" :"=r" (val)); break;
+	case 7:
+		asm("movl %%db7, %0" :"=r" (val)); break;
+	default:
+		BUG();
+	}
+	return val;
+}
+
+static inline void native_set_debugreg(int regno, unsigned long value)
+{
+	switch (regno) {
+	case 0:
+		asm("movl %0,%%db0" : /* no output */ :"r" (value));
+		break;
+	case 1:
+		asm("movl %0,%%db1" : /* no output */ :"r" (value));
+		break;
+	case 2:
+		asm("movl %0,%%db2" : /* no output */ :"r" (value));
+		break;
+	case 3:
+		asm("movl %0,%%db3" : /* no output */ :"r" (value));
+		break;
+	case 6:
+		asm("movl %0,%%db6" : /* no output */ :"r" (value));
+		break;
+	case 7:
+		asm("movl %0,%%db7" : /* no output */ :"r" (value));
+		break;
+	default:
+		BUG();
+	}
+}
 
 /*
  * Set IOPL bits in EFLAGS from given mask
  */
-static fastcall inline void native_set_iopl_mask(unsigned mask)
+static inline void native_set_iopl_mask(unsigned mask)
 {
 	unsigned int reg;
 	__asm__ __volatile__ ("pushfl;"
@@ -590,6 +575,28 @@ static fastcall inline void native_set_iopl_mask(unsigned mask)
 		: "i" (~X86_EFLAGS_IOPL), "r" (mask));
 }
 
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#define paravirt_enabled() 0
+#define __cpuid native_cpuid
+
+static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread)
+{
+	native_load_esp0(tss, thread);
+}
+
+/*
+ * These special macros can be used to get or set a debugging register
+ */
+#define get_debugreg(var, register) \
+	(var) = native_get_debugreg(register)
+#define set_debugreg(value, register) \
+	native_set_debugreg(register, value)
+
+#define set_iopl_mask native_set_iopl_mask
+#endif /* CONFIG_PARAVIRT */
+
 /*
  * Generic CPUID function
  * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
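Note: in the !CONFIG_PARAVIRT case, load_esp0(), get_debugreg() and set_debugreg() are now thin wrappers around the native_* helpers defined earlier in the header, so callers keep the same interface whether or not <asm/paravirt.h> supplies the implementations. Illustrative usage only (the function and local variable are invented):

    /* Illustration only: read and write %db7 through the macros. */
    static inline void example_touch_dr7(void)
    {
        unsigned long dr7;

        get_debugreg(dr7, 7);   /* here: dr7 = native_get_debugreg(7) */
        set_debugreg(dr7, 7);   /* here: native_set_debugreg(7, dr7)  */
    }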
@@ -742,8 +749,10 @@ extern unsigned long boot_option_idle_override;
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
-extern int init_gdt(int cpu, struct task_struct *idle);
 extern void cpu_set_gdt(int);
-extern void secondary_cpu_init(void);
+extern void switch_to_new_gdt(void);
+extern void cpu_init(void);
+
+extern int force_mwait;
 
 #endif /* __ASM_I386_PROCESSOR_H */
