Diffstat (limited to 'arch/x86/include/asm/processor.h')
-rw-r--r--	arch/x86/include/asm/processor.h	45
1 file changed, 10 insertions, 35 deletions
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index ebaa04a8d3af..b844edc69fe9 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -94,10 +94,6 @@ struct cpuinfo_x86 {
 	int			x86_cache_alignment;	/* In bytes */
 	int			x86_power;
 	unsigned long		loops_per_jiffy;
-#ifdef CONFIG_SMP
-	/* cpus sharing the last level cache: */
-	cpumask_var_t		llc_shared_map;
-#endif
 	/* cpuid returned max cores value: */
 	u16			x86_max_cores;
 	u16			apicid;
@@ -110,6 +106,8 @@ struct cpuinfo_x86 {
 	u16			phys_proc_id;
 	/* Core id: */
 	u16			cpu_core_id;
+	/* Compute unit id */
+	u8			compute_unit_id;
 	/* Index into per_cpu list: */
 	u16			cpu_index;
 #endif
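
Two things change in struct cpuinfo_x86 in the hunks above: the llc_shared_map cpumask is dropped from the struct (presumably moved to dedicated per-CPU data, so every copy of cpu_info no longer carries a cpumask_var_t), and a compute_unit_id field is added for AMD family 0x15 ("Bulldozer") parts, where pairs of cores share a front end and FPU as one "compute unit". A sketch of how topology detection can derive the new field from AMD's topology-extension CPUID leaf follows; the leaf layout (Fn8000_001E: EBX[7:0] = compute unit id, EBX[15:8] = cores per compute unit - 1) is from AMD's documentation, but the function name below is illustrative, not the kernel's exact code:

#include <asm/processor.h>
#include <asm/cpufeature.h>

/*
 * Illustrative sketch: derive the compute unit id from CPUID
 * Fn8000_001E when the TOPOEXT feature is present.
 */
static void amd_fill_compute_unit(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (!cpu_has(c, X86_FEATURE_TOPOEXT))
		return;

	cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
	c->compute_unit_id = ebx & 0xff;	/* EBX[7:0]: compute unit id */
	/* EBX[15:8] holds "cores per compute unit - 1" */
}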
@@ -139,10 +137,9 @@ extern __u32 cpu_caps_set[NCAPINTS];
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 #define cpu_data(cpu)		per_cpu(cpu_info, cpu)
-#define current_cpu_data	__get_cpu_var(cpu_info)
 #else
+#define cpu_info		boot_cpu_data
 #define cpu_data(cpu)		boot_cpu_data
-#define current_cpu_data	boot_cpu_data
 #endif
 
 extern const struct seq_operations cpuinfo_op;
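
The current_cpu_data macro is removed here. Call sites are expected to move to the this_cpu percpu accessors on cpu_info, which read a single field through a segment-relative access instead of first computing the address of the whole per-CPU struct via __get_cpu_var; the new cpu_info alias for boot_cpu_data in the !SMP branch keeps such call sites compiling on UP kernels. A sketch of the call-site conversion; the helper function and the choice of x86_vendor are illustrative, not a specific kernel call site:

#include <linux/percpu.h>
#include <asm/processor.h>

/*
 * Hypothetical helper showing the conversion from the removed macro
 * to this_cpu ops; x86_vendor chosen only as an example field.
 */
static bool running_on_amd_cpu(void)
{
	/* old form: return current_cpu_data.x86_vendor == X86_VENDOR_AMD; */
	return __this_cpu_read(cpu_info.x86_vendor) == X86_VENDOR_AMD;
}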
@@ -606,7 +603,7 @@ extern unsigned long mmu_cr4_features;
 
 static inline void set_in_cr4(unsigned long mask)
 {
-	unsigned cr4;
+	unsigned long cr4;
 
 	mmu_cr4_features |= mask;
 	cr4 = read_cr4();
@@ -616,7 +613,7 @@ static inline void set_in_cr4(unsigned long mask)
 
 static inline void clear_in_cr4(unsigned long mask)
 {
-	unsigned cr4;
+	unsigned long cr4;
 
 	mmu_cr4_features &= ~mask;
 	cr4 = read_cr4();
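
The unsigned-to-unsigned-long change in set_in_cr4() and clear_in_cr4() makes the temporary match what read_cr4() and write_cr4() actually traffic in: on 64-bit builds a plain unsigned is 32 bits wide, so any CR4 bit above bit 31 would be silently dropped across the read-modify-write. All architecturally defined CR4 bits still fit in the low word at this point, so this is type hygiene rather than a user-visible fix. A userspace stand-in showing the truncation; fake_read_cr4() is a mock, not a kernel API, and the example assumes an LP64 target:

#include <stdio.h>

/* Mock of read_cr4() pretending a hypothetical bit above bit 31 is set. */
static unsigned long fake_read_cr4(void)
{
	return (1UL << 32) | 0x2000UL;
}

int main(void)
{
	unsigned narrow = fake_read_cr4();	/* truncated to 32 bits */
	unsigned long wide = fake_read_cr4();	/* full value preserved */

	printf("narrow: %#lx\n", (unsigned long)narrow);	/* 0x2000 */
	printf("wide:   %#lx\n", wide);				/* 0x100002000 */
	return 0;
}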
@@ -761,35 +758,13 @@ static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx);
 
 extern void select_idle_routine(const struct cpuinfo_x86 *c);
-extern void init_c1e_mask(void);
+extern void init_amd_e400_c1e_mask(void);
 
 extern unsigned long boot_option_idle_override;
-extern unsigned long idle_halt;
-extern unsigned long idle_nomwait;
-extern bool c1e_detected;
+extern bool amd_e400_c1e_detected;
 
-/*
- * on systems with caches, caches must be flashed as the absolute
- * last instruction before going into a suspended halt.  Otherwise,
- * dirty data can linger in the cache and become stale on resume,
- * leading to strange errors.
- *
- * perform a variety of operations to guarantee that the compiler
- * will not reorder instructions.  wbinvd itself is serializing
- * so the processor will not reorder.
- *
- * Systems without cache can just go into halt.
- */
-static inline void wbinvd_halt(void)
-{
-	mb();
-	/* check for clflush to determine if wbinvd is legal */
-	if (cpu_has_clflush)
-		asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
-	else
-		while (1)
-			halt();
-}
+enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT,
+			 IDLE_POLL, IDLE_FORCE_MWAIT};
 
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
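
The hunk above reworks the idle-selection interface. The C1E symbols gain an amd_e400_ prefix (the C1E behavior being handled is AMD erratum 400, hence the name), the ad-hoc idle_halt/idle_nomwait flags are folded into the single boot_option_idle_override variable backed by a proper enum of states, and the wbinvd_halt() helper is deleted outright, apparently as unused. Note that boot_option_idle_override itself stays declared as unsigned long even though it now carries enum idle_boot_override values. A sketch of how an idle= boot-parameter handler could map options onto the new enum; this is a hypothetical simplification modeled on the usual early_param pattern, not the exact parser in the arch/x86 process code:

#include <linux/init.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/processor.h>

/* Hypothetical simplified idle= parser using the new enum. */
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll"))
		boot_option_idle_override = IDLE_POLL;
	else if (!strcmp(str, "halt"))
		boot_option_idle_override = IDLE_HALT;
	else if (!strcmp(str, "nomwait"))
		boot_option_idle_override = IDLE_NOMWAIT;
	else if (!strcmp(str, "mwait"))
		boot_option_idle_override = IDLE_FORCE_MWAIT;
	else
		return -EINVAL;

	return 0;
}
early_param("idle", idle_setup);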
@@ -927,7 +902,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
 /*
  * The below -8 is to reserve 8 bytes on top of the ring0 stack.
  * This is necessary to guarantee that the entire "struct pt_regs"
- * is accessable even if the CPU haven't stored the SS/ESP registers
+ * is accessible even if the CPU haven't stored the SS/ESP registers
  * on the stack (interrupt gate does not save these registers
  * when switching to the same priv ring).
  * Therefore beware: accessing the ss/esp fields of the