Diffstat (limited to 'include')
57 files changed, 833 insertions(+), 249 deletions(-)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index cb752ba72466..7440a0dceddb 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -385,6 +385,7 @@ | |||
385 | . = ALIGN(align); \ | 385 | . = ALIGN(align); \ |
386 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ | 386 | VMLINUX_SYMBOL(__per_cpu_start) = .; \ |
387 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \ | 387 | .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \ |
388 | *(.data.percpu.page_aligned) \ | ||
388 | *(.data.percpu) \ | 389 | *(.data.percpu) \ |
389 | *(.data.percpu.shared_aligned) \ | 390 | *(.data.percpu.shared_aligned) \ |
390 | } \ | 391 | } \ |
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index 65590c9aecd4..d76a0839abe9 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -9,6 +9,8 @@ | |||
9 | #include <asm/apicdef.h> | 9 | #include <asm/apicdef.h> |
10 | #include <asm/processor.h> | 10 | #include <asm/processor.h> |
11 | #include <asm/system.h> | 11 | #include <asm/system.h> |
12 | #include <asm/cpufeature.h> | ||
13 | #include <asm/msr.h> | ||
12 | 14 | ||
13 | #define ARCH_APICTIMER_STOPS_ON_C3 1 | 15 | #define ARCH_APICTIMER_STOPS_ON_C3 1 |
14 | 16 | ||
@@ -47,8 +49,6 @@ extern int disable_apic; | |||
47 | #ifdef CONFIG_PARAVIRT | 49 | #ifdef CONFIG_PARAVIRT |
48 | #include <asm/paravirt.h> | 50 | #include <asm/paravirt.h> |
49 | #else | 51 | #else |
50 | #define apic_write native_apic_write | ||
51 | #define apic_read native_apic_read | ||
52 | #define setup_boot_clock setup_boot_APIC_clock | 52 | #define setup_boot_clock setup_boot_APIC_clock |
53 | #define setup_secondary_clock setup_secondary_APIC_clock | 53 | #define setup_secondary_clock setup_secondary_APIC_clock |
54 | #endif | 54 | #endif |
@@ -60,7 +60,7 @@ extern u64 xapic_icr_read(void); | |||
60 | extern void xapic_icr_write(u32, u32); | 60 | extern void xapic_icr_write(u32, u32); |
61 | extern int setup_profiling_timer(unsigned int); | 61 | extern int setup_profiling_timer(unsigned int); |
62 | 62 | ||
63 | static inline void native_apic_write(unsigned long reg, u32 v) | 63 | static inline void native_apic_mem_write(u32 reg, u32 v) |
64 | { | 64 | { |
65 | volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); | 65 | volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); |
66 | 66 | ||
@@ -69,15 +69,68 @@ static inline void native_apic_write(unsigned long reg, u32 v) | |||
69 | ASM_OUTPUT2("0" (v), "m" (*addr))); | 69 | ASM_OUTPUT2("0" (v), "m" (*addr))); |
70 | } | 70 | } |
71 | 71 | ||
72 | static inline u32 native_apic_read(unsigned long reg) | 72 | static inline u32 native_apic_mem_read(u32 reg) |
73 | { | 73 | { |
74 | return *((volatile u32 *)(APIC_BASE + reg)); | 74 | return *((volatile u32 *)(APIC_BASE + reg)); |
75 | } | 75 | } |
76 | 76 | ||
77 | extern void apic_wait_icr_idle(void); | 77 | static inline void native_apic_msr_write(u32 reg, u32 v) |
78 | extern u32 safe_apic_wait_icr_idle(void); | 78 | { |
79 | if (reg == APIC_DFR || reg == APIC_ID || reg == APIC_LDR || | ||
80 | reg == APIC_LVR) | ||
81 | return; | ||
82 | |||
83 | wrmsr(APIC_BASE_MSR + (reg >> 4), v, 0); | ||
84 | } | ||
85 | |||
86 | static inline u32 native_apic_msr_read(u32 reg) | ||
87 | { | ||
88 | u32 low, high; | ||
89 | |||
90 | if (reg == APIC_DFR) | ||
91 | return -1; | ||
92 | |||
93 | rdmsr(APIC_BASE_MSR + (reg >> 4), low, high); | ||
94 | return low; | ||
95 | } | ||
96 | |||
97 | #ifndef CONFIG_X86_32 | ||
98 | extern int x2apic, x2apic_preenabled; | ||
99 | extern void check_x2apic(void); | ||
100 | extern void enable_x2apic(void); | ||
101 | extern void enable_IR_x2apic(void); | ||
102 | extern void x2apic_icr_write(u32 low, u32 id); | ||
103 | #endif | ||
104 | |||
105 | struct apic_ops { | ||
106 | u32 (*read)(u32 reg); | ||
107 | void (*write)(u32 reg, u32 v); | ||
108 | u64 (*icr_read)(void); | ||
109 | void (*icr_write)(u32 low, u32 high); | ||
110 | void (*wait_icr_idle)(void); | ||
111 | u32 (*safe_wait_icr_idle)(void); | ||
112 | }; | ||
113 | |||
114 | extern struct apic_ops *apic_ops; | ||
115 | |||
116 | #define apic_read (apic_ops->read) | ||
117 | #define apic_write (apic_ops->write) | ||
118 | #define apic_icr_read (apic_ops->icr_read) | ||
119 | #define apic_icr_write (apic_ops->icr_write) | ||
120 | #define apic_wait_icr_idle (apic_ops->wait_icr_idle) | ||
121 | #define safe_apic_wait_icr_idle (apic_ops->safe_wait_icr_idle) | ||
122 | |||
79 | extern int get_physical_broadcast(void); | 123 | extern int get_physical_broadcast(void); |
80 | 124 | ||
125 | #ifdef CONFIG_X86_64 | ||
126 | static inline void ack_x2APIC_irq(void) | ||
127 | { | ||
128 | /* Docs say use 0 for future compatibility */ | ||
129 | native_apic_msr_write(APIC_EOI, 0); | ||
130 | } | ||
131 | #endif | ||
132 | |||
133 | |||
81 | static inline void ack_APIC_irq(void) | 134 | static inline void ack_APIC_irq(void) |
82 | { | 135 | { |
83 | /* | 136 | /* |
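The apic_ops table above replaces the old direct defines (apic_read/apic_write) with an indirection, so the same apic_read()/apic_write()/apic_icr_*() call sites can be served either by the classic memory-mapped xAPIC accessors or by the new MSR-based x2APIC accessors, which map a register offset reg to MSR APIC_BASE_MSR + (reg >> 4); for example APIC_EOI at offset 0xB0 becomes MSR 0x80B. As a minimal sketch of how such a table might be instantiated for the memory-mapped case (not part of this patch; the two *wait_icr_idle callbacks are assumed names for MMIO-based helpers provided on the .c side):

static struct apic_ops xapic_ops = {
	.read			= native_apic_mem_read,
	.write			= native_apic_mem_write,
	.icr_read		= xapic_icr_read,
	.icr_write		= xapic_icr_write,
	.wait_icr_idle		= xapic_wait_icr_idle,		/* assumed name */
	.safe_wait_icr_idle	= safe_xapic_wait_icr_idle,	/* assumed name */
};

struct apic_ops *apic_ops = &xapic_ops;

Switching to x2APIC mode then amounts to pointing apic_ops at an equivalent table built on native_apic_msr_read()/native_apic_msr_write(), with no change to any caller.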
diff --git a/include/asm-x86/apicdef.h b/include/asm-x86/apicdef.h
index c40687da20fc..b922c85ac91d 100644
--- a/include/asm-x86/apicdef.h
+++ b/include/asm-x86/apicdef.h
@@ -105,6 +105,7 @@ | |||
105 | #define APIC_TMICT 0x380 | 105 | #define APIC_TMICT 0x380 |
106 | #define APIC_TMCCT 0x390 | 106 | #define APIC_TMCCT 0x390 |
107 | #define APIC_TDCR 0x3E0 | 107 | #define APIC_TDCR 0x3E0 |
108 | #define APIC_SELF_IPI 0x3F0 | ||
108 | #define APIC_TDR_DIV_TMBASE (1 << 2) | 109 | #define APIC_TDR_DIV_TMBASE (1 << 2) |
109 | #define APIC_TDR_DIV_1 0xB | 110 | #define APIC_TDR_DIV_1 0xB |
110 | #define APIC_TDR_DIV_2 0x0 | 111 | #define APIC_TDR_DIV_2 0x0 |
@@ -128,6 +129,8 @@ | |||
128 | #define APIC_EILVT3 0x530 | 129 | #define APIC_EILVT3 0x530 |
129 | 130 | ||
130 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) | 131 | #define APIC_BASE (fix_to_virt(FIX_APIC_BASE)) |
132 | #define APIC_BASE_MSR 0x800 | ||
133 | #define X2APIC_ENABLE (1UL << 10) | ||
131 | 134 | ||
132 | #ifdef CONFIG_X86_32 | 135 | #ifdef CONFIG_X86_32 |
133 | # define MAX_IO_APICS 64 | 136 | # define MAX_IO_APICS 64 |
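X2APIC_ENABLE is the extended-mode enable bit (bit 10) of the local APIC base MSR, and APIC_BASE_MSR 0x800 is the base of the x2APIC register range in MSR space. A rough sketch of how enable_x2apic() (declared in apic.h) could use these constants follows; the body is illustrative rather than this patch's actual implementation, while MSR_IA32_APICBASE and rdmsr/wrmsr are the kernel's existing MSR helpers:

void enable_x2apic(void)
{
	u32 msr, msr2;

	rdmsr(MSR_IA32_APICBASE, msr, msr2);
	if (!(msr & X2APIC_ENABLE)) {
		printk(KERN_INFO "Enabling x2apic\n");
		wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
	}
}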
diff --git a/include/asm-x86/arch_hooks.h b/include/asm-x86/arch_hooks.h
index 72adc3a109cc..de4596b24c23 100644
--- a/include/asm-x86/arch_hooks.h
+++ b/include/asm-x86/arch_hooks.h
@@ -12,8 +12,6 @@ | |||
12 | /* these aren't arch hooks, they are generic routines | 12 | /* these aren't arch hooks, they are generic routines |
13 | * that can be used by the hooks */ | 13 | * that can be used by the hooks */ |
14 | extern void init_ISA_irqs(void); | 14 | extern void init_ISA_irqs(void); |
15 | extern void apic_intr_init(void); | ||
16 | extern void smp_intr_init(void); | ||
17 | extern irqreturn_t timer_interrupt(int irq, void *dev_id); | 15 | extern irqreturn_t timer_interrupt(int irq, void *dev_id); |
18 | 16 | ||
19 | /* these are the defined hooks */ | 17 | /* these are the defined hooks */ |
diff --git a/include/asm-x86/mach-bigsmp/mach_apic.h b/include/asm-x86/bigsmp/apic.h
index 05362d44a3ee..0a9cd7c5ca0c 100644
--- a/include/asm-x86/mach-bigsmp/mach_apic.h
+++ b/include/asm-x86/bigsmp/apic.h
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86__MACH_BIGSMP__MACH_APIC_H | 1 | #ifndef __ASM_MACH_APIC_H |
2 | #define ASM_X86__MACH_BIGSMP__MACH_APIC_H | 2 | #define __ASM_MACH_APIC_H |
3 | 3 | ||
4 | #define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) | 4 | #define xapic_phys_to_log_apicid(cpu) (per_cpu(x86_bios_cpu_apicid, cpu)) |
5 | #define esr_disable (1) | 5 | #define esr_disable (1) |
@@ -11,7 +11,7 @@ static inline int apic_id_registered(void) | |||
11 | 11 | ||
12 | /* Round robin the irqs amoung the online cpus */ | 12 | /* Round robin the irqs amoung the online cpus */ |
13 | static inline cpumask_t target_cpus(void) | 13 | static inline cpumask_t target_cpus(void) |
14 | { | 14 | { |
15 | static unsigned long cpu = NR_CPUS; | 15 | static unsigned long cpu = NR_CPUS; |
16 | do { | 16 | do { |
17 | if (cpu >= NR_CPUS) | 17 | if (cpu >= NR_CPUS) |
@@ -23,7 +23,7 @@ static inline cpumask_t target_cpus(void) | |||
23 | } | 23 | } |
24 | 24 | ||
25 | #undef APIC_DEST_LOGICAL | 25 | #undef APIC_DEST_LOGICAL |
26 | #define APIC_DEST_LOGICAL 0 | 26 | #define APIC_DEST_LOGICAL 0 |
27 | #define TARGET_CPUS (target_cpus()) | 27 | #define TARGET_CPUS (target_cpus()) |
28 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) | 28 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) |
29 | #define INT_DELIVERY_MODE (dest_Fixed) | 29 | #define INT_DELIVERY_MODE (dest_Fixed) |
@@ -141,4 +141,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | |||
141 | return cpuid_apic >> index_msb; | 141 | return cpuid_apic >> index_msb; |
142 | } | 142 | } |
143 | 143 | ||
144 | #endif /* ASM_X86__MACH_BIGSMP__MACH_APIC_H */ | 144 | #endif /* __ASM_MACH_APIC_H */ |
diff --git a/include/asm-x86/bigsmp/apicdef.h b/include/asm-x86/bigsmp/apicdef.h
new file mode 100644
index 000000000000..392c3f5ef2fe
--- /dev/null
+++ b/include/asm-x86/bigsmp/apicdef.h
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_MACH_APICDEF_H | ||
2 | #define __ASM_MACH_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xFF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0xFF); | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif | ||
diff --git a/include/asm-x86/mach-summit/mach_ipi.h b/include/asm-x86/bigsmp/ipi.h
index a3b31c528d90..9404c535b7ec 100644
--- a/include/asm-x86/mach-summit/mach_ipi.h
+++ b/include/asm-x86/bigsmp/ipi.h
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86__MACH_SUMMIT__MACH_IPI_H | 1 | #ifndef __ASM_MACH_IPI_H |
2 | #define ASM_X86__MACH_SUMMIT__MACH_IPI_H | 2 | #define __ASM_MACH_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); | 4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); |
5 | 5 | ||
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector) | |||
22 | send_IPI_mask(cpu_online_map, vector); | 22 | send_IPI_mask(cpu_online_map, vector); |
23 | } | 23 | } |
24 | 24 | ||
25 | #endif /* ASM_X86__MACH_SUMMIT__MACH_IPI_H */ | 25 | #endif /* __ASM_MACH_IPI_H */ |
diff --git a/include/asm-x86/bugs.h b/include/asm-x86/bugs.h
index 4761c461d23a..dc604985f2ad 100644
--- a/include/asm-x86/bugs.h
+++ b/include/asm-x86/bugs.h
@@ -2,6 +2,11 @@ | |||
2 | #define ASM_X86__BUGS_H | 2 | #define ASM_X86__BUGS_H |
3 | 3 | ||
4 | extern void check_bugs(void); | 4 | extern void check_bugs(void); |
5 | |||
6 | #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32) | ||
5 | int ppro_with_ram_bug(void); | 7 | int ppro_with_ram_bug(void); |
8 | #else | ||
9 | static inline int ppro_with_ram_bug(void) { return 0; } | ||
10 | #endif | ||
6 | 11 | ||
7 | #endif /* ASM_X86__BUGS_H */ | 12 | #endif /* ASM_X86__BUGS_H */ |
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 250fa0cb144b..adfeae6586e1 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -6,7 +6,13 @@ | |||
6 | 6 | ||
7 | #include <asm/required-features.h> | 7 | #include <asm/required-features.h> |
8 | 8 | ||
9 | #define NCAPINTS 8 /* N 32-bit words worth of info */ | 9 | #define NCAPINTS 9 /* N 32-bit words worth of info */ |
10 | |||
11 | /* | ||
12 | * Note: If the comment begins with a quoted string, that string is used | ||
13 | * in /proc/cpuinfo instead of the macro name. If the string is "", | ||
14 | * this feature bit is not displayed in /proc/cpuinfo at all. | ||
15 | */ | ||
10 | 16 | ||
11 | /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ | 17 | /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ |
12 | #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ | 18 | #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ |
@@ -14,7 +20,7 @@ | |||
14 | #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ | 20 | #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ |
15 | #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ | 21 | #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ |
16 | #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ | 22 | #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ |
17 | #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */ | 23 | #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */ |
18 | #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ | 24 | #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ |
19 | #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ | 25 | #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */ |
20 | #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ | 26 | #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ |
@@ -23,22 +29,23 @@ | |||
23 | #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ | 29 | #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ |
24 | #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ | 30 | #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ |
25 | #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ | 31 | #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ |
26 | #define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */ | 32 | #define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */ |
33 | /* (plus FCMOVcc, FCOMI with FPU) */ | ||
27 | #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ | 34 | #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ |
28 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ | 35 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ |
29 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ | 36 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ |
30 | #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ | 37 | #define X86_FEATURE_CLFLSH (0*32+19) /* "clflush" CLFLUSH instruction */ |
31 | #define X86_FEATURE_DS (0*32+21) /* Debug Store */ | 38 | #define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */ |
32 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ | 39 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ |
33 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ | 40 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ |
34 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ | 41 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ |
35 | /* of FPU context), and CR4.OSFXSR available */ | 42 | #define X86_FEATURE_XMM (0*32+25) /* "sse" */ |
36 | #define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */ | 43 | #define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */ |
37 | #define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */ | 44 | #define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */ |
38 | #define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */ | ||
39 | #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ | 45 | #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ |
40 | #define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */ | 46 | #define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */ |
41 | #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ | 47 | #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ |
48 | #define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */ | ||
42 | 49 | ||
43 | /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ | 50 | /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ |
44 | /* Don't duplicate feature flags which are redundant with Intel! */ | 51 | /* Don't duplicate feature flags which are redundant with Intel! */ |
@@ -46,7 +53,8 @@ | |||
46 | #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ | 53 | #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ |
47 | #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ | 54 | #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ |
48 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ | 55 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ |
49 | #define X86_FEATURE_GBPAGES (1*32+26) /* GB pages */ | 56 | #define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */ |
57 | #define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */ | ||
50 | #define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ | 58 | #define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ |
51 | #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ | 59 | #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ |
52 | #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ | 60 | #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ |
@@ -64,53 +72,79 @@ | |||
64 | #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ | 72 | #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ |
65 | #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ | 73 | #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ |
66 | /* cpu types for specific tunings: */ | 74 | /* cpu types for specific tunings: */ |
67 | #define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */ | 75 | #define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */ |
68 | #define X86_FEATURE_K7 (3*32+ 5) /* Athlon */ | 76 | #define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */ |
69 | #define X86_FEATURE_P3 (3*32+ 6) /* P3 */ | 77 | #define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */ |
70 | #define X86_FEATURE_P4 (3*32+ 7) /* P4 */ | 78 | #define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */ |
71 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ | 79 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ |
72 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ | 80 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ |
73 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ | 81 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */ |
74 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ | 82 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ |
83 | #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ | ||
75 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ | 84 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ |
76 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ | 85 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ |
77 | #define X86_FEATURE_SYSCALL32 (3*32+14) /* syscall in ia32 userspace */ | 86 | #define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */ |
78 | #define X86_FEATURE_SYSENTER32 (3*32+15) /* sysenter in ia32 userspace */ | 87 | #define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */ |
79 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well on this CPU */ | 88 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */ |
80 | #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* Mfence synchronizes RDTSC */ | 89 | #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */ |
81 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* Lfence synchronizes RDTSC */ | 90 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */ |
82 | #define X86_FEATURE_11AP (3*32+19) /* Bad local APIC aka 11AP */ | 91 | #define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */ |
83 | #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ | 92 | #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ |
84 | #define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */ | 93 | #define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */ |
94 | #define X86_FEATURE_XTOPOLOGY (3*32+21) /* cpu topology enum extensions */ | ||
85 | 95 | ||
86 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 96 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
87 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | 97 | #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ |
88 | #define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */ | 98 | #define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */ |
89 | #define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */ | 99 | #define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */ |
100 | #define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */ | ||
101 | #define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ | ||
102 | #define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */ | ||
103 | #define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */ | ||
90 | #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ | 104 | #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ |
91 | #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ | 105 | #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ |
106 | #define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */ | ||
92 | #define X86_FEATURE_CID (4*32+10) /* Context ID */ | 107 | #define X86_FEATURE_CID (4*32+10) /* Context ID */ |
108 | #define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */ | ||
93 | #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ | 109 | #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ |
94 | #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ | 110 | #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ |
111 | #define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */ | ||
95 | #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ | 112 | #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ |
96 | #define X86_FEATURE_XMM4_2 (4*32+20) /* Streaming SIMD Extensions-4.2 */ | 113 | #define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */ |
114 | #define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */ | ||
115 | #define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ | ||
116 | #define X86_FEATURE_AES (4*32+25) /* AES instructions */ | ||
117 | #define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ | ||
118 | #define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ | ||
119 | #define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ | ||
97 | 120 | ||
98 | /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ | 121 | /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ |
99 | #define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ | 122 | #define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */ |
100 | #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ | 123 | #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */ |
101 | #define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ | 124 | #define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ |
102 | #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ | 125 | #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */ |
103 | #define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ | 126 | #define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ |
104 | #define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ | 127 | #define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ |
105 | #define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */ | 128 | #define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */ |
106 | #define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */ | 129 | #define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */ |
107 | #define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */ | 130 | #define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */ |
108 | #define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */ | 131 | #define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */ |
109 | 132 | ||
110 | /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ | 133 | /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ |
111 | #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ | 134 | #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ |
112 | #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ | 135 | #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ |
113 | #define X86_FEATURE_IBS (6*32+ 10) /* Instruction Based Sampling */ | 136 | #define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */ |
137 | #define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */ | ||
138 | #define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */ | ||
139 | #define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */ | ||
140 | #define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */ | ||
141 | #define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */ | ||
142 | #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ | ||
143 | #define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ | ||
144 | #define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ | ||
145 | #define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */ | ||
146 | #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ | ||
147 | #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ | ||
114 | 148 | ||
115 | /* | 149 | /* |
116 | * Auxiliary flags: Linux defined - For features scattered in various | 150 | * Auxiliary flags: Linux defined - For features scattered in various |
@@ -118,6 +152,13 @@ | |||
118 | */ | 152 | */ |
119 | #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ | 153 | #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ |
120 | 154 | ||
155 | /* Virtualization flags: Linux defined */ | ||
156 | #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ | ||
157 | #define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */ | ||
158 | #define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */ | ||
159 | #define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */ | ||
160 | #define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */ | ||
161 | |||
121 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | 162 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
122 | 163 | ||
123 | #include <linux/bitops.h> | 164 | #include <linux/bitops.h> |
@@ -151,7 +192,7 @@ extern const char * const x86_power_flags[32]; | |||
151 | } while (0) | 192 | } while (0) |
152 | #define setup_force_cpu_cap(bit) do { \ | 193 | #define setup_force_cpu_cap(bit) do { \ |
153 | set_cpu_cap(&boot_cpu_data, bit); \ | 194 | set_cpu_cap(&boot_cpu_data, bit); \ |
154 | clear_bit(bit, (unsigned long *)cleared_cpu_caps); \ | 195 | clear_bit(bit, (unsigned long *)cleared_cpu_caps); \ |
155 | } while (0) | 196 | } while (0) |
156 | 197 | ||
157 | #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) | 198 | #define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU) |
@@ -192,7 +233,10 @@ extern const char * const x86_power_flags[32]; | |||
192 | #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) | 233 | #define cpu_has_gbpages boot_cpu_has(X86_FEATURE_GBPAGES) |
193 | #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) | 234 | #define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON) |
194 | #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) | 235 | #define cpu_has_pat boot_cpu_has(X86_FEATURE_PAT) |
236 | #define cpu_has_xmm4_1 boot_cpu_has(X86_FEATURE_XMM4_1) | ||
195 | #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) | 237 | #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) |
238 | #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) | ||
239 | #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) | ||
196 | 240 | ||
197 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) | 241 | #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) |
198 | # define cpu_has_invlpg 1 | 242 | # define cpu_has_invlpg 1 |
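Two conventions in the hunks above are worth spelling out. First, each constant encodes word*32 + bit into x86_capability[]; X86_FEATURE_X2APIC is 4*32 + 21 = 149, meaning word 4 (CPUID 0x00000001 ECX), bit 21. Second, a quoted string at the start of the comment overrides the /proc/cpuinfo name ("pni" for X86_FEATURE_XMM3), and an empty string ("") hides the flag entirely (as for X86_FEATURE_K8). A small sketch of testing one of the new bits, with the helper function name being illustrative:

#include <asm/processor.h>
#include <asm/cpufeature.h>

static int example_cpu_supports_x2apic(struct cpuinfo_x86 *c)
{
	/* cpu_has() indexes word 149 / 32 == 4 and tests bit 149 % 32 == 21 */
	return cpu_has(c, X86_FEATURE_X2APIC);
}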
diff --git a/include/asm-x86/e820.h b/include/asm-x86/e820.h
index f52daf176bcb..5abbdec06bd2 100644
--- a/include/asm-x86/e820.h
+++ b/include/asm-x86/e820.h
@@ -43,6 +43,7 @@ | |||
43 | #define E820_RESERVED 2 | 43 | #define E820_RESERVED 2 |
44 | #define E820_ACPI 3 | 44 | #define E820_ACPI 3 |
45 | #define E820_NVS 4 | 45 | #define E820_NVS 4 |
46 | #define E820_UNUSABLE 5 | ||
46 | 47 | ||
47 | /* reserved RAM used by kernel itself */ | 48 | /* reserved RAM used by kernel itself */ |
48 | #define E820_RESERVED_KERN 128 | 49 | #define E820_RESERVED_KERN 128 |
@@ -121,6 +122,7 @@ extern void e820_register_active_regions(int nid, unsigned long start_pfn, | |||
121 | extern u64 e820_hole_size(u64 start, u64 end); | 122 | extern u64 e820_hole_size(u64 start, u64 end); |
122 | extern void finish_e820_parsing(void); | 123 | extern void finish_e820_parsing(void); |
123 | extern void e820_reserve_resources(void); | 124 | extern void e820_reserve_resources(void); |
125 | extern void e820_reserve_resources_late(void); | ||
124 | extern void setup_memory_map(void); | 126 | extern void setup_memory_map(void); |
125 | extern char *default_machine_specific_memory_setup(void); | 127 | extern char *default_machine_specific_memory_setup(void); |
126 | extern char *machine_specific_memory_setup(void); | 128 | extern char *machine_specific_memory_setup(void); |
diff --git a/include/asm-x86/mach-es7000/mach_apic.h b/include/asm-x86/es7000/apic.h
index c1f6f682d619..bd2c44d1f7ac 100644
--- a/include/asm-x86/mach-es7000/mach_apic.h
+++ b/include/asm-x86/es7000/apic.h
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86__MACH_ES7000__MACH_APIC_H | 1 | #ifndef __ASM_ES7000_APIC_H |
2 | #define ASM_X86__MACH_ES7000__MACH_APIC_H | 2 | #define __ASM_ES7000_APIC_H |
3 | 3 | ||
4 | #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) | 4 | #define xapic_phys_to_log_apicid(cpu) per_cpu(x86_bios_cpu_apicid, cpu) |
5 | #define esr_disable (1) | 5 | #define esr_disable (1) |
@@ -10,7 +10,7 @@ static inline int apic_id_registered(void) | |||
10 | } | 10 | } |
11 | 11 | ||
12 | static inline cpumask_t target_cpus(void) | 12 | static inline cpumask_t target_cpus(void) |
13 | { | 13 | { |
14 | #if defined CONFIG_ES7000_CLUSTERED_APIC | 14 | #if defined CONFIG_ES7000_CLUSTERED_APIC |
15 | return CPU_MASK_ALL; | 15 | return CPU_MASK_ALL; |
16 | #else | 16 | #else |
@@ -23,24 +23,24 @@ static inline cpumask_t target_cpus(void) | |||
23 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) | 23 | #define APIC_DFR_VALUE (APIC_DFR_CLUSTER) |
24 | #define INT_DELIVERY_MODE (dest_LowestPrio) | 24 | #define INT_DELIVERY_MODE (dest_LowestPrio) |
25 | #define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */ | 25 | #define INT_DEST_MODE (1) /* logical delivery broadcast to all procs */ |
26 | #define NO_BALANCE_IRQ (1) | 26 | #define NO_BALANCE_IRQ (1) |
27 | #undef WAKE_SECONDARY_VIA_INIT | 27 | #undef WAKE_SECONDARY_VIA_INIT |
28 | #define WAKE_SECONDARY_VIA_MIP | 28 | #define WAKE_SECONDARY_VIA_MIP |
29 | #else | 29 | #else |
30 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) | 30 | #define APIC_DFR_VALUE (APIC_DFR_FLAT) |
31 | #define INT_DELIVERY_MODE (dest_Fixed) | 31 | #define INT_DELIVERY_MODE (dest_Fixed) |
32 | #define INT_DEST_MODE (0) /* phys delivery to target procs */ | 32 | #define INT_DEST_MODE (0) /* phys delivery to target procs */ |
33 | #define NO_BALANCE_IRQ (0) | 33 | #define NO_BALANCE_IRQ (0) |
34 | #undef APIC_DEST_LOGICAL | 34 | #undef APIC_DEST_LOGICAL |
35 | #define APIC_DEST_LOGICAL 0x0 | 35 | #define APIC_DEST_LOGICAL 0x0 |
36 | #define WAKE_SECONDARY_VIA_INIT | 36 | #define WAKE_SECONDARY_VIA_INIT |
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) | 39 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) |
40 | { | 40 | { |
41 | return 0; | 41 | return 0; |
42 | } | 42 | } |
43 | static inline unsigned long check_apicid_present(int bit) | 43 | static inline unsigned long check_apicid_present(int bit) |
44 | { | 44 | { |
45 | return physid_isset(bit, phys_cpu_present_map); | 45 | return physid_isset(bit, phys_cpu_present_map); |
46 | } | 46 | } |
@@ -80,7 +80,7 @@ static inline void setup_apic_routing(void) | |||
80 | { | 80 | { |
81 | int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); | 81 | int apic = per_cpu(x86_bios_cpu_apicid, smp_processor_id()); |
82 | printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", | 82 | printk("Enabling APIC mode: %s. Using %d I/O APICs, target cpus %lx\n", |
83 | (apic_version[apic] == 0x14) ? | 83 | (apic_version[apic] == 0x14) ? |
84 | "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); | 84 | "Physical Cluster" : "Logical Cluster", nr_ioapics, cpus_addr(TARGET_CPUS)[0]); |
85 | } | 85 | } |
86 | 86 | ||
@@ -141,7 +141,7 @@ static inline void setup_portio_remap(void) | |||
141 | extern unsigned int boot_cpu_physical_apicid; | 141 | extern unsigned int boot_cpu_physical_apicid; |
142 | static inline int check_phys_apicid_present(int cpu_physical_apicid) | 142 | static inline int check_phys_apicid_present(int cpu_physical_apicid) |
143 | { | 143 | { |
144 | boot_cpu_physical_apicid = GET_APIC_ID(read_apic_id()); | 144 | boot_cpu_physical_apicid = read_apic_id(); |
145 | return (1); | 145 | return (1); |
146 | } | 146 | } |
147 | 147 | ||
@@ -150,7 +150,7 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | |||
150 | int num_bits_set; | 150 | int num_bits_set; |
151 | int cpus_found = 0; | 151 | int cpus_found = 0; |
152 | int cpu; | 152 | int cpu; |
153 | int apicid; | 153 | int apicid; |
154 | 154 | ||
155 | num_bits_set = cpus_weight(cpumask); | 155 | num_bits_set = cpus_weight(cpumask); |
156 | /* Return id to all */ | 156 | /* Return id to all */ |
@@ -160,16 +160,16 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | |||
160 | #else | 160 | #else |
161 | return cpu_to_logical_apicid(0); | 161 | return cpu_to_logical_apicid(0); |
162 | #endif | 162 | #endif |
163 | /* | 163 | /* |
164 | * The cpus in the mask must all be on the apic cluster. If are not | 164 | * The cpus in the mask must all be on the apic cluster. If are not |
165 | * on the same apicid cluster return default value of TARGET_CPUS. | 165 | * on the same apicid cluster return default value of TARGET_CPUS. |
166 | */ | 166 | */ |
167 | cpu = first_cpu(cpumask); | 167 | cpu = first_cpu(cpumask); |
168 | apicid = cpu_to_logical_apicid(cpu); | 168 | apicid = cpu_to_logical_apicid(cpu); |
169 | while (cpus_found < num_bits_set) { | 169 | while (cpus_found < num_bits_set) { |
170 | if (cpu_isset(cpu, cpumask)) { | 170 | if (cpu_isset(cpu, cpumask)) { |
171 | int new_apicid = cpu_to_logical_apicid(cpu); | 171 | int new_apicid = cpu_to_logical_apicid(cpu); |
172 | if (apicid_cluster(apicid) != | 172 | if (apicid_cluster(apicid) != |
173 | apicid_cluster(new_apicid)){ | 173 | apicid_cluster(new_apicid)){ |
174 | printk ("%s: Not a valid mask!\n",__FUNCTION__); | 174 | printk ("%s: Not a valid mask!\n",__FUNCTION__); |
175 | #if defined CONFIG_ES7000_CLUSTERED_APIC | 175 | #if defined CONFIG_ES7000_CLUSTERED_APIC |
@@ -191,4 +191,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | |||
191 | return cpuid_apic >> index_msb; | 191 | return cpuid_apic >> index_msb; |
192 | } | 192 | } |
193 | 193 | ||
194 | #endif /* ASM_X86__MACH_ES7000__MACH_APIC_H */ | 194 | #endif /* __ASM_ES7000_APIC_H */ |
diff --git a/include/asm-x86/es7000/apicdef.h b/include/asm-x86/es7000/apicdef.h
new file mode 100644
index 000000000000..8b234a3cb851
--- /dev/null
+++ b/include/asm-x86/es7000/apicdef.h
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_ES7000_APICDEF_H | ||
2 | #define __ASM_ES7000_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xFF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0xFF); | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif | ||
diff --git a/include/asm-x86/mach-es7000/mach_ipi.h b/include/asm-x86/es7000/ipi.h
index 3a21240e03dc..632a955fcc0a 100644
--- a/include/asm-x86/mach-es7000/mach_ipi.h
+++ b/include/asm-x86/es7000/ipi.h
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86__MACH_ES7000__MACH_IPI_H | 1 | #ifndef __ASM_ES7000_IPI_H |
2 | #define ASM_X86__MACH_ES7000__MACH_IPI_H | 2 | #define __ASM_ES7000_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); | 4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); |
5 | 5 | ||
@@ -21,4 +21,4 @@ static inline void send_IPI_all(int vector) | |||
21 | send_IPI_mask(cpu_online_map, vector); | 21 | send_IPI_mask(cpu_online_map, vector); |
22 | } | 22 | } |
23 | 23 | ||
24 | #endif /* ASM_X86__MACH_ES7000__MACH_IPI_H */ | 24 | #endif /* __ASM_ES7000_IPI_H */ |
diff --git a/include/asm-x86/mach-es7000/mach_mpparse.h b/include/asm-x86/es7000/mpparse.h
index befde24705b7..7b5c889d8e7d 100644
--- a/include/asm-x86/mach-es7000/mach_mpparse.h
+++ b/include/asm-x86/es7000/mpparse.h
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86__MACH_ES7000__MACH_MPPARSE_H | 1 | #ifndef __ASM_ES7000_MPPARSE_H |
2 | #define ASM_X86__MACH_ES7000__MACH_MPPARSE_H | 2 | #define __ASM_ES7000_MPPARSE_H |
3 | 3 | ||
4 | #include <linux/acpi.h> | 4 | #include <linux/acpi.h> |
5 | 5 | ||
@@ -26,4 +26,4 @@ static inline int es7000_check_dsdt(void) | |||
26 | } | 26 | } |
27 | #endif | 27 | #endif |
28 | 28 | ||
29 | #endif /* ASM_X86__MACH_ES7000__MACH_MPPARSE_H */ | 29 | #endif /* __ASM_MACH_MPPARSE_H */ |
diff --git a/include/asm-x86/mach-es7000/mach_wakecpu.h b/include/asm-x86/es7000/wakecpu.h
index 97c776ce13f2..3ffc5a7bf667 100644
--- a/include/asm-x86/mach-es7000/mach_wakecpu.h
+++ b/include/asm-x86/es7000/wakecpu.h
@@ -1,7 +1,7 @@ | |||
1 | #ifndef ASM_X86__MACH_ES7000__MACH_WAKECPU_H | 1 | #ifndef __ASM_ES7000_WAKECPU_H |
2 | #define ASM_X86__MACH_ES7000__MACH_WAKECPU_H | 2 | #define __ASM_ES7000_WAKECPU_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * This file copes with machines that wakeup secondary CPUs by the | 5 | * This file copes with machines that wakeup secondary CPUs by the |
6 | * INIT, INIT, STARTUP sequence. | 6 | * INIT, INIT, STARTUP sequence. |
7 | */ | 7 | */ |
@@ -56,4 +56,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) | |||
56 | #define inquire_remote_apic(apicid) {} | 56 | #define inquire_remote_apic(apicid) {} |
57 | #endif | 57 | #endif |
58 | 58 | ||
59 | #endif /* ASM_X86__MACH_ES7000__MACH_WAKECPU_H */ | 59 | #endif /* __ASM_MACH_WAKECPU_H */ |
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h
index 25097a8cc5ef..ed6a4886c082 100644
--- a/include/asm-x86/genapic_64.h
+++ b/include/asm-x86/genapic_64.h
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | struct genapic { | 15 | struct genapic { |
16 | char *name; | 16 | char *name; |
17 | int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id); | ||
17 | u32 int_delivery_mode; | 18 | u32 int_delivery_mode; |
18 | u32 int_dest_mode; | 19 | u32 int_dest_mode; |
19 | int (*apic_id_registered)(void); | 20 | int (*apic_id_registered)(void); |
@@ -24,17 +25,24 @@ struct genapic { | |||
24 | void (*send_IPI_mask)(cpumask_t mask, int vector); | 25 | void (*send_IPI_mask)(cpumask_t mask, int vector); |
25 | void (*send_IPI_allbutself)(int vector); | 26 | void (*send_IPI_allbutself)(int vector); |
26 | void (*send_IPI_all)(int vector); | 27 | void (*send_IPI_all)(int vector); |
28 | void (*send_IPI_self)(int vector); | ||
27 | /* */ | 29 | /* */ |
28 | unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); | 30 | unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask); |
29 | unsigned int (*phys_pkg_id)(int index_msb); | 31 | unsigned int (*phys_pkg_id)(int index_msb); |
32 | unsigned int (*get_apic_id)(unsigned long x); | ||
33 | unsigned long (*set_apic_id)(unsigned int id); | ||
34 | unsigned long apic_id_mask; | ||
30 | }; | 35 | }; |
31 | 36 | ||
32 | extern struct genapic *genapic; | 37 | extern struct genapic *genapic; |
33 | 38 | ||
34 | extern struct genapic apic_flat; | 39 | extern struct genapic apic_flat; |
35 | extern struct genapic apic_physflat; | 40 | extern struct genapic apic_physflat; |
41 | extern struct genapic apic_x2apic_cluster; | ||
42 | extern struct genapic apic_x2apic_phys; | ||
36 | extern int acpi_madt_oem_check(char *, char *); | 43 | extern int acpi_madt_oem_check(char *, char *); |
37 | 44 | ||
45 | extern void apic_send_IPI_self(int vector); | ||
38 | enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; | 46 | enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC}; |
39 | extern enum uv_system_type get_uv_system_type(void); | 47 | extern enum uv_system_type get_uv_system_type(void); |
40 | extern int is_uv_system(void); | 48 | extern int is_uv_system(void); |
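The new ->acpi_madt_oem_check() hook, together with the exported apic_x2apic_cluster and apic_x2apic_phys instances, lets the 64-bit genapic be selected by probing each driver instead of through one central check. A minimal sketch of such a probe loop; the driver array and function name are assumptions, only the struct members and the exported instances come from this header:

static struct genapic *apic_probe[] __initdata = {
	&apic_x2apic_cluster,
	&apic_x2apic_phys,
	&apic_physflat,
	&apic_flat,
};

void __init probe_genapic(char *oem_id, char *oem_table_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(apic_probe); i++) {
		if (apic_probe[i]->acpi_madt_oem_check(oem_id, oem_table_id)) {
			genapic = apic_probe[i];
			printk(KERN_INFO "Setting APIC routing to %s\n",
			       genapic->name);
			break;
		}
	}
}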
diff --git a/include/asm-x86/hw_irq.h b/include/asm-x86/hw_irq.h
index 65997b15d56a..50f6e0316b50 100644
--- a/include/asm-x86/hw_irq.h
+++ b/include/asm-x86/hw_irq.h
@@ -64,7 +64,6 @@ extern unsigned long io_apic_irqs; | |||
64 | extern void init_VISWS_APIC_irqs(void); | 64 | extern void init_VISWS_APIC_irqs(void); |
65 | extern void setup_IO_APIC(void); | 65 | extern void setup_IO_APIC(void); |
66 | extern void disable_IO_APIC(void); | 66 | extern void disable_IO_APIC(void); |
67 | extern void print_IO_APIC(void); | ||
68 | extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); | 67 | extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn); |
69 | extern void setup_ioapic_dest(void); | 68 | extern void setup_ioapic_dest(void); |
70 | 69 | ||
@@ -73,7 +72,9 @@ extern void enable_IO_APIC(void); | |||
73 | #endif | 72 | #endif |
74 | 73 | ||
75 | /* IPI functions */ | 74 | /* IPI functions */ |
75 | #ifdef CONFIG_X86_32 | ||
76 | extern void send_IPI_self(int vector); | 76 | extern void send_IPI_self(int vector); |
77 | #endif | ||
77 | extern void send_IPI(int dest, int vector); | 78 | extern void send_IPI(int dest, int vector); |
78 | 79 | ||
79 | /* Statistics */ | 80 | /* Statistics */ |
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 1ecdc3ed96e4..9ba862a4eac0 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -19,7 +19,9 @@ | |||
19 | #include <asm/sigcontext.h> | 19 | #include <asm/sigcontext.h> |
20 | #include <asm/user.h> | 20 | #include <asm/user.h> |
21 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
22 | #include <asm/xsave.h> | ||
22 | 23 | ||
24 | extern unsigned int sig_xstate_size; | ||
23 | extern void fpu_init(void); | 25 | extern void fpu_init(void); |
24 | extern void mxcsr_feature_mask_init(void); | 26 | extern void mxcsr_feature_mask_init(void); |
25 | extern int init_fpu(struct task_struct *child); | 27 | extern int init_fpu(struct task_struct *child); |
@@ -31,12 +33,18 @@ extern user_regset_active_fn fpregs_active, xfpregs_active; | |||
31 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; | 33 | extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get; |
32 | extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set; | 34 | extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set; |
33 | 35 | ||
36 | extern struct _fpx_sw_bytes fx_sw_reserved; | ||
34 | #ifdef CONFIG_IA32_EMULATION | 37 | #ifdef CONFIG_IA32_EMULATION |
38 | extern unsigned int sig_xstate_ia32_size; | ||
39 | extern struct _fpx_sw_bytes fx_sw_reserved_ia32; | ||
35 | struct _fpstate_ia32; | 40 | struct _fpstate_ia32; |
36 | extern int save_i387_ia32(struct _fpstate_ia32 __user *buf); | 41 | struct _xstate_ia32; |
37 | extern int restore_i387_ia32(struct _fpstate_ia32 __user *buf); | 42 | extern int save_i387_xstate_ia32(void __user *buf); |
43 | extern int restore_i387_xstate_ia32(void __user *buf); | ||
38 | #endif | 44 | #endif |
39 | 45 | ||
46 | #define X87_FSW_ES (1 << 7) /* Exception Summary */ | ||
47 | |||
40 | #ifdef CONFIG_X86_64 | 48 | #ifdef CONFIG_X86_64 |
41 | 49 | ||
42 | /* Ignore delayed exceptions from user space */ | 50 | /* Ignore delayed exceptions from user space */ |
@@ -47,7 +55,7 @@ static inline void tolerant_fwait(void) | |||
47 | _ASM_EXTABLE(1b, 2b)); | 55 | _ASM_EXTABLE(1b, 2b)); |
48 | } | 56 | } |
49 | 57 | ||
50 | static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | 58 | static inline int fxrstor_checking(struct i387_fxsave_struct *fx) |
51 | { | 59 | { |
52 | int err; | 60 | int err; |
53 | 61 | ||
@@ -67,15 +75,31 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx) | |||
67 | return err; | 75 | return err; |
68 | } | 76 | } |
69 | 77 | ||
70 | #define X87_FSW_ES (1 << 7) /* Exception Summary */ | 78 | static inline int restore_fpu_checking(struct task_struct *tsk) |
79 | { | ||
80 | if (task_thread_info(tsk)->status & TS_XSAVE) | ||
81 | return xrstor_checking(&tsk->thread.xstate->xsave); | ||
82 | else | ||
83 | return fxrstor_checking(&tsk->thread.xstate->fxsave); | ||
84 | } | ||
71 | 85 | ||
72 | /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception | 86 | /* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception |
73 | is pending. Clear the x87 state here by setting it to fixed | 87 | is pending. Clear the x87 state here by setting it to fixed |
74 | values. The kernel data segment can be sometimes 0 and sometimes | 88 | values. The kernel data segment can be sometimes 0 and sometimes |
75 | new user value. Both should be ok. | 89 | new user value. Both should be ok. |
76 | Use the PDA as safe address because it should be already in L1. */ | 90 | Use the PDA as safe address because it should be already in L1. */ |
77 | static inline void clear_fpu_state(struct i387_fxsave_struct *fx) | 91 | static inline void clear_fpu_state(struct task_struct *tsk) |
78 | { | 92 | { |
93 | struct xsave_struct *xstate = &tsk->thread.xstate->xsave; | ||
94 | struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; | ||
95 | |||
96 | /* | ||
97 | * xsave header may indicate the init state of the FP. | ||
98 | */ | ||
99 | if ((task_thread_info(tsk)->status & TS_XSAVE) && | ||
100 | !(xstate->xsave_hdr.xstate_bv & XSTATE_FP)) | ||
101 | return; | ||
102 | |||
79 | if (unlikely(fx->swd & X87_FSW_ES)) | 103 | if (unlikely(fx->swd & X87_FSW_ES)) |
80 | asm volatile("fnclex"); | 104 | asm volatile("fnclex"); |
81 | alternative_input(ASM_NOP8 ASM_NOP2, | 105 | alternative_input(ASM_NOP8 ASM_NOP2, |
@@ -84,7 +108,7 @@ static inline void clear_fpu_state(struct i387_fxsave_struct *fx) | |||
84 | X86_FEATURE_FXSAVE_LEAK); | 108 | X86_FEATURE_FXSAVE_LEAK); |
85 | } | 109 | } |
86 | 110 | ||
87 | static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | 111 | static inline int fxsave_user(struct i387_fxsave_struct __user *fx) |
88 | { | 112 | { |
89 | int err; | 113 | int err; |
90 | 114 | ||
@@ -108,7 +132,7 @@ static inline int save_i387_checking(struct i387_fxsave_struct __user *fx) | |||
108 | return err; | 132 | return err; |
109 | } | 133 | } |
110 | 134 | ||
111 | static inline void __save_init_fpu(struct task_struct *tsk) | 135 | static inline void fxsave(struct task_struct *tsk) |
112 | { | 136 | { |
113 | /* Using "rex64; fxsave %0" is broken because, if the memory operand | 137 | /* Using "rex64; fxsave %0" is broken because, if the memory operand |
114 | uses any extended registers for addressing, a second REX prefix | 138 | uses any extended registers for addressing, a second REX prefix |
@@ -133,7 +157,16 @@ static inline void __save_init_fpu(struct task_struct *tsk) | |||
133 | : "=m" (tsk->thread.xstate->fxsave) | 157 | : "=m" (tsk->thread.xstate->fxsave) |
134 | : "cdaSDb" (&tsk->thread.xstate->fxsave)); | 158 | : "cdaSDb" (&tsk->thread.xstate->fxsave)); |
135 | #endif | 159 | #endif |
136 | clear_fpu_state(&tsk->thread.xstate->fxsave); | 160 | } |
161 | |||
162 | static inline void __save_init_fpu(struct task_struct *tsk) | ||
163 | { | ||
164 | if (task_thread_info(tsk)->status & TS_XSAVE) | ||
165 | xsave(tsk); | ||
166 | else | ||
167 | fxsave(tsk); | ||
168 | |||
169 | clear_fpu_state(tsk); | ||
137 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 170 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
138 | } | 171 | } |
139 | 172 | ||
@@ -148,6 +181,10 @@ static inline void tolerant_fwait(void) | |||
148 | 181 | ||
149 | static inline void restore_fpu(struct task_struct *tsk) | 182 | static inline void restore_fpu(struct task_struct *tsk) |
150 | { | 183 | { |
184 | if (task_thread_info(tsk)->status & TS_XSAVE) { | ||
185 | xrstor_checking(&tsk->thread.xstate->xsave); | ||
186 | return; | ||
187 | } | ||
151 | /* | 188 | /* |
152 | * The "nop" is needed to make the instructions the same | 189 | * The "nop" is needed to make the instructions the same |
153 | * length. | 190 | * length. |
@@ -173,6 +210,27 @@ static inline void restore_fpu(struct task_struct *tsk) | |||
173 | */ | 210 | */ |
174 | static inline void __save_init_fpu(struct task_struct *tsk) | 211 | static inline void __save_init_fpu(struct task_struct *tsk) |
175 | { | 212 | { |
213 | if (task_thread_info(tsk)->status & TS_XSAVE) { | ||
214 | struct xsave_struct *xstate = &tsk->thread.xstate->xsave; | ||
215 | struct i387_fxsave_struct *fx = &tsk->thread.xstate->fxsave; | ||
216 | |||
217 | xsave(tsk); | ||
218 | |||
219 | /* | ||
220 | * xsave header may indicate the init state of the FP. | ||
221 | */ | ||
222 | if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP)) | ||
223 | goto end; | ||
224 | |||
225 | if (unlikely(fx->swd & X87_FSW_ES)) | ||
226 | asm volatile("fnclex"); | ||
227 | |||
228 | /* | ||
229 | * we can do a simple return here or be paranoid :) | ||
230 | */ | ||
231 | goto clear_state; | ||
232 | } | ||
233 | |||
176 | /* Use more nops than strictly needed in case the compiler | 234 | /* Use more nops than strictly needed in case the compiler |
177 | varies code */ | 235 | varies code */ |
178 | alternative_input( | 236 | alternative_input( |
@@ -182,6 +240,7 @@ static inline void __save_init_fpu(struct task_struct *tsk) | |||
182 | X86_FEATURE_FXSR, | 240 | X86_FEATURE_FXSR, |
183 | [fx] "m" (tsk->thread.xstate->fxsave), | 241 | [fx] "m" (tsk->thread.xstate->fxsave), |
184 | [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory"); | 242 | [fsw] "m" (tsk->thread.xstate->fxsave.swd) : "memory"); |
243 | clear_state: | ||
185 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception | 244 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception |
186 | is pending. Clear the x87 state here by setting it to fixed | 245 | is pending. Clear the x87 state here by setting it to fixed |
187 | values. safe_address is a random variable that should be in L1 */ | 246 | values. safe_address is a random variable that should be in L1 */ |
@@ -191,16 +250,17 @@ static inline void __save_init_fpu(struct task_struct *tsk) | |||
191 | "fildl %[addr]", /* set F?P to defined value */ | 250 | "fildl %[addr]", /* set F?P to defined value */ |
192 | X86_FEATURE_FXSAVE_LEAK, | 251 | X86_FEATURE_FXSAVE_LEAK, |
193 | [addr] "m" (safe_address)); | 252 | [addr] "m" (safe_address)); |
253 | end: | ||
194 | task_thread_info(tsk)->status &= ~TS_USEDFPU; | 254 | task_thread_info(tsk)->status &= ~TS_USEDFPU; |
195 | } | 255 | } |
196 | 256 | ||
257 | #endif /* CONFIG_X86_64 */ | ||
258 | |||
197 | /* | 259 | /* |
198 | * Signal frame handlers... | 260 | * Signal frame handlers... |
199 | */ | 261 | */ |
200 | extern int save_i387(struct _fpstate __user *buf); | 262 | extern int save_i387_xstate(void __user *buf); |
201 | extern int restore_i387(struct _fpstate __user *buf); | 263 | extern int restore_i387_xstate(void __user *buf); |
202 | |||
203 | #endif /* CONFIG_X86_64 */ | ||
204 | 264 | ||
205 | static inline void __unlazy_fpu(struct task_struct *tsk) | 265 | static inline void __unlazy_fpu(struct task_struct *tsk) |
206 | { | 266 | { |
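On the 64-bit side, restore_fpu_checking() now takes the task rather than a bare fxsave area and picks XRSTOR or FXRSTOR based on the TS_XSAVE thread flag. A hedged sketch of a typical caller, the device-not-available (#NM) path; the function shown is a simplification for illustration, only restore_fpu_checking() and TS_USEDFPU come from this header:

void math_state_restore(void)
{
	struct task_struct *tsk = current;

	clts();				/* allow FPU/SSE instructions again */
	if (unlikely(restore_fpu_checking(tsk))) {
		stts();			/* re-disable on corrupted state */
		force_sig(SIGSEGV, tsk);
		return;
	}
	task_thread_info(tsk)->status |= TS_USEDFPU;
}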
diff --git a/include/asm-x86/i8259.h b/include/asm-x86/i8259.h
index c586559a6957..23c1b3baaecd 100644
--- a/include/asm-x86/i8259.h
+++ b/include/asm-x86/i8259.h
@@ -57,4 +57,7 @@ static inline void outb_pic(unsigned char value, unsigned int port) | |||
57 | 57 | ||
58 | extern struct irq_chip i8259A_chip; | 58 | extern struct irq_chip i8259A_chip; |
59 | 59 | ||
60 | extern void mask_8259A(void); | ||
61 | extern void unmask_8259A(void); | ||
62 | |||
60 | #endif /* ASM_X86__I8259_H */ | 63 | #endif /* ASM_X86__I8259_H */ |
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index be62847ab07e..8ec68a50cf10 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -107,6 +107,20 @@ struct IO_APIC_route_entry { | |||
107 | 107 | ||
108 | } __attribute__ ((packed)); | 108 | } __attribute__ ((packed)); |
109 | 109 | ||
110 | struct IR_IO_APIC_route_entry { | ||
111 | __u64 vector : 8, | ||
112 | zero : 3, | ||
113 | index2 : 1, | ||
114 | delivery_status : 1, | ||
115 | polarity : 1, | ||
116 | irr : 1, | ||
117 | trigger : 1, | ||
118 | mask : 1, | ||
119 | reserved : 31, | ||
120 | format : 1, | ||
121 | index : 15; | ||
122 | } __attribute__ ((packed)); | ||
123 | |||
110 | #ifdef CONFIG_X86_IO_APIC | 124 | #ifdef CONFIG_X86_IO_APIC |
111 | 125 | ||
112 | /* | 126 | /* |
@@ -183,6 +197,12 @@ extern int io_apic_set_pci_routing(int ioapic, int pin, int irq, | |||
183 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); | 197 | extern int (*ioapic_renumber_irq)(int ioapic, int irq); |
184 | extern void ioapic_init_mappings(void); | 198 | extern void ioapic_init_mappings(void); |
185 | 199 | ||
200 | #ifdef CONFIG_X86_64 | ||
201 | extern int save_mask_IO_APIC_setup(void); | ||
202 | extern void restore_IO_APIC_setup(void); | ||
203 | extern void reinit_intr_remapped_IO_APIC(int); | ||
204 | #endif | ||
205 | |||
186 | #else /* !CONFIG_X86_IO_APIC */ | 206 | #else /* !CONFIG_X86_IO_APIC */ |
187 | #define io_apic_assign_pci_irqs 0 | 207 | #define io_apic_assign_pci_irqs 0 |
188 | static const int timer_through_8259 = 0; | 208 | static const int timer_through_8259 = 0; |
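struct IR_IO_APIC_route_entry above is the I/O APIC routing-entry layout used when interrupt remapping is enabled: the bit-fields sum to exactly 64 bits, and the remapping-table handle is split, with index carrying the low 15 bits and index2 the remaining top bit. A small sketch of a compile-time layout check; the helper name is illustrative:

#include <linux/kernel.h>	/* BUILD_BUG_ON() */

static inline void ir_rte_layout_check(void)
{
	BUILD_BUG_ON(sizeof(struct IR_IO_APIC_route_entry) != sizeof(u64));
}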
diff --git a/include/asm-x86/ipi.h b/include/asm-x86/ipi.h
index c1b226797518..30a692cfaff8 100644
--- a/include/asm-x86/ipi.h
+++ b/include/asm-x86/ipi.h
@@ -49,6 +49,12 @@ static inline int __prepare_ICR2(unsigned int mask) | |||
49 | return SET_APIC_DEST_FIELD(mask); | 49 | return SET_APIC_DEST_FIELD(mask); |
50 | } | 50 | } |
51 | 51 | ||
52 | static inline void __xapic_wait_icr_idle(void) | ||
53 | { | ||
54 | while (native_apic_mem_read(APIC_ICR) & APIC_ICR_BUSY) | ||
55 | cpu_relax(); | ||
56 | } | ||
57 | |||
52 | static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, | 58 | static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, |
53 | unsigned int dest) | 59 | unsigned int dest) |
54 | { | 60 | { |
@@ -64,7 +70,7 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, | |||
64 | /* | 70 | /* |
65 | * Wait for idle. | 71 | * Wait for idle. |
66 | */ | 72 | */ |
67 | apic_wait_icr_idle(); | 73 | __xapic_wait_icr_idle(); |
68 | 74 | ||
69 | /* | 75 | /* |
70 | * No need to touch the target chip field | 76 | * No need to touch the target chip field |
@@ -74,7 +80,7 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector, | |||
74 | /* | 80 | /* |
75 | * Send the IPI. The write to APIC_ICR fires this off. | 81 | * Send the IPI. The write to APIC_ICR fires this off. |
76 | */ | 82 | */ |
77 | apic_write(APIC_ICR, cfg); | 83 | native_apic_mem_write(APIC_ICR, cfg); |
78 | } | 84 | } |
79 | 85 | ||
80 | /* | 86 | /* |
@@ -92,13 +98,13 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector, | |||
92 | if (unlikely(vector == NMI_VECTOR)) | 98 | if (unlikely(vector == NMI_VECTOR)) |
93 | safe_apic_wait_icr_idle(); | 99 | safe_apic_wait_icr_idle(); |
94 | else | 100 | else |
95 | apic_wait_icr_idle(); | 101 | __xapic_wait_icr_idle(); |
96 | 102 | ||
97 | /* | 103 | /* |
98 | * prepare target chip field | 104 | * prepare target chip field |
99 | */ | 105 | */ |
100 | cfg = __prepare_ICR2(mask); | 106 | cfg = __prepare_ICR2(mask); |
101 | apic_write(APIC_ICR2, cfg); | 107 | native_apic_mem_write(APIC_ICR2, cfg); |
102 | 108 | ||
103 | /* | 109 | /* |
104 | * program the ICR | 110 | * program the ICR |
@@ -108,7 +114,7 @@ static inline void __send_IPI_dest_field(unsigned int mask, int vector, | |||
108 | /* | 114 | /* |
109 | * Send the IPI. The write to APIC_ICR fires this off. | 115 | * Send the IPI. The write to APIC_ICR fires this off. |
110 | */ | 116 | */ |
111 | apic_write(APIC_ICR, cfg); | 117 | native_apic_mem_write(APIC_ICR, cfg); |
112 | } | 118 | } |
113 | 119 | ||
114 | static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) | 120 | static inline void send_IPI_mask_sequence(cpumask_t mask, int vector) |
diff --git a/include/asm-x86/irq_remapping.h b/include/asm-x86/irq_remapping.h new file mode 100644 index 000000000000..78242c6ffa58 --- /dev/null +++ b/include/asm-x86/irq_remapping.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _ASM_IRQ_REMAPPING_H | ||
2 | #define _ASM_IRQ_REMAPPING_H | ||
3 | |||
4 | extern int x2apic; | ||
5 | |||
6 | #define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8) | ||
7 | |||
8 | #endif | ||
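A note on the new IRTE_DEST() macro above: in legacy xAPIC mode the destination APIC ID is expected in bits 15:8 of the IRTE destination field, while in x2APIC mode the full 32-bit ID is used as-is, which is all the conditional shift expresses. A minimal stand-alone sketch of the packing (the APIC ID 0x12 is arbitrary, and the x2apic variable stands in for the kernel flag):

#include <stdio.h>
#include <stdint.h>

static int x2apic;				/* stand-in for the kernel's x2apic flag */
#define IRTE_DEST(dest)	((x2apic) ? (dest) : (dest) << 8)

int main(void)
{
	uint32_t apicid = 0x12;			/* arbitrary example APIC ID */

	x2apic = 0;
	printf("xAPIC  dest field: 0x%08x\n", (unsigned)IRTE_DEST(apicid));	/* 0x00001200 */
	x2apic = 1;
	printf("x2APIC dest field: 0x%08x\n", (unsigned)IRTE_DEST(apicid));	/* 0x00000012 */
	return 0;
}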
diff --git a/include/asm-x86/mach-bigsmp/mach_apicdef.h b/include/asm-x86/mach-bigsmp/mach_apicdef.h deleted file mode 100644 index 811935d9d49b..000000000000 --- a/include/asm-x86/mach-bigsmp/mach_apicdef.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | #ifndef ASM_X86__MACH_BIGSMP__MACH_APICDEF_H | ||
2 | #define ASM_X86__MACH_BIGSMP__MACH_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xFF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0xFF); | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif /* ASM_X86__MACH_BIGSMP__MACH_APICDEF_H */ | ||
diff --git a/include/asm-x86/mach-default/mach_apic.h b/include/asm-x86/mach-default/mach_apic.h index b615f40736be..2a330a41b3dd 100644 --- a/include/asm-x86/mach-default/mach_apic.h +++ b/include/asm-x86/mach-default/mach_apic.h | |||
@@ -30,6 +30,8 @@ static inline cpumask_t target_cpus(void) | |||
30 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) | 30 | #define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid) |
31 | #define phys_pkg_id (genapic->phys_pkg_id) | 31 | #define phys_pkg_id (genapic->phys_pkg_id) |
32 | #define vector_allocation_domain (genapic->vector_allocation_domain) | 32 | #define vector_allocation_domain (genapic->vector_allocation_domain) |
33 | #define read_apic_id() (GET_APIC_ID(apic_read(APIC_ID))) | ||
34 | #define send_IPI_self (genapic->send_IPI_self) | ||
33 | extern void setup_apic_routing(void); | 35 | extern void setup_apic_routing(void); |
34 | #else | 36 | #else |
35 | #define INT_DELIVERY_MODE dest_LowestPrio | 37 | #define INT_DELIVERY_MODE dest_LowestPrio |
@@ -54,7 +56,7 @@ static inline void init_apic_ldr(void) | |||
54 | 56 | ||
55 | static inline int apic_id_registered(void) | 57 | static inline int apic_id_registered(void) |
56 | { | 58 | { |
57 | return physid_isset(GET_APIC_ID(read_apic_id()), phys_cpu_present_map); | 59 | return physid_isset(read_apic_id(), phys_cpu_present_map); |
58 | } | 60 | } |
59 | 61 | ||
60 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | 62 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) |
diff --git a/include/asm-x86/mach-default/mach_apicdef.h b/include/asm-x86/mach-default/mach_apicdef.h index 936704f816d6..0c2d41c41b20 100644 --- a/include/asm-x86/mach-default/mach_apicdef.h +++ b/include/asm-x86/mach-default/mach_apicdef.h | |||
@@ -4,9 +4,9 @@ | |||
4 | #include <asm/apic.h> | 4 | #include <asm/apic.h> |
5 | 5 | ||
6 | #ifdef CONFIG_X86_64 | 6 | #ifdef CONFIG_X86_64 |
7 | #define APIC_ID_MASK (0xFFu<<24) | 7 | #define APIC_ID_MASK (genapic->apic_id_mask) |
8 | #define GET_APIC_ID(x) (((x)>>24)&0xFFu) | 8 | #define GET_APIC_ID(x) (genapic->get_apic_id(x)) |
9 | #define SET_APIC_ID(x) (((x)<<24)) | 9 | #define SET_APIC_ID(x) (genapic->set_apic_id(x)) |
10 | #else | 10 | #else |
11 | #define APIC_ID_MASK (0xF<<24) | 11 | #define APIC_ID_MASK (0xF<<24) |
12 | static inline unsigned get_apic_id(unsigned long x) | 12 | static inline unsigned get_apic_id(unsigned long x) |
diff --git a/include/asm-x86/mach-es7000/mach_apicdef.h b/include/asm-x86/mach-es7000/mach_apicdef.h deleted file mode 100644 index a07e56744028..000000000000 --- a/include/asm-x86/mach-es7000/mach_apicdef.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | #ifndef ASM_X86__MACH_ES7000__MACH_APICDEF_H | ||
2 | #define ASM_X86__MACH_ES7000__MACH_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xFF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0xFF); | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif /* ASM_X86__MACH_ES7000__MACH_APICDEF_H */ | ||
diff --git a/include/asm-x86/mach-numaq/mach_mpparse.h b/include/asm-x86/mach-numaq/mach_mpparse.h deleted file mode 100644 index 74ade184920b..000000000000 --- a/include/asm-x86/mach-numaq/mach_mpparse.h +++ /dev/null | |||
@@ -1,7 +0,0 @@ | |||
1 | #ifndef ASM_X86__MACH_NUMAQ__MACH_MPPARSE_H | ||
2 | #define ASM_X86__MACH_NUMAQ__MACH_MPPARSE_H | ||
3 | |||
4 | extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, | ||
5 | char *productid); | ||
6 | |||
7 | #endif /* ASM_X86__MACH_NUMAQ__MACH_MPPARSE_H */ | ||
diff --git a/include/asm-x86/mach-summit/mach_apicdef.h b/include/asm-x86/mach-summit/mach_apicdef.h deleted file mode 100644 index d4bc8590c4f6..000000000000 --- a/include/asm-x86/mach-summit/mach_apicdef.h +++ /dev/null | |||
@@ -1,13 +0,0 @@ | |||
1 | #ifndef ASM_X86__MACH_SUMMIT__MACH_APICDEF_H | ||
2 | #define ASM_X86__MACH_SUMMIT__MACH_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xFF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (((x)>>24)&0xFF); | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif /* ASM_X86__MACH_SUMMIT__MACH_APICDEF_H */ | ||
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h index 118da365e371..be2241a818f1 100644 --- a/include/asm-x86/mpspec.h +++ b/include/asm-x86/mpspec.h | |||
@@ -5,11 +5,12 @@ | |||
5 | 5 | ||
6 | #include <asm/mpspec_def.h> | 6 | #include <asm/mpspec_def.h> |
7 | 7 | ||
8 | extern int apic_version[MAX_APICS]; | ||
9 | |||
8 | #ifdef CONFIG_X86_32 | 10 | #ifdef CONFIG_X86_32 |
9 | #include <mach_mpspec.h> | 11 | #include <mach_mpspec.h> |
10 | 12 | ||
11 | extern unsigned int def_to_bigsmp; | 13 | extern unsigned int def_to_bigsmp; |
12 | extern int apic_version[MAX_APICS]; | ||
13 | extern u8 apicid_2_node[]; | 14 | extern u8 apicid_2_node[]; |
14 | extern int pic_mode; | 15 | extern int pic_mode; |
15 | 16 | ||
diff --git a/include/asm-x86/msidef.h b/include/asm-x86/msidef.h index 3139666a94fa..ed9190246876 100644 --- a/include/asm-x86/msidef.h +++ b/include/asm-x86/msidef.h | |||
@@ -48,4 +48,8 @@ | |||
48 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ | 48 | #define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \ |
49 | MSI_ADDR_DEST_ID_MASK) | 49 | MSI_ADDR_DEST_ID_MASK) |
50 | 50 | ||
51 | #define MSI_ADDR_IR_EXT_INT (1 << 4) | ||
52 | #define MSI_ADDR_IR_SHV (1 << 3) | ||
53 | #define MSI_ADDR_IR_INDEX1(index) ((index & 0x8000) >> 13) | ||
54 | #define MSI_ADDR_IR_INDEX2(index) ((index & 0x7fff) << 5) | ||
51 | #endif /* ASM_X86__MSIDEF_H */ | 55 | #endif /* ASM_X86__MSIDEF_H */ |
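The four MSI_ADDR_IR_* additions let a remappable-format MSI address carry a 16-bit interrupt-remapping-table handle: bit 15 of the handle travels via INDEX1 (address bit 2), the low 15 bits via INDEX2 (address bits 19:5), with SHV and the remappable-format bit OR'd in. A small stand-alone illustration of the bit packing; the handle value is arbitrary:

#include <stdio.h>
#include <stdint.h>

#define MSI_ADDR_IR_EXT_INT		(1 << 4)
#define MSI_ADDR_IR_SHV			(1 << 3)
#define MSI_ADDR_IR_INDEX1(index)	(((index) & 0x8000) >> 13)
#define MSI_ADDR_IR_INDEX2(index)	(((index) & 0x7fff) << 5)

int main(void)
{
	uint16_t handle = 0x9abc;		/* arbitrary example IRT handle */
	uint32_t bits = MSI_ADDR_IR_EXT_INT | MSI_ADDR_IR_SHV |
			MSI_ADDR_IR_INDEX1(handle) |
			MSI_ADDR_IR_INDEX2(handle);

	printf("handle 0x%04x -> MSI address bits 0x%08x\n", handle, (unsigned)bits);
	return 0;
}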
diff --git a/include/asm-x86/msr-index.h b/include/asm-x86/msr-index.h index 3052f058ab06..0bb43301a202 100644 --- a/include/asm-x86/msr-index.h +++ b/include/asm-x86/msr-index.h | |||
@@ -176,6 +176,7 @@ | |||
176 | #define MSR_IA32_TSC 0x00000010 | 176 | #define MSR_IA32_TSC 0x00000010 |
177 | #define MSR_IA32_PLATFORM_ID 0x00000017 | 177 | #define MSR_IA32_PLATFORM_ID 0x00000017 |
178 | #define MSR_IA32_EBL_CR_POWERON 0x0000002a | 178 | #define MSR_IA32_EBL_CR_POWERON 0x0000002a |
179 | #define MSR_IA32_FEATURE_CONTROL 0x0000003a | ||
179 | 180 | ||
180 | #define MSR_IA32_APICBASE 0x0000001b | 181 | #define MSR_IA32_APICBASE 0x0000001b |
181 | #define MSR_IA32_APICBASE_BSP (1<<8) | 182 | #define MSR_IA32_APICBASE_BSP (1<<8) |
@@ -310,4 +311,19 @@ | |||
310 | /* Geode defined MSRs */ | 311 | /* Geode defined MSRs */ |
311 | #define MSR_GEODE_BUSCONT_CONF0 0x00001900 | 312 | #define MSR_GEODE_BUSCONT_CONF0 0x00001900 |
312 | 313 | ||
314 | /* Intel VT MSRs */ | ||
315 | #define MSR_IA32_VMX_BASIC 0x00000480 | ||
316 | #define MSR_IA32_VMX_PINBASED_CTLS 0x00000481 | ||
317 | #define MSR_IA32_VMX_PROCBASED_CTLS 0x00000482 | ||
318 | #define MSR_IA32_VMX_EXIT_CTLS 0x00000483 | ||
319 | #define MSR_IA32_VMX_ENTRY_CTLS 0x00000484 | ||
320 | #define MSR_IA32_VMX_MISC 0x00000485 | ||
321 | #define MSR_IA32_VMX_CR0_FIXED0 0x00000486 | ||
322 | #define MSR_IA32_VMX_CR0_FIXED1 0x00000487 | ||
323 | #define MSR_IA32_VMX_CR4_FIXED0 0x00000488 | ||
324 | #define MSR_IA32_VMX_CR4_FIXED1 0x00000489 | ||
325 | #define MSR_IA32_VMX_VMCS_ENUM 0x0000048a | ||
326 | #define MSR_IA32_VMX_PROCBASED_CTLS2 0x0000048b | ||
327 | #define MSR_IA32_VMX_EPT_VPID_CAP 0x0000048c | ||
328 | |||
313 | #endif /* ASM_X86__MSR_INDEX_H */ | 329 | #endif /* ASM_X86__MSR_INDEX_H */ |
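The new MSR_IA32_VMX_* constants describe VMX capabilities to consumers such as KVM. As a hedged kernel-context fragment (not part of this patch set), this is the kind of probe such a consumer performs with them; per the SDM, bits 30:0 of IA32_VMX_BASIC hold the VMCS revision identifier:

u32 vmx_msr_low, vmx_msr_high;

rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
printk(KERN_INFO "VMCS revision id: 0x%x\n", vmx_msr_low & 0x7fffffff);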
diff --git a/include/asm-x86/mach-numaq/mach_apic.h b/include/asm-x86/numaq/apic.h index 7a0d39edfcfa..a8344ba6ea15 100644 --- a/include/asm-x86/mach-numaq/mach_apic.h +++ b/include/asm-x86/numaq/apic.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86__MACH_NUMAQ__MACH_APIC_H | 1 | #ifndef __ASM_NUMAQ_APIC_H |
2 | #define ASM_X86__MACH_NUMAQ__MACH_APIC_H | 2 | #define __ASM_NUMAQ_APIC_H |
3 | 3 | ||
4 | #include <asm/io.h> | 4 | #include <asm/io.h> |
5 | #include <linux/mmzone.h> | 5 | #include <linux/mmzone.h> |
@@ -135,4 +135,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | |||
135 | return cpuid_apic >> index_msb; | 135 | return cpuid_apic >> index_msb; |
136 | } | 136 | } |
137 | 137 | ||
138 | #endif /* ASM_X86__MACH_NUMAQ__MACH_APIC_H */ | 138 | #endif /* __ASM_NUMAQ_APIC_H */ |
diff --git a/include/asm-x86/mach-numaq/mach_apicdef.h b/include/asm-x86/numaq/apicdef.h index f870ec5f7782..e012a46cc22a 100644 --- a/include/asm-x86/mach-numaq/mach_apicdef.h +++ b/include/asm-x86/numaq/apicdef.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86__MACH_NUMAQ__MACH_APICDEF_H | 1 | #ifndef __ASM_NUMAQ_APICDEF_H |
2 | #define ASM_X86__MACH_NUMAQ__MACH_APICDEF_H | 2 | #define __ASM_NUMAQ_APICDEF_H |
3 | 3 | ||
4 | 4 | ||
5 | #define APIC_ID_MASK (0xF<<24) | 5 | #define APIC_ID_MASK (0xF<<24) |
@@ -11,4 +11,4 @@ static inline unsigned get_apic_id(unsigned long x) | |||
11 | 11 | ||
12 | #define GET_APIC_ID(x) get_apic_id(x) | 12 | #define GET_APIC_ID(x) get_apic_id(x) |
13 | 13 | ||
14 | #endif /* ASM_X86__MACH_NUMAQ__MACH_APICDEF_H */ | 14 | #endif |
diff --git a/include/asm-x86/mach-numaq/mach_ipi.h b/include/asm-x86/numaq/ipi.h index 1e835823f4bc..935588d286cf 100644 --- a/include/asm-x86/mach-numaq/mach_ipi.h +++ b/include/asm-x86/numaq/ipi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86__MACH_NUMAQ__MACH_IPI_H | 1 | #ifndef __ASM_NUMAQ_IPI_H |
2 | #define ASM_X86__MACH_NUMAQ__MACH_IPI_H | 2 | #define __ASM_NUMAQ_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t, int vector); | 4 | void send_IPI_mask_sequence(cpumask_t, int vector); |
5 | 5 | ||
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector) | |||
22 | send_IPI_mask(cpu_online_map, vector); | 22 | send_IPI_mask(cpu_online_map, vector); |
23 | } | 23 | } |
24 | 24 | ||
25 | #endif /* ASM_X86__MACH_NUMAQ__MACH_IPI_H */ | 25 | #endif /* __ASM_NUMAQ_IPI_H */ |
diff --git a/include/asm-x86/numaq/mpparse.h b/include/asm-x86/numaq/mpparse.h new file mode 100644 index 000000000000..252292e077b6 --- /dev/null +++ b/include/asm-x86/numaq/mpparse.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef __ASM_NUMAQ_MPPARSE_H | ||
2 | #define __ASM_NUMAQ_MPPARSE_H | ||
3 | |||
4 | extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem, | ||
5 | char *productid); | ||
6 | |||
7 | #endif /* __ASM_NUMAQ_MPPARSE_H */ | ||
diff --git a/include/asm-x86/mach-numaq/mach_wakecpu.h b/include/asm-x86/numaq/wakecpu.h index 0db8cea643c0..c577bda5b1c5 100644 --- a/include/asm-x86/mach-numaq/mach_wakecpu.h +++ b/include/asm-x86/numaq/wakecpu.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86__MACH_NUMAQ__MACH_WAKECPU_H | 1 | #ifndef __ASM_NUMAQ_WAKECPU_H |
2 | #define ASM_X86__MACH_NUMAQ__MACH_WAKECPU_H | 2 | #define __ASM_NUMAQ_WAKECPU_H |
3 | 3 | ||
4 | /* This file copes with machines that wakeup secondary CPUs by NMIs */ | 4 | /* This file copes with machines that wakeup secondary CPUs by NMIs */ |
5 | 5 | ||
@@ -40,4 +40,4 @@ static inline void restore_NMI_vector(unsigned short *high, unsigned short *low) | |||
40 | 40 | ||
41 | #define inquire_remote_apic(apicid) {} | 41 | #define inquire_remote_apic(apicid) {} |
42 | 42 | ||
43 | #endif /* ASM_X86__MACH_NUMAQ__MACH_WAKECPU_H */ | 43 | #endif /* __ASM_NUMAQ_WAKECPU_H */ |
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h index 891971f57d35..d7d358a43996 100644 --- a/include/asm-x86/paravirt.h +++ b/include/asm-x86/paravirt.h | |||
@@ -201,12 +201,6 @@ struct pv_irq_ops { | |||
201 | 201 | ||
202 | struct pv_apic_ops { | 202 | struct pv_apic_ops { |
203 | #ifdef CONFIG_X86_LOCAL_APIC | 203 | #ifdef CONFIG_X86_LOCAL_APIC |
204 | /* | ||
205 | * Direct APIC operations, principally for VMI. Ideally | ||
206 | * these shouldn't be in this interface. | ||
207 | */ | ||
208 | void (*apic_write)(unsigned long reg, u32 v); | ||
209 | u32 (*apic_read)(unsigned long reg); | ||
210 | void (*setup_boot_clock)(void); | 204 | void (*setup_boot_clock)(void); |
211 | void (*setup_secondary_clock)(void); | 205 | void (*setup_secondary_clock)(void); |
212 | 206 | ||
@@ -910,19 +904,6 @@ static inline void slow_down_io(void) | |||
910 | } | 904 | } |
911 | 905 | ||
912 | #ifdef CONFIG_X86_LOCAL_APIC | 906 | #ifdef CONFIG_X86_LOCAL_APIC |
913 | /* | ||
914 | * Basic functions accessing APICs. | ||
915 | */ | ||
916 | static inline void apic_write(unsigned long reg, u32 v) | ||
917 | { | ||
918 | PVOP_VCALL2(pv_apic_ops.apic_write, reg, v); | ||
919 | } | ||
920 | |||
921 | static inline u32 apic_read(unsigned long reg) | ||
922 | { | ||
923 | return PVOP_CALL1(unsigned long, pv_apic_ops.apic_read, reg); | ||
924 | } | ||
925 | |||
926 | static inline void setup_boot_clock(void) | 907 | static inline void setup_boot_clock(void) |
927 | { | 908 | { |
928 | PVOP_VCALL0(pv_apic_ops.setup_boot_clock); | 909 | PVOP_VCALL0(pv_apic_ops.setup_boot_clock); |
diff --git a/include/asm-x86/processor-cyrix.h b/include/asm-x86/processor-cyrix.h index 97568ada1f97..1198f2a0e42c 100644 --- a/include/asm-x86/processor-cyrix.h +++ b/include/asm-x86/processor-cyrix.h | |||
@@ -28,3 +28,11 @@ static inline void setCx86(u8 reg, u8 data) | |||
28 | outb(reg, 0x22); | 28 | outb(reg, 0x22); |
29 | outb(data, 0x23); | 29 | outb(data, 0x23); |
30 | } | 30 | } |
31 | |||
32 | #define getCx86_old(reg) ({ outb((reg), 0x22); inb(0x23); }) | ||
33 | |||
34 | #define setCx86_old(reg, data) do { \ | ||
35 | outb((reg), 0x22); \ | ||
36 | outb((data), 0x23); \ | ||
37 | } while (0) | ||
38 | |||
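getCx86_old()/setCx86_old() keep the original one-shot index/data accesses to ports 0x22/0x23 alongside the existing inline helpers. A hedged kernel-context usage fragment; the register index 0xC3 corresponds to the Cyrix CCR3 configuration register, and the bit written back is purely illustrative:

u8 ccr3;

ccr3 = getCx86_old(0xC3);		/* outb index to 0x22, inb data from 0x23 */
setCx86_old(0xC3, ccr3 | 0x10);		/* write back a modified value, illustrative only */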
diff --git a/include/asm-x86/processor-flags.h b/include/asm-x86/processor-flags.h index 5dd79774f693..dc5f0712f9fa 100644 --- a/include/asm-x86/processor-flags.h +++ b/include/asm-x86/processor-flags.h | |||
@@ -59,6 +59,7 @@ | |||
59 | #define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */ | 59 | #define X86_CR4_OSFXSR 0x00000200 /* enable fast FPU save and restore */ |
60 | #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ | 60 | #define X86_CR4_OSXMMEXCPT 0x00000400 /* enable unmasked SSE exceptions */ |
61 | #define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ | 61 | #define X86_CR4_VMXE 0x00002000 /* enable VMX virtualization */ |
62 | #define X86_CR4_OSXSAVE 0x00040000 /* enable xsave and xrestore */ | ||
62 | 63 | ||
63 | /* | 64 | /* |
64 | * x86-64 Task Priority Register, CR8 | 65 | * x86-64 Task Priority Register, CR8 |
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h index 5eaf9bf0a623..c7d35464a4bb 100644 --- a/include/asm-x86/processor.h +++ b/include/asm-x86/processor.h | |||
@@ -76,11 +76,11 @@ struct cpuinfo_x86 { | |||
76 | int x86_tlbsize; | 76 | int x86_tlbsize; |
77 | __u8 x86_virt_bits; | 77 | __u8 x86_virt_bits; |
78 | __u8 x86_phys_bits; | 78 | __u8 x86_phys_bits; |
79 | #endif | ||
79 | /* CPUID returned core id bits: */ | 80 | /* CPUID returned core id bits: */ |
80 | __u8 x86_coreid_bits; | 81 | __u8 x86_coreid_bits; |
81 | /* Max extended CPUID function supported: */ | 82 | /* Max extended CPUID function supported: */ |
82 | __u32 extended_cpuid_level; | 83 | __u32 extended_cpuid_level; |
83 | #endif | ||
84 | /* Maximum supported CPUID level, -1=no CPUID: */ | 84 | /* Maximum supported CPUID level, -1=no CPUID: */ |
85 | int cpuid_level; | 85 | int cpuid_level; |
86 | __u32 x86_capability[NCAPINTS]; | 86 | __u32 x86_capability[NCAPINTS]; |
@@ -166,11 +166,8 @@ extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c); | |||
166 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | 166 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); |
167 | extern unsigned short num_cache_leaves; | 167 | extern unsigned short num_cache_leaves; |
168 | 168 | ||
169 | #if defined(CONFIG_X86_HT) || defined(CONFIG_X86_64) | 169 | extern void detect_extended_topology(struct cpuinfo_x86 *c); |
170 | extern void detect_ht(struct cpuinfo_x86 *c); | 170 | extern void detect_ht(struct cpuinfo_x86 *c); |
171 | #else | ||
172 | static inline void detect_ht(struct cpuinfo_x86 *c) {} | ||
173 | #endif | ||
174 | 171 | ||
175 | static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, | 172 | static inline void native_cpuid(unsigned int *eax, unsigned int *ebx, |
176 | unsigned int *ecx, unsigned int *edx) | 173 | unsigned int *ecx, unsigned int *edx) |
@@ -327,7 +324,12 @@ struct i387_fxsave_struct { | |||
327 | /* 16*16 bytes for each XMM-reg = 256 bytes: */ | 324 | /* 16*16 bytes for each XMM-reg = 256 bytes: */ |
328 | u32 xmm_space[64]; | 325 | u32 xmm_space[64]; |
329 | 326 | ||
330 | u32 padding[24]; | 327 | u32 padding[12]; |
328 | |||
329 | union { | ||
330 | u32 padding1[12]; | ||
331 | u32 sw_reserved[12]; | ||
332 | }; | ||
331 | 333 | ||
332 | } __attribute__((aligned(16))); | 334 | } __attribute__((aligned(16))); |
333 | 335 | ||
@@ -351,10 +353,23 @@ struct i387_soft_struct { | |||
351 | u32 entry_eip; | 353 | u32 entry_eip; |
352 | }; | 354 | }; |
353 | 355 | ||
356 | struct xsave_hdr_struct { | ||
357 | u64 xstate_bv; | ||
358 | u64 reserved1[2]; | ||
359 | u64 reserved2[5]; | ||
360 | } __attribute__((packed)); | ||
361 | |||
362 | struct xsave_struct { | ||
363 | struct i387_fxsave_struct i387; | ||
364 | struct xsave_hdr_struct xsave_hdr; | ||
365 | /* new processor state extensions will go here */ | ||
366 | } __attribute__ ((packed, aligned (64))); | ||
367 | |||
354 | union thread_xstate { | 368 | union thread_xstate { |
355 | struct i387_fsave_struct fsave; | 369 | struct i387_fsave_struct fsave; |
356 | struct i387_fxsave_struct fxsave; | 370 | struct i387_fxsave_struct fxsave; |
357 | struct i387_soft_struct soft; | 371 | struct i387_soft_struct soft; |
372 | struct xsave_struct xsave; | ||
358 | }; | 373 | }; |
359 | 374 | ||
360 | #ifdef CONFIG_X86_64 | 375 | #ifdef CONFIG_X86_64 |
diff --git a/include/asm-x86/setup.h b/include/asm-x86/setup.h index 9030cb73c4d7..11b6cc14b289 100644 --- a/include/asm-x86/setup.h +++ b/include/asm-x86/setup.h | |||
@@ -38,6 +38,7 @@ struct x86_quirks { | |||
38 | void (*mpc_oem_pci_bus)(struct mpc_config_bus *m); | 38 | void (*mpc_oem_pci_bus)(struct mpc_config_bus *m); |
39 | void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable, | 39 | void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable, |
40 | unsigned short oemsize); | 40 | unsigned short oemsize); |
41 | int (*setup_ioapic_ids)(void); | ||
41 | }; | 42 | }; |
42 | 43 | ||
43 | extern struct x86_quirks *x86_quirks; | 44 | extern struct x86_quirks *x86_quirks; |
diff --git a/include/asm-x86/sigcontext.h b/include/asm-x86/sigcontext.h index 24879c85b291..ee813f4fe5d5 100644 --- a/include/asm-x86/sigcontext.h +++ b/include/asm-x86/sigcontext.h | |||
@@ -4,6 +4,40 @@ | |||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | #include <asm/types.h> | 5 | #include <asm/types.h> |
6 | 6 | ||
7 | #define FP_XSTATE_MAGIC1 0x46505853U | ||
8 | #define FP_XSTATE_MAGIC2 0x46505845U | ||
9 | #define FP_XSTATE_MAGIC2_SIZE sizeof(FP_XSTATE_MAGIC2) | ||
10 | |||
11 | /* | ||
12 | * bytes 464..511 in the current 512-byte layout of fxsave/fxrstor frame | ||
13 | * are reserved for SW usage. On CPUs supporting xsave/xrstor, these bytes | ||
14 | * are used to extend the fpstate pointer in the sigcontext, which now | ||
15 | * includes the extended state information along with fpstate information. | ||
16 | * | ||
17 | * Presence of FP_XSTATE_MAGIC1 at the beginning of this SW reserved | ||
18 | * area and FP_XSTATE_MAGIC2 at the end of memory layout | ||
19 | * (extended_size - FP_XSTATE_MAGIC2_SIZE) indicates the presence of the | ||
20 | * extended state information in the memory layout pointed by the fpstate | ||
21 | * pointer in sigcontext. | ||
22 | */ | ||
23 | struct _fpx_sw_bytes { | ||
24 | __u32 magic1; /* FP_XSTATE_MAGIC1 */ | ||
25 | __u32 extended_size; /* total size of the layout referred by | ||
26 | * fpstate pointer in the sigcontext. | ||
27 | */ | ||
28 | __u64 xstate_bv; | ||
29 | /* feature bit mask (including fp/sse/extended | ||
30 | * state) that is present in the memory | ||
31 | * layout. | ||
32 | */ | ||
33 | __u32 xstate_size; /* actual xsave state size, based on the | ||
34 | * features saved in the layout. | ||
35 | * 'extended_size' will be greater than | ||
36 | * 'xstate_size'. | ||
37 | */ | ||
38 | __u32 padding[7]; /* for future use. */ | ||
39 | }; | ||
40 | |||
7 | #ifdef __i386__ | 41 | #ifdef __i386__ |
8 | /* | 42 | /* |
9 | * As documented in the iBCS2 standard.. | 43 | * As documented in the iBCS2 standard.. |
@@ -53,7 +87,13 @@ struct _fpstate { | |||
53 | unsigned long reserved; | 87 | unsigned long reserved; |
54 | struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ | 88 | struct _fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */ |
55 | struct _xmmreg _xmm[8]; | 89 | struct _xmmreg _xmm[8]; |
56 | unsigned long padding[56]; | 90 | unsigned long padding1[44]; |
91 | |||
92 | union { | ||
93 | unsigned long padding2[12]; | ||
94 | struct _fpx_sw_bytes sw_reserved; /* represents the extended | ||
95 | * state info */ | ||
96 | }; | ||
57 | }; | 97 | }; |
58 | 98 | ||
59 | #define X86_FXSR_MAGIC 0x0000 | 99 | #define X86_FXSR_MAGIC 0x0000 |
@@ -79,7 +119,15 @@ struct sigcontext { | |||
79 | unsigned long flags; | 119 | unsigned long flags; |
80 | unsigned long sp_at_signal; | 120 | unsigned long sp_at_signal; |
81 | unsigned short ss, __ssh; | 121 | unsigned short ss, __ssh; |
82 | struct _fpstate __user *fpstate; | 122 | |
123 | /* | ||
124 | * fpstate is really (struct _fpstate *) or (struct _xstate *) | ||
125 | * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved | ||
126 | * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end | ||
127 | * of extended memory layout. See comments at the definition of | ||
128 | * (struct _fpx_sw_bytes) | ||
129 | */ | ||
130 | void __user *fpstate; /* zero when no FPU/extended context */ | ||
83 | unsigned long oldmask; | 131 | unsigned long oldmask; |
84 | unsigned long cr2; | 132 | unsigned long cr2; |
85 | }; | 133 | }; |
@@ -130,7 +178,12 @@ struct _fpstate { | |||
130 | __u32 mxcsr_mask; | 178 | __u32 mxcsr_mask; |
131 | __u32 st_space[32]; /* 8*16 bytes for each FP-reg */ | 179 | __u32 st_space[32]; /* 8*16 bytes for each FP-reg */ |
132 | __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ | 180 | __u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg */ |
133 | __u32 reserved2[24]; | 181 | __u32 reserved2[12]; |
182 | union { | ||
183 | __u32 reserved3[12]; | ||
184 | struct _fpx_sw_bytes sw_reserved; /* represents the extended | ||
185 | * state information */ | ||
186 | }; | ||
134 | }; | 187 | }; |
135 | 188 | ||
136 | #ifdef __KERNEL__ | 189 | #ifdef __KERNEL__ |
@@ -161,7 +214,15 @@ struct sigcontext { | |||
161 | unsigned long trapno; | 214 | unsigned long trapno; |
162 | unsigned long oldmask; | 215 | unsigned long oldmask; |
163 | unsigned long cr2; | 216 | unsigned long cr2; |
164 | struct _fpstate __user *fpstate; /* zero when no FPU context */ | 217 | |
218 | /* | ||
219 | * fpstate is really (struct _fpstate *) or (struct _xstate *) | ||
220 | * depending on the FP_XSTATE_MAGIC1 encoded in the SW reserved | ||
221 | * bytes of (struct _fpstate) and FP_XSTATE_MAGIC2 present at the end | ||
222 | * of extended memory layout. See comments at the definition of | ||
223 | * (struct _fpx_sw_bytes) | ||
224 | */ | ||
225 | void __user *fpstate; /* zero when no FPU/extended context */ | ||
165 | unsigned long reserved1[8]; | 226 | unsigned long reserved1[8]; |
166 | }; | 227 | }; |
167 | #else /* __KERNEL__ */ | 228 | #else /* __KERNEL__ */ |
@@ -202,4 +263,22 @@ struct sigcontext { | |||
202 | 263 | ||
203 | #endif /* !__i386__ */ | 264 | #endif /* !__i386__ */ |
204 | 265 | ||
266 | struct _xsave_hdr { | ||
267 | __u64 xstate_bv; | ||
268 | __u64 reserved1[2]; | ||
269 | __u64 reserved2[5]; | ||
270 | }; | ||
271 | |||
272 | /* | ||
273 | * Extended state pointed by the fpstate pointer in the sigcontext. | ||
274 | * In addition to the fpstate, information encoded in the xstate_hdr | ||
275 | * indicates the presence of other extended state information | ||
276 | * supported by the processor and OS. | ||
277 | */ | ||
278 | struct _xstate { | ||
279 | struct _fpstate fpstate; | ||
280 | struct _xsave_hdr xstate_hdr; | ||
281 | /* new processor state extensions go here */ | ||
282 | }; | ||
283 | |||
205 | #endif /* ASM_X86__SIGCONTEXT_H */ | 284 | #endif /* ASM_X86__SIGCONTEXT_H */ |
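To make the MAGIC1/MAGIC2 convention above concrete, here is a stand-alone sketch of the check a signal-frame consumer is expected to perform: MAGIC1 must appear in the software-reserved bytes and MAGIC2 must sit at fpstate + extended_size - FP_XSTATE_MAGIC2_SIZE. The buffer and sizes below are illustrative, not a real fxsave frame:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define FP_XSTATE_MAGIC1	0x46505853U
#define FP_XSTATE_MAGIC2	0x46505845U
#define FP_XSTATE_MAGIC2_SIZE	sizeof(uint32_t)

/* returns 1 if the frame carries extended (xsave) state, 0 otherwise */
static int fpstate_is_extended(const unsigned char *fpstate,
			       uint32_t sw_magic1, uint32_t extended_size)
{
	uint32_t magic2;

	if (sw_magic1 != FP_XSTATE_MAGIC1)
		return 0;
	memcpy(&magic2, fpstate + extended_size - FP_XSTATE_MAGIC2_SIZE,
	       sizeof(magic2));
	return magic2 == FP_XSTATE_MAGIC2;
}

int main(void)
{
	unsigned char frame[1024] = { 0 };	/* illustrative buffer, not a real frame */
	uint32_t magic2 = FP_XSTATE_MAGIC2;

	memcpy(frame + sizeof(frame) - FP_XSTATE_MAGIC2_SIZE, &magic2, sizeof(magic2));
	printf("extended? %d\n",
	       fpstate_is_extended(frame, FP_XSTATE_MAGIC1, sizeof(frame)));
	return 0;
}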
diff --git a/include/asm-x86/sigcontext32.h b/include/asm-x86/sigcontext32.h index 4e2ec732dd01..8c347032c2f2 100644 --- a/include/asm-x86/sigcontext32.h +++ b/include/asm-x86/sigcontext32.h | |||
@@ -40,7 +40,11 @@ struct _fpstate_ia32 { | |||
40 | __u32 reserved; | 40 | __u32 reserved; |
41 | struct _fpxreg _fxsr_st[8]; | 41 | struct _fpxreg _fxsr_st[8]; |
42 | struct _xmmreg _xmm[8]; /* It's actually 16 */ | 42 | struct _xmmreg _xmm[8]; /* It's actually 16 */ |
43 | __u32 padding[56]; | 43 | __u32 padding[44]; |
44 | union { | ||
45 | __u32 padding2[12]; | ||
46 | struct _fpx_sw_bytes sw_reserved; | ||
47 | }; | ||
44 | }; | 48 | }; |
45 | 49 | ||
46 | struct sigcontext_ia32 { | 50 | struct sigcontext_ia32 { |
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h index 04f84f4e2c8b..29324c103341 100644 --- a/include/asm-x86/smp.h +++ b/include/asm-x86/smp.h | |||
@@ -167,30 +167,33 @@ extern int safe_smp_processor_id(void); | |||
167 | 167 | ||
168 | #ifdef CONFIG_X86_LOCAL_APIC | 168 | #ifdef CONFIG_X86_LOCAL_APIC |
169 | 169 | ||
170 | #ifndef CONFIG_X86_64 | ||
170 | static inline int logical_smp_processor_id(void) | 171 | static inline int logical_smp_processor_id(void) |
171 | { | 172 | { |
172 | /* we don't want to mark this access volatile - bad code generation */ | 173 | /* we don't want to mark this access volatile - bad code generation */ |
173 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); | 174 | return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR)); |
174 | } | 175 | } |
175 | 176 | ||
176 | #ifndef CONFIG_X86_64 | 177 | #include <mach_apicdef.h> |
177 | static inline unsigned int read_apic_id(void) | 178 | static inline unsigned int read_apic_id(void) |
178 | { | 179 | { |
179 | return *(u32 *)(APIC_BASE + APIC_ID); | 180 | unsigned int reg; |
181 | |||
182 | reg = *(u32 *)(APIC_BASE + APIC_ID); | ||
183 | |||
184 | return GET_APIC_ID(reg); | ||
180 | } | 185 | } |
181 | #else | ||
182 | extern unsigned int read_apic_id(void); | ||
183 | #endif | 186 | #endif |
184 | 187 | ||
185 | 188 | ||
186 | # ifdef APIC_DEFINITION | 189 | # if defined(APIC_DEFINITION) || defined(CONFIG_X86_64) |
187 | extern int hard_smp_processor_id(void); | 190 | extern int hard_smp_processor_id(void); |
188 | # else | 191 | # else |
189 | # include <mach_apicdef.h> | 192 | #include <mach_apicdef.h> |
190 | static inline int hard_smp_processor_id(void) | 193 | static inline int hard_smp_processor_id(void) |
191 | { | 194 | { |
192 | /* we don't want to mark this access volatile - bad code generation */ | 195 | /* we don't want to mark this access volatile - bad code generation */ |
193 | return GET_APIC_ID(read_apic_id()); | 196 | return read_apic_id(); |
194 | } | 197 | } |
195 | # endif /* APIC_DEFINITION */ | 198 | # endif /* APIC_DEFINITION */ |
196 | 199 | ||
diff --git a/include/asm-x86/mach-summit/mach_apic.h b/include/asm-x86/summit/apic.h index 7a66758d701d..c5b2e4b10358 100644 --- a/include/asm-x86/mach-summit/mach_apic.h +++ b/include/asm-x86/summit/apic.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86__MACH_SUMMIT__MACH_APIC_H | 1 | #ifndef __ASM_SUMMIT_APIC_H |
2 | #define ASM_X86__MACH_SUMMIT__MACH_APIC_H | 2 | #define __ASM_SUMMIT_APIC_H |
3 | 3 | ||
4 | #include <asm/smp.h> | 4 | #include <asm/smp.h> |
5 | 5 | ||
@@ -21,7 +21,7 @@ static inline cpumask_t target_cpus(void) | |||
21 | * Just start on cpu 0. IRQ balancing will spread load | 21 | * Just start on cpu 0. IRQ balancing will spread load |
22 | */ | 22 | */ |
23 | return cpumask_of_cpu(0); | 23 | return cpumask_of_cpu(0); |
24 | } | 24 | } |
25 | #define TARGET_CPUS (target_cpus()) | 25 | #define TARGET_CPUS (target_cpus()) |
26 | 26 | ||
27 | #define INT_DELIVERY_MODE (dest_LowestPrio) | 27 | #define INT_DELIVERY_MODE (dest_LowestPrio) |
@@ -30,10 +30,10 @@ static inline cpumask_t target_cpus(void) | |||
30 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) | 30 | static inline unsigned long check_apicid_used(physid_mask_t bitmap, int apicid) |
31 | { | 31 | { |
32 | return 0; | 32 | return 0; |
33 | } | 33 | } |
34 | 34 | ||
35 | /* we don't use the phys_cpu_present_map to indicate apicid presence */ | 35 | /* we don't use the phys_cpu_present_map to indicate apicid presence */ |
36 | static inline unsigned long check_apicid_present(int bit) | 36 | static inline unsigned long check_apicid_present(int bit) |
37 | { | 37 | { |
38 | return 1; | 38 | return 1; |
39 | } | 39 | } |
@@ -122,7 +122,7 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_id_map) | |||
122 | 122 | ||
123 | static inline physid_mask_t apicid_to_cpu_present(int apicid) | 123 | static inline physid_mask_t apicid_to_cpu_present(int apicid) |
124 | { | 124 | { |
125 | return physid_mask_of_physid(apicid); | 125 | return physid_mask_of_physid(0); |
126 | } | 126 | } |
127 | 127 | ||
128 | static inline void setup_portio_remap(void) | 128 | static inline void setup_portio_remap(void) |
@@ -143,22 +143,22 @@ static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | |||
143 | int num_bits_set; | 143 | int num_bits_set; |
144 | int cpus_found = 0; | 144 | int cpus_found = 0; |
145 | int cpu; | 145 | int cpu; |
146 | int apicid; | 146 | int apicid; |
147 | 147 | ||
148 | num_bits_set = cpus_weight(cpumask); | 148 | num_bits_set = cpus_weight(cpumask); |
149 | /* Return id to all */ | 149 | /* Return id to all */ |
150 | if (num_bits_set == NR_CPUS) | 150 | if (num_bits_set == NR_CPUS) |
151 | return (int) 0xFF; | 151 | return (int) 0xFF; |
152 | /* | 152 | /* |
153 | * The cpus in the mask must all be on the apic cluster. If are not | 153 | * The cpus in the mask must all be on the apic cluster. If are not |
154 | * on the same apicid cluster return default value of TARGET_CPUS. | 154 | * on the same apicid cluster return default value of TARGET_CPUS. |
155 | */ | 155 | */ |
156 | cpu = first_cpu(cpumask); | 156 | cpu = first_cpu(cpumask); |
157 | apicid = cpu_to_logical_apicid(cpu); | 157 | apicid = cpu_to_logical_apicid(cpu); |
158 | while (cpus_found < num_bits_set) { | 158 | while (cpus_found < num_bits_set) { |
159 | if (cpu_isset(cpu, cpumask)) { | 159 | if (cpu_isset(cpu, cpumask)) { |
160 | int new_apicid = cpu_to_logical_apicid(cpu); | 160 | int new_apicid = cpu_to_logical_apicid(cpu); |
161 | if (apicid_cluster(apicid) != | 161 | if (apicid_cluster(apicid) != |
162 | apicid_cluster(new_apicid)){ | 162 | apicid_cluster(new_apicid)){ |
163 | printk ("%s: Not a valid mask!\n",__FUNCTION__); | 163 | printk ("%s: Not a valid mask!\n",__FUNCTION__); |
164 | return 0xFF; | 164 | return 0xFF; |
@@ -182,4 +182,4 @@ static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb) | |||
182 | return hard_smp_processor_id() >> index_msb; | 182 | return hard_smp_processor_id() >> index_msb; |
183 | } | 183 | } |
184 | 184 | ||
185 | #endif /* ASM_X86__MACH_SUMMIT__MACH_APIC_H */ | 185 | #endif /* __ASM_SUMMIT_APIC_H */ |
diff --git a/include/asm-x86/summit/apicdef.h b/include/asm-x86/summit/apicdef.h new file mode 100644 index 000000000000..f3fbca1f61c1 --- /dev/null +++ b/include/asm-x86/summit/apicdef.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __ASM_SUMMIT_APICDEF_H | ||
2 | #define __ASM_SUMMIT_APICDEF_H | ||
3 | |||
4 | #define APIC_ID_MASK (0xFF<<24) | ||
5 | |||
6 | static inline unsigned get_apic_id(unsigned long x) | ||
7 | { | ||
8 | return (x>>24)&0xFF; | ||
9 | } | ||
10 | |||
11 | #define GET_APIC_ID(x) get_apic_id(x) | ||
12 | |||
13 | #endif | ||
diff --git a/include/asm-x86/mach-bigsmp/mach_ipi.h b/include/asm-x86/summit/ipi.h index b1b0f966a009..53bd1e7bd7b4 100644 --- a/include/asm-x86/mach-bigsmp/mach_ipi.h +++ b/include/asm-x86/summit/ipi.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86__MACH_BIGSMP__MACH_IPI_H | 1 | #ifndef __ASM_SUMMIT_IPI_H |
2 | #define ASM_X86__MACH_BIGSMP__MACH_IPI_H | 2 | #define __ASM_SUMMIT_IPI_H |
3 | 3 | ||
4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); | 4 | void send_IPI_mask_sequence(cpumask_t mask, int vector); |
5 | 5 | ||
@@ -22,4 +22,4 @@ static inline void send_IPI_all(int vector) | |||
22 | send_IPI_mask(cpu_online_map, vector); | 22 | send_IPI_mask(cpu_online_map, vector); |
23 | } | 23 | } |
24 | 24 | ||
25 | #endif /* ASM_X86__MACH_BIGSMP__MACH_IPI_H */ | 25 | #endif /* __ASM_SUMMIT_IPI_H */ |
diff --git a/include/asm-x86/mach-summit/irq_vectors_limits.h b/include/asm-x86/summit/irq_vectors_limits.h index 22f376ad68e1..890ce3f5e09a 100644 --- a/include/asm-x86/mach-summit/irq_vectors_limits.h +++ b/include/asm-x86/summit/irq_vectors_limits.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H | 1 | #ifndef _ASM_IRQ_VECTORS_LIMITS_H |
2 | #define ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H | 2 | #define _ASM_IRQ_VECTORS_LIMITS_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, | 5 | * For Summit or generic (i.e. installer) kernels, we have lots of I/O APICs, |
@@ -11,4 +11,4 @@ | |||
11 | #define NR_IRQS 224 | 11 | #define NR_IRQS 224 |
12 | #define NR_IRQ_VECTORS 1024 | 12 | #define NR_IRQ_VECTORS 1024 |
13 | 13 | ||
14 | #endif /* ASM_X86__MACH_SUMMIT__IRQ_VECTORS_LIMITS_H */ | 14 | #endif /* _ASM_IRQ_VECTORS_LIMITS_H */ |
diff --git a/include/asm-x86/mach-summit/mach_mpparse.h b/include/asm-x86/summit/mpparse.h index 92396f28772b..013ce6fab2d5 100644 --- a/include/asm-x86/mach-summit/mach_mpparse.h +++ b/include/asm-x86/summit/mpparse.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H | 1 | #ifndef __ASM_SUMMIT_MPPARSE_H |
2 | #define ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H | 2 | #define __ASM_SUMMIT_MPPARSE_H |
3 | 3 | ||
4 | #include <mach_apic.h> | ||
5 | #include <asm/tsc.h> | 4 | #include <asm/tsc.h> |
6 | 5 | ||
7 | extern int use_cyclone; | 6 | extern int use_cyclone; |
@@ -12,11 +11,11 @@ extern void setup_summit(void); | |||
12 | #define setup_summit() {} | 11 | #define setup_summit() {} |
13 | #endif | 12 | #endif |
14 | 13 | ||
15 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, | 14 | static inline int mps_oem_check(struct mp_config_table *mpc, char *oem, |
16 | char *productid) | 15 | char *productid) |
17 | { | 16 | { |
18 | if (!strncmp(oem, "IBM ENSW", 8) && | 17 | if (!strncmp(oem, "IBM ENSW", 8) && |
19 | (!strncmp(productid, "VIGIL SMP", 9) | 18 | (!strncmp(productid, "VIGIL SMP", 9) |
20 | || !strncmp(productid, "EXA", 3) | 19 | || !strncmp(productid, "EXA", 3) |
21 | || !strncmp(productid, "RUTHLESS SMP", 12))){ | 20 | || !strncmp(productid, "RUTHLESS SMP", 12))){ |
22 | mark_tsc_unstable("Summit based system"); | 21 | mark_tsc_unstable("Summit based system"); |
@@ -107,4 +106,4 @@ static inline int is_WPEG(struct rio_detail *rio){ | |||
107 | rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); | 106 | rio->type == LookOutAWPEG || rio->type == LookOutBWPEG); |
108 | } | 107 | } |
109 | 108 | ||
110 | #endif /* ASM_X86__MACH_SUMMIT__MACH_MPPARSE_H */ | 109 | #endif /* __ASM_SUMMIT_MPPARSE_H */ |
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h index 4db0066a3a35..3f4e52bb77f5 100644 --- a/include/asm-x86/thread_info.h +++ b/include/asm-x86/thread_info.h | |||
@@ -241,6 +241,7 @@ static inline struct thread_info *stack_thread_info(void) | |||
241 | #define TS_POLLING 0x0004 /* true if in idle loop | 241 | #define TS_POLLING 0x0004 /* true if in idle loop |
242 | and not sleeping */ | 242 | and not sleeping */ |
243 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ | 243 | #define TS_RESTORE_SIGMASK 0x0008 /* restore signal mask in do_signal() */ |
244 | #define TS_XSAVE 0x0010 /* Use xsave/xrstor */ | ||
244 | 245 | ||
245 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) | 246 | #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING) |
246 | 247 | ||
diff --git a/include/asm-x86/ucontext.h b/include/asm-x86/ucontext.h index 9948dd328084..89eaa5456a7e 100644 --- a/include/asm-x86/ucontext.h +++ b/include/asm-x86/ucontext.h | |||
@@ -1,6 +1,12 @@ | |||
1 | #ifndef ASM_X86__UCONTEXT_H | 1 | #ifndef ASM_X86__UCONTEXT_H |
2 | #define ASM_X86__UCONTEXT_H | 2 | #define ASM_X86__UCONTEXT_H |
3 | 3 | ||
4 | #define UC_FP_XSTATE 0x1 /* indicates the presence of extended state | ||
5 | * information in the memory layout pointed | ||
6 | * by the fpstate pointer in the ucontext's | ||
7 | * sigcontext struct (uc_mcontext). | ||
8 | */ | ||
9 | |||
4 | struct ucontext { | 10 | struct ucontext { |
5 | unsigned long uc_flags; | 11 | unsigned long uc_flags; |
6 | struct ucontext *uc_link; | 12 | struct ucontext *uc_link; |
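UC_FP_XSTATE gives user space a cheap way to know whether uc_mcontext.fpstate points at a plain struct _fpstate or at the larger struct _xstate. A hedged user-space sketch of a handler testing the flag; glibc's ucontext_t exposes uc_flags, and printf in a handler is for illustration only:

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

#ifndef UC_FP_XSTATE
#define UC_FP_XSTATE 0x1
#endif

static void handler(int sig, siginfo_t *info, void *ctx)
{
	ucontext_t *uc = ctx;

	if (uc->uc_flags & UC_FP_XSTATE)
		printf("signal %d: fpstate carries extended (xsave) state\n", sig);
	else
		printf("signal %d: fpstate is the legacy layout\n", sig);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}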
diff --git a/include/asm-x86/xcr.h b/include/asm-x86/xcr.h new file mode 100644 index 000000000000..f2cba4e79a23 --- /dev/null +++ b/include/asm-x86/xcr.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /* -*- linux-c -*- ------------------------------------------------------- * | ||
2 | * | ||
3 | * Copyright 2008 rPath, Inc. - All Rights Reserved | ||
4 | * | ||
5 | * This file is part of the Linux kernel, and is made available under | ||
6 | * the terms of the GNU General Public License version 2 or (at your | ||
7 | * option) any later version; incorporated herein by reference. | ||
8 | * | ||
9 | * ----------------------------------------------------------------------- */ | ||
10 | |||
11 | /* | ||
12 | * asm-x86/xcr.h | ||
13 | * | ||
14 | * Definitions for the eXtended Control Register instructions | ||
15 | */ | ||
16 | |||
17 | #ifndef _ASM_X86_XCR_H | ||
18 | #define _ASM_X86_XCR_H | ||
19 | |||
20 | #define XCR_XFEATURE_ENABLED_MASK 0x00000000 | ||
21 | |||
22 | #ifdef __KERNEL__ | ||
23 | # ifndef __ASSEMBLY__ | ||
24 | |||
25 | #include <linux/types.h> | ||
26 | |||
27 | static inline u64 xgetbv(u32 index) | ||
28 | { | ||
29 | u32 eax, edx; | ||
30 | |||
31 | asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */ | ||
32 | : "=a" (eax), "=d" (edx) | ||
33 | : "c" (index)); | ||
34 | return eax + ((u64)edx << 32); | ||
35 | } | ||
36 | |||
37 | static inline void xsetbv(u32 index, u64 value) | ||
38 | { | ||
39 | u32 eax = value; | ||
40 | u32 edx = value >> 32; | ||
41 | |||
42 | asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */ | ||
43 | : : "a" (eax), "d" (edx), "c" (index)); | ||
44 | } | ||
45 | |||
46 | # endif /* __ASSEMBLY__ */ | ||
47 | #endif /* __KERNEL__ */ | ||
48 | |||
49 | #endif /* _ASM_X86_XCR_H */ | ||
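The xgetbv()/xsetbv() helpers above use raw byte encodings because contemporary assemblers may not know the instructions. A stand-alone user-space sketch that reads XCR0 with the same encoding; it only attempts the read when CPUID reports OSXSAVE (ECX bit 27 of leaf 1), since xgetbv faults otherwise:

#include <stdio.h>
#include <stdint.h>
#include <cpuid.h>

#define XCR_XFEATURE_ENABLED_MASK 0x00000000

static uint64_t xgetbv(uint32_t index)
{
	uint32_t eax, edx;

	__asm__ volatile(".byte 0x0f,0x01,0xd0"	/* xgetbv */
			 : "=a" (eax), "=d" (edx) : "c" (index));
	return eax + ((uint64_t)edx << 32);
}

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx) || !(ecx & (1 << 27))) {
		printf("OSXSAVE not enabled; xgetbv would fault\n");
		return 0;
	}
	printf("XCR0 = 0x%llx\n",
	       (unsigned long long)xgetbv(XCR_XFEATURE_ENABLED_MASK));
	return 0;
}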
diff --git a/include/asm-x86/xsave.h b/include/asm-x86/xsave.h new file mode 100644 index 000000000000..08e9a1ac07a9 --- /dev/null +++ b/include/asm-x86/xsave.h | |||
@@ -0,0 +1,118 @@ | |||
1 | #ifndef __ASM_X86_XSAVE_H | ||
2 | #define __ASM_X86_XSAVE_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <asm/processor.h> | ||
6 | #include <asm/i387.h> | ||
7 | |||
8 | #define XSTATE_FP 0x1 | ||
9 | #define XSTATE_SSE 0x2 | ||
10 | |||
11 | #define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE) | ||
12 | |||
13 | #define FXSAVE_SIZE 512 | ||
14 | |||
15 | /* | ||
16 | * These are the features that the OS can handle currently. | ||
17 | */ | ||
18 | #define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE) | ||
19 | |||
20 | #ifdef CONFIG_X86_64 | ||
21 | #define REX_PREFIX "0x48, " | ||
22 | #else | ||
23 | #define REX_PREFIX | ||
24 | #endif | ||
25 | |||
26 | extern unsigned int xstate_size; | ||
27 | extern u64 pcntxt_mask; | ||
28 | extern struct xsave_struct *init_xstate_buf; | ||
29 | |||
30 | extern void xsave_cntxt_init(void); | ||
31 | extern void xsave_init(void); | ||
32 | extern int init_fpu(struct task_struct *child); | ||
33 | extern int check_for_xstate(struct i387_fxsave_struct __user *buf, | ||
34 | void __user *fpstate, | ||
35 | struct _fpx_sw_bytes *sw); | ||
36 | |||
37 | static inline int xrstor_checking(struct xsave_struct *fx) | ||
38 | { | ||
39 | int err; | ||
40 | |||
41 | asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" | ||
42 | "2:\n" | ||
43 | ".section .fixup,\"ax\"\n" | ||
44 | "3: movl $-1,%[err]\n" | ||
45 | " jmp 2b\n" | ||
46 | ".previous\n" | ||
47 | _ASM_EXTABLE(1b, 3b) | ||
48 | : [err] "=r" (err) | ||
49 | : "D" (fx), "m" (*fx), "a" (-1), "d" (-1), "0" (0) | ||
50 | : "memory"); | ||
51 | |||
52 | return err; | ||
53 | } | ||
54 | |||
55 | static inline int xsave_user(struct xsave_struct __user *buf) | ||
56 | { | ||
57 | int err; | ||
58 | __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" | ||
59 | "2:\n" | ||
60 | ".section .fixup,\"ax\"\n" | ||
61 | "3: movl $-1,%[err]\n" | ||
62 | " jmp 2b\n" | ||
63 | ".previous\n" | ||
64 | ".section __ex_table,\"a\"\n" | ||
65 | _ASM_ALIGN "\n" | ||
66 | _ASM_PTR "1b,3b\n" | ||
67 | ".previous" | ||
68 | : [err] "=r" (err) | ||
69 | : "D" (buf), "a" (-1), "d" (-1), "0" (0) | ||
70 | : "memory"); | ||
71 | if (unlikely(err) && __clear_user(buf, xstate_size)) | ||
72 | err = -EFAULT; | ||
73 | /* No need to clear here because the caller clears USED_MATH */ | ||
74 | return err; | ||
75 | } | ||
76 | |||
77 | static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask) | ||
78 | { | ||
79 | int err; | ||
80 | struct xsave_struct *xstate = ((__force struct xsave_struct *)buf); | ||
81 | u32 lmask = mask; | ||
82 | u32 hmask = mask >> 32; | ||
83 | |||
84 | __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" | ||
85 | "2:\n" | ||
86 | ".section .fixup,\"ax\"\n" | ||
87 | "3: movl $-1,%[err]\n" | ||
88 | " jmp 2b\n" | ||
89 | ".previous\n" | ||
90 | ".section __ex_table,\"a\"\n" | ||
91 | _ASM_ALIGN "\n" | ||
92 | _ASM_PTR "1b,3b\n" | ||
93 | ".previous" | ||
94 | : [err] "=r" (err) | ||
95 | : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0) | ||
96 | : "memory"); /* memory required? */ | ||
97 | return err; | ||
98 | } | ||
99 | |||
100 | static inline void xrstor_state(struct xsave_struct *fx, u64 mask) | ||
101 | { | ||
102 | u32 lmask = mask; | ||
103 | u32 hmask = mask >> 32; | ||
104 | |||
105 | asm volatile(".byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" | ||
106 | : : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask) | ||
107 | : "memory"); | ||
108 | } | ||
109 | |||
110 | static inline void xsave(struct task_struct *tsk) | ||
111 | { | ||
112 | /* This, however, we can work around by forcing the compiler to select | ||
113 | an addressing mode that doesn't require extended registers. */ | ||
114 | __asm__ __volatile__(".byte " REX_PREFIX "0x0f,0xae,0x27" | ||
115 | : : "D" (&(tsk->thread.xstate->xsave)), | ||
116 | "a" (-1), "d"(-1) : "memory"); | ||
117 | } | ||
118 | #endif | ||
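A hedged sketch of how these helpers pair with the new TS_XSAVE thread flag (added to thread_info.h above): the FPU restore path is expected to pick xrstor over fxrstor when the task's state was saved with xsave. fxrstor_checking() here refers to the pre-existing x86_64 i387.h helper and is not defined by this header:

static inline int restore_fpu_checking(struct task_struct *tsk)
{
	if (task_thread_info(tsk)->status & TS_XSAVE)
		return xrstor_checking(&tsk->thread.xstate->xsave);
	else
		return fxrstor_checking(&tsk->thread.xstate->fxsave);
}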
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 56c73b847551..c360c558e59e 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
@@ -25,9 +25,99 @@ | |||
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/msi.h> | 26 | #include <linux/msi.h> |
27 | 27 | ||
28 | #ifdef CONFIG_DMAR | 28 | #if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP) |
29 | struct intel_iommu; | 29 | struct intel_iommu; |
30 | 30 | ||
31 | struct dmar_drhd_unit { | ||
32 | struct list_head list; /* list of drhd units */ | ||
33 | struct acpi_dmar_header *hdr; /* ACPI header */ | ||
34 | u64 reg_base_addr; /* register base address*/ | ||
35 | struct pci_dev **devices; /* target device array */ | ||
36 | int devices_cnt; /* target device count */ | ||
37 | u8 ignored:1; /* ignore drhd */ | ||
38 | u8 include_all:1; | ||
39 | struct intel_iommu *iommu; | ||
40 | }; | ||
41 | |||
42 | extern struct list_head dmar_drhd_units; | ||
43 | |||
44 | #define for_each_drhd_unit(drhd) \ | ||
45 | list_for_each_entry(drhd, &dmar_drhd_units, list) | ||
46 | |||
47 | extern int dmar_table_init(void); | ||
48 | extern int early_dmar_detect(void); | ||
49 | extern int dmar_dev_scope_init(void); | ||
50 | |||
51 | /* Intel IOMMU detection */ | ||
52 | extern void detect_intel_iommu(void); | ||
53 | |||
54 | |||
55 | extern int parse_ioapics_under_ir(void); | ||
56 | extern int alloc_iommu(struct dmar_drhd_unit *); | ||
57 | #else | ||
58 | static inline void detect_intel_iommu(void) | ||
59 | { | ||
60 | return; | ||
61 | } | ||
62 | |||
63 | static inline int dmar_table_init(void) | ||
64 | { | ||
65 | return -ENODEV; | ||
66 | } | ||
67 | #endif /* !CONFIG_DMAR && !CONFIG_INTR_REMAP */ | ||
68 | |||
69 | #ifdef CONFIG_INTR_REMAP | ||
70 | extern int intr_remapping_enabled; | ||
71 | extern int enable_intr_remapping(int); | ||
72 | |||
73 | struct irte { | ||
74 | union { | ||
75 | struct { | ||
76 | __u64 present : 1, | ||
77 | fpd : 1, | ||
78 | dst_mode : 1, | ||
79 | redir_hint : 1, | ||
80 | trigger_mode : 1, | ||
81 | dlvry_mode : 3, | ||
82 | avail : 4, | ||
83 | __reserved_1 : 4, | ||
84 | vector : 8, | ||
85 | __reserved_2 : 8, | ||
86 | dest_id : 32; | ||
87 | }; | ||
88 | __u64 low; | ||
89 | }; | ||
90 | |||
91 | union { | ||
92 | struct { | ||
93 | __u64 sid : 16, | ||
94 | sq : 2, | ||
95 | svt : 2, | ||
96 | __reserved_3 : 44; | ||
97 | }; | ||
98 | __u64 high; | ||
99 | }; | ||
100 | }; | ||
101 | extern int get_irte(int irq, struct irte *entry); | ||
102 | extern int modify_irte(int irq, struct irte *irte_modified); | ||
103 | extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); | ||
104 | extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, | ||
105 | u16 sub_handle); | ||
106 | extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); | ||
107 | extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index); | ||
108 | extern int flush_irte(int irq); | ||
109 | extern int free_irte(int irq); | ||
110 | |||
111 | extern int irq_remapped(int irq); | ||
112 | extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); | ||
113 | extern struct intel_iommu *map_ioapic_to_ir(int apic); | ||
114 | #else | ||
115 | #define irq_remapped(irq) (0) | ||
116 | #define enable_intr_remapping(mode) (-1) | ||
117 | #define intr_remapping_enabled (0) | ||
118 | #endif | ||
119 | |||
120 | #ifdef CONFIG_DMAR | ||
31 | extern const char *dmar_get_fault_reason(u8 fault_reason); | 121 | extern const char *dmar_get_fault_reason(u8 fault_reason); |
32 | 122 | ||
33 | /* Can't use the common MSI interrupt functions | 123 | /* Can't use the common MSI interrupt functions |
@@ -40,47 +130,30 @@ extern void dmar_msi_write(int irq, struct msi_msg *msg); | |||
40 | extern int dmar_set_interrupt(struct intel_iommu *iommu); | 130 | extern int dmar_set_interrupt(struct intel_iommu *iommu); |
41 | extern int arch_setup_dmar_msi(unsigned int irq); | 131 | extern int arch_setup_dmar_msi(unsigned int irq); |
42 | 132 | ||
43 | /* Intel IOMMU detection and initialization functions */ | 133 | extern int iommu_detected, no_iommu; |
44 | extern void detect_intel_iommu(void); | ||
45 | extern int intel_iommu_init(void); | ||
46 | |||
47 | extern int dmar_table_init(void); | ||
48 | extern int early_dmar_detect(void); | ||
49 | |||
50 | extern struct list_head dmar_drhd_units; | ||
51 | extern struct list_head dmar_rmrr_units; | 134 | extern struct list_head dmar_rmrr_units; |
52 | |||
53 | struct dmar_drhd_unit { | ||
54 | struct list_head list; /* list of drhd units */ | ||
55 | u64 reg_base_addr; /* register base address*/ | ||
56 | struct pci_dev **devices; /* target device array */ | ||
57 | int devices_cnt; /* target device count */ | ||
58 | u8 ignored:1; /* ignore drhd */ | ||
59 | u8 include_all:1; | ||
60 | struct intel_iommu *iommu; | ||
61 | }; | ||
62 | |||
63 | struct dmar_rmrr_unit { | 135 | struct dmar_rmrr_unit { |
64 | struct list_head list; /* list of rmrr units */ | 136 | struct list_head list; /* list of rmrr units */ |
137 | struct acpi_dmar_header *hdr; /* ACPI header */ | ||
65 | u64 base_address; /* reserved base address*/ | 138 | u64 base_address; /* reserved base address*/ |
66 | u64 end_address; /* reserved end address */ | 139 | u64 end_address; /* reserved end address */ |
67 | struct pci_dev **devices; /* target devices */ | 140 | struct pci_dev **devices; /* target devices */ |
68 | int devices_cnt; /* target device count */ | 141 | int devices_cnt; /* target device count */ |
69 | }; | 142 | }; |
70 | 143 | ||
71 | #define for_each_drhd_unit(drhd) \ | ||
72 | list_for_each_entry(drhd, &dmar_drhd_units, list) | ||
73 | #define for_each_rmrr_units(rmrr) \ | 144 | #define for_each_rmrr_units(rmrr) \ |
74 | list_for_each_entry(rmrr, &dmar_rmrr_units, list) | 145 | list_for_each_entry(rmrr, &dmar_rmrr_units, list) |
146 | /* Intel DMAR initialization functions */ | ||
147 | extern int intel_iommu_init(void); | ||
148 | extern int dmar_disabled; | ||
75 | #else | 149 | #else |
76 | static inline void detect_intel_iommu(void) | ||
77 | { | ||
78 | return; | ||
79 | } | ||
80 | static inline int intel_iommu_init(void) | 150 | static inline int intel_iommu_init(void) |
81 | { | 151 | { |
152 | #ifdef CONFIG_INTR_REMAP | ||
153 | return dmar_dev_scope_init(); | ||
154 | #else | ||
82 | return -ENODEV; | 155 | return -ENODEV; |
156 | #endif | ||
83 | } | 157 | } |
84 | |||
85 | #endif /* !CONFIG_DMAR */ | 158 | #endif /* !CONFIG_DMAR */ |
86 | #endif /* __DMAR_H__ */ | 159 | #endif /* __DMAR_H__ */ |
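With the DRHD structures now visible whenever either CONFIG_DMAR or CONFIG_INTR_REMAP is set, both the DMA-remapping and interrupt-remapping consumers can walk the units the same way. A hedged kernel-style fragment of the expected pattern; the error handling is illustrative:

struct dmar_drhd_unit *drhd;

for_each_drhd_unit(drhd) {
	if (drhd->ignored)
		continue;
	if (alloc_iommu(drhd))
		printk(KERN_ERR "DMAR: failed to allocate an iommu for a DRHD unit\n");
}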
diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 350033e8f4e1..ee9bcc6f32b6 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h | |||
@@ -108,6 +108,9 @@ extern struct resource iomem_resource; | |||
108 | 108 | ||
109 | extern int request_resource(struct resource *root, struct resource *new); | 109 | extern int request_resource(struct resource *root, struct resource *new); |
110 | extern int release_resource(struct resource *new); | 110 | extern int release_resource(struct resource *new); |
111 | extern void reserve_region_with_split(struct resource *root, | ||
112 | resource_size_t start, resource_size_t end, | ||
113 | const char *name); | ||
111 | extern int insert_resource(struct resource *parent, struct resource *new); | 114 | extern int insert_resource(struct resource *parent, struct resource *new); |
112 | extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); | 115 | extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); |
113 | extern int allocate_resource(struct resource *root, struct resource *new, | 116 | extern int allocate_resource(struct resource *root, struct resource *new, |
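reserve_region_with_split() claims a range under a root resource even when parts of it are already occupied, splitting the reservation around existing children. A hedged one-line usage fragment; the range and name are purely illustrative:

reserve_region_with_split(&iomem_resource, 0xa0000, 0xbffff, "example reserved range");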
diff --git a/include/linux/irq.h b/include/linux/irq.h index 8ccb462ea42c..8d9411bc60f6 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -62,6 +62,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, | |||
62 | #define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */ | 62 | #define IRQ_MOVE_PENDING 0x00200000 /* need to re-target IRQ destination */ |
63 | #define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */ | 63 | #define IRQ_NO_BALANCING 0x00400000 /* IRQ is excluded from balancing */ |
64 | #define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ | 64 | #define IRQ_SPURIOUS_DISABLED 0x00800000 /* IRQ was disabled by the spurious trap */ |
65 | #define IRQ_MOVE_PCNTXT 0x01000000 /* IRQ migration from process context */ | ||
65 | 66 | ||
66 | #ifdef CONFIG_IRQ_PER_CPU | 67 | #ifdef CONFIG_IRQ_PER_CPU |
67 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) | 68 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) |
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index fac3337547eb..9f2a3751873a 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -23,12 +23,19 @@ | |||
23 | __attribute__((__section__(SHARED_ALIGNED_SECTION))) \ | 23 | __attribute__((__section__(SHARED_ALIGNED_SECTION))) \ |
24 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ | 24 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ |
25 | ____cacheline_aligned_in_smp | 25 | ____cacheline_aligned_in_smp |
26 | |||
27 | #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ | ||
28 | __attribute__((__section__(".data.percpu.page_aligned"))) \ | ||
29 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | ||
26 | #else | 30 | #else |
27 | #define DEFINE_PER_CPU(type, name) \ | 31 | #define DEFINE_PER_CPU(type, name) \ |
28 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | 32 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name |
29 | 33 | ||
30 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | 34 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ |
31 | DEFINE_PER_CPU(type, name) | 35 | DEFINE_PER_CPU(type, name) |
36 | |||
37 | #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \ | ||
38 | DEFINE_PER_CPU(type, name) | ||
32 | #endif | 39 | #endif |
33 | 40 | ||
34 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) | 41 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) |
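DEFINE_PER_CPU_PAGE_ALIGNED() places an object in the new .data.percpu.page_aligned section so that page-aligned per-cpu objects can be grouped at the start of the per-cpu area instead of relying on padding. A hedged kernel-context usage fragment; the type and name are illustrative:

struct example_percpu_page {
	char data[PAGE_SIZE];
} __attribute__((aligned(PAGE_SIZE)));		/* keep the type itself page sized/aligned */

DEFINE_PER_CPU_PAGE_ALIGNED(struct example_percpu_page, example_page);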