author     Linus Torvalds <torvalds@woody.osdl.org>   2006-12-07 11:59:11 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>   2006-12-07 11:59:11 -0500
commit     4522d58275f124105819723e24e912c8e5bf3cdd (patch)
tree       b92c29014fadffe049c1925676037f0092b8d112 /include
parent     6cf24f031bc97cb5a7c9df3b6e73c45b628b2b28 (diff)
parent     64a26a731235b59c9d73bbe82c1f896d57400d37 (diff)
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (156 commits)
[PATCH] x86-64: Export smp_call_function_single
[PATCH] i386: Clean up smp_tune_scheduling()
[PATCH] unwinder: move .eh_frame to RODATA
[PATCH] unwinder: fully support linker generated .eh_frame_hdr section
[PATCH] x86-64: don't use set_irq_regs()
[PATCH] x86-64: check vector in setup_ioapic_dest to verify if need setup_IO_APIC_irq
[PATCH] x86-64: Make ix86 default to HIGHMEM4G instead of NOHIGHMEM
[PATCH] i386: replace kmalloc+memset with kzalloc
[PATCH] x86-64: remove remaining pc98 code
[PATCH] x86-64: remove unused variable
[PATCH] x86-64: Fix constraints in atomic_add_return()
[PATCH] x86-64: fix asm constraints in i386 atomic_add_return
[PATCH] x86-64: Correct documentation for bzImage protocol v2.05
[PATCH] x86-64: replace kmalloc+memset with kzalloc in MTRR code
[PATCH] x86-64: Fix numaq build error
[PATCH] x86-64: include/asm-x86_64/cpufeature.h isn't a userspace header
[PATCH] unwinder: Add debugging output to the Dwarf2 unwinder
[PATCH] x86-64: Clarify error message in GART code
[PATCH] x86-64: Fix interrupt race in idle callback (3rd try)
[PATCH] x86-64: Remove unwind stack pointer alignment forcing again
...
Fixed conflict in include/linux/uaccess.h manually
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include')
73 files changed, 1396 insertions, 371 deletions
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index e60d6f21fa62..4d4c62d11059 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -11,8 +11,8 @@
11 | 11 | ||
12 | #define RODATA \ | 12 | #define RODATA \ |
13 | . = ALIGN(4096); \ | 13 | . = ALIGN(4096); \ |
14 | __start_rodata = .; \ | ||
15 | .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ | 14 | .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ |
15 | VMLINUX_SYMBOL(__start_rodata) = .; \ | ||
16 | *(.rodata) *(.rodata.*) \ | 16 | *(.rodata) *(.rodata.*) \ |
17 | *(__vermagic) /* Kernel version magic */ \ | 17 | *(__vermagic) /* Kernel version magic */ \ |
18 | } \ | 18 | } \ |
@@ -119,17 +119,16 @@
119 | *(__ksymtab_strings) \ | 119 | *(__ksymtab_strings) \ |
120 | } \ | 120 | } \ |
121 | \ | 121 | \ |
122 | EH_FRAME \ | ||
123 | \ | ||
122 | /* Built-in module parameters. */ \ | 124 | /* Built-in module parameters. */ \ |
123 | __param : AT(ADDR(__param) - LOAD_OFFSET) { \ | 125 | __param : AT(ADDR(__param) - LOAD_OFFSET) { \ |
124 | VMLINUX_SYMBOL(__start___param) = .; \ | 126 | VMLINUX_SYMBOL(__start___param) = .; \ |
125 | *(__param) \ | 127 | *(__param) \ |
126 | VMLINUX_SYMBOL(__stop___param) = .; \ | 128 | VMLINUX_SYMBOL(__stop___param) = .; \ |
129 | VMLINUX_SYMBOL(__end_rodata) = .; \ | ||
127 | } \ | 130 | } \ |
128 | \ | 131 | \ |
129 | /* Unwind data binary search table */ \ | ||
130 | EH_FRAME_HDR \ | ||
131 | \ | ||
132 | __end_rodata = .; \ | ||
133 | . = ALIGN(4096); | 132 | . = ALIGN(4096); |
134 | 133 | ||
135 | #define SECURITY_INIT \ | 134 | #define SECURITY_INIT \ |
@@ -162,15 +161,23 @@
162 | VMLINUX_SYMBOL(__kprobes_text_end) = .; | 161 | VMLINUX_SYMBOL(__kprobes_text_end) = .; |
163 | 162 | ||
164 | #ifdef CONFIG_STACK_UNWIND | 163 | #ifdef CONFIG_STACK_UNWIND |
165 | /* Unwind data binary search table */ | 164 | #define EH_FRAME \ |
166 | #define EH_FRAME_HDR \ | 165 | /* Unwind data binary search table */ \ |
166 | . = ALIGN(8); \ | ||
167 | .eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) { \ | 167 | .eh_frame_hdr : AT(ADDR(.eh_frame_hdr) - LOAD_OFFSET) { \ |
168 | VMLINUX_SYMBOL(__start_unwind_hdr) = .; \ | 168 | VMLINUX_SYMBOL(__start_unwind_hdr) = .; \ |
169 | *(.eh_frame_hdr) \ | 169 | *(.eh_frame_hdr) \ |
170 | VMLINUX_SYMBOL(__end_unwind_hdr) = .; \ | 170 | VMLINUX_SYMBOL(__end_unwind_hdr) = .; \ |
171 | } \ | ||
172 | /* Unwind data */ \ | ||
173 | . = ALIGN(8); \ | ||
174 | .eh_frame : AT(ADDR(.eh_frame) - LOAD_OFFSET) { \ | ||
175 | VMLINUX_SYMBOL(__start_unwind) = .; \ | ||
176 | *(.eh_frame) \ | ||
177 | VMLINUX_SYMBOL(__end_unwind) = .; \ | ||
171 | } | 178 | } |
172 | #else | 179 | #else |
173 | #define EH_FRAME_HDR | 180 | #define EH_FRAME |
174 | #endif | 181 | #endif |
175 | 182 | ||
176 | /* DWARF debug sections. | 183 | /* DWARF debug sections. |
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h
index b01a7ec409ce..b8fa9557c532 100644
--- a/include/asm-i386/alternative.h
+++ b/include/asm-i386/alternative.h
@@ -4,7 +4,7 @@
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | #include <asm/types.h> | 6 | #include <asm/types.h> |
7 | 7 | #include <linux/stddef.h> | |
8 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | 9 | ||
10 | struct alt_instr { | 10 | struct alt_instr { |
@@ -118,4 +118,15 @@ static inline void alternatives_smp_switch(int smp) {}
118 | #define LOCK_PREFIX "" | 118 | #define LOCK_PREFIX "" |
119 | #endif | 119 | #endif |
120 | 120 | ||
121 | struct paravirt_patch; | ||
122 | #ifdef CONFIG_PARAVIRT | ||
123 | void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end); | ||
124 | #else | ||
125 | static inline void | ||
126 | apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end) | ||
127 | {} | ||
128 | #define __start_parainstructions NULL | ||
129 | #define __stop_parainstructions NULL | ||
130 | #endif | ||
131 | |||
121 | #endif /* _I386_ALTERNATIVE_H */ | 132 | #endif /* _I386_ALTERNATIVE_H */ |
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h
index b9529578fc37..41a44319905f 100644
--- a/include/asm-i386/apic.h
+++ b/include/asm-i386/apic.h
@@ -37,18 +37,27 @@ extern void generic_apic_probe(void);
37 | /* | 37 | /* |
38 | * Basic functions accessing APICs. | 38 | * Basic functions accessing APICs. |
39 | */ | 39 | */ |
40 | #ifdef CONFIG_PARAVIRT | ||
41 | #include <asm/paravirt.h> | ||
42 | #else | ||
43 | #define apic_write native_apic_write | ||
44 | #define apic_write_atomic native_apic_write_atomic | ||
45 | #define apic_read native_apic_read | ||
46 | #endif | ||
40 | 47 | ||
41 | static __inline void apic_write(unsigned long reg, unsigned long v) | 48 | static __inline fastcall void native_apic_write(unsigned long reg, |
49 | unsigned long v) | ||
42 | { | 50 | { |
43 | *((volatile unsigned long *)(APIC_BASE+reg)) = v; | 51 | *((volatile unsigned long *)(APIC_BASE+reg)) = v; |
44 | } | 52 | } |
45 | 53 | ||
46 | static __inline void apic_write_atomic(unsigned long reg, unsigned long v) | 54 | static __inline fastcall void native_apic_write_atomic(unsigned long reg, |
55 | unsigned long v) | ||
47 | { | 56 | { |
48 | xchg((volatile unsigned long *)(APIC_BASE+reg), v); | 57 | xchg((volatile unsigned long *)(APIC_BASE+reg), v); |
49 | } | 58 | } |
50 | 59 | ||
51 | static __inline unsigned long apic_read(unsigned long reg) | 60 | static __inline fastcall unsigned long native_apic_read(unsigned long reg) |
52 | { | 61 | { |
53 | return *((volatile unsigned long *)(APIC_BASE+reg)); | 62 | return *((volatile unsigned long *)(APIC_BASE+reg)); |
54 | } | 63 | } |
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index a6c024e2506f..c57441bb2905 100644
--- a/include/asm-i386/atomic.h
+++ b/include/asm-i386/atomic.h
@@ -187,9 +187,9 @@ static __inline__ int atomic_add_return(int i, atomic_t *v)
187 | /* Modern 486+ processor */ | 187 | /* Modern 486+ processor */ |
188 | __i = i; | 188 | __i = i; |
189 | __asm__ __volatile__( | 189 | __asm__ __volatile__( |
190 | LOCK_PREFIX "xaddl %0, %1;" | 190 | LOCK_PREFIX "xaddl %0, %1" |
191 | :"=r"(i) | 191 | :"+r" (i), "+m" (v->counter) |
192 | :"m"(v->counter), "0"(i)); | 192 | : : "memory"); |
193 | return i + __i; | 193 | return i + __i; |
194 | 194 | ||
195 | #ifdef CONFIG_M386 | 195 | #ifdef CONFIG_M386 |
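The constraint fix above matters because xaddl reads and writes both of its operands; the old "=r"/"m"/"0" form let the compiler assume the memory operand was only read. A minimal userspace sketch of the corrected pattern, assuming an x86 target built with GCC or Clang (this is not the kernel's atomic_t API, just the same constraint idiom):

#include <stdio.h>

/* "+r"(i) and "+m"(*counter) mark both operands as read-write, which is
 * what xaddl actually does: it swaps the register with memory and stores
 * the sum back to memory. */
static inline int xadd_return(int i, int *counter)
{
	int __i = i;
	__asm__ __volatile__(
		"lock; xaddl %0, %1"
		: "+r" (i), "+m" (*counter)
		: : "memory");
	return i + __i;	/* i now holds the old counter value */
}

int main(void)
{
	int v = 40;
	printf("%d\n", xadd_return(2, &v));	/* new value: 42 */
	printf("%d\n", v);			/* counter: 42 */
	return 0;
}

The "memory" clobber mirrors the new kernel constraint and keeps the compiler from caching the counter value across the instruction.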
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h
index 96b228e6e79c..8ce79a6fa891 100644
--- a/include/asm-i386/boot.h
+++ b/include/asm-i386/boot.h
@@ -12,4 +12,8 @@
12 | #define EXTENDED_VGA 0xfffe /* 80x50 mode */ | 12 | #define EXTENDED_VGA 0xfffe /* 80x50 mode */ |
13 | #define ASK_VGA 0xfffd /* ask for it at bootup */ | 13 | #define ASK_VGA 0xfffd /* ask for it at bootup */ |
14 | 14 | ||
15 | #endif | 15 | /* Physical address where kenrel should be loaded. */ |
16 | #define LOAD_PHYSICAL_ADDR ((0x100000 + CONFIG_PHYSICAL_ALIGN - 1) \ | ||
17 | & ~(CONFIG_PHYSICAL_ALIGN - 1)) | ||
18 | |||
19 | #endif /* _LINUX_BOOT_H */ | ||
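LOAD_PHYSICAL_ADDR above rounds the traditional 1 MiB load address up to the next CONFIG_PHYSICAL_ALIGN boundary. A standalone check of that round-up formula, using a hypothetical 4 MiB alignment (the macro assumes the alignment is a power of two):

#include <stdio.h>

/* Same round-up-to-alignment expression as LOAD_PHYSICAL_ADDR;
 * 0x400000 is only an illustrative CONFIG_PHYSICAL_ALIGN value. */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long align = 0x400000;
	printf("%#lx\n", ALIGN_UP(0x100000UL, align));	/* prints 0x400000 */
	return 0;
}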
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h
index 592ffeeda45e..38f1aebbbdb5 100644
--- a/include/asm-i386/bugs.h
+++ b/include/asm-i386/bugs.h
@@ -21,6 +21,7 @@
21 | #include <asm/processor.h> | 21 | #include <asm/processor.h> |
22 | #include <asm/i387.h> | 22 | #include <asm/i387.h> |
23 | #include <asm/msr.h> | 23 | #include <asm/msr.h> |
24 | #include <asm/paravirt.h> | ||
24 | 25 | ||
25 | static int __init no_halt(char *s) | 26 | static int __init no_halt(char *s) |
26 | { | 27 | { |
@@ -91,6 +92,9 @@ static void __init check_fpu(void)
91 | 92 | ||
92 | static void __init check_hlt(void) | 93 | static void __init check_hlt(void) |
93 | { | 94 | { |
95 | if (paravirt_enabled()) | ||
96 | return; | ||
97 | |||
94 | printk(KERN_INFO "Checking 'hlt' instruction... "); | 98 | printk(KERN_INFO "Checking 'hlt' instruction... "); |
95 | if (!boot_cpu_data.hlt_works_ok) { | 99 | if (!boot_cpu_data.hlt_works_ok) { |
96 | printk("disabled\n"); | 100 | printk("disabled\n"); |
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h
index b1bc7b1b64b0..9d914e1e4aad 100644
--- a/include/asm-i386/cpu.h
+++ b/include/asm-i386/cpu.h
@@ -13,6 +13,9 @@ struct i386_cpu {
13 | extern int arch_register_cpu(int num); | 13 | extern int arch_register_cpu(int num); |
14 | #ifdef CONFIG_HOTPLUG_CPU | 14 | #ifdef CONFIG_HOTPLUG_CPU |
15 | extern void arch_unregister_cpu(int); | 15 | extern void arch_unregister_cpu(int); |
16 | extern int enable_cpu_hotplug; | ||
17 | #else | ||
18 | #define enable_cpu_hotplug 0 | ||
16 | #endif | 19 | #endif |
17 | 20 | ||
18 | DECLARE_PER_CPU(int, cpu_state); | 21 | DECLARE_PER_CPU(int, cpu_state); |
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h
index d314ebb3d59e..3f92b94e0d75 100644
--- a/include/asm-i386/cpufeature.h
+++ b/include/asm-i386/cpufeature.h
@@ -31,7 +31,7 @@
31 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ | 31 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ |
32 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ | 32 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ |
33 | #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ | 33 | #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ |
34 | #define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */ | 34 | #define X86_FEATURE_DS (0*32+21) /* Debug Store */ |
35 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ | 35 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ |
36 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ | 36 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ |
37 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ | 37 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ |
@@ -73,6 +73,8 @@
73 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ | 73 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ |
74 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ | 74 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ |
75 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ | 75 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ |
76 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ | ||
77 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ | ||
76 | 78 | ||
77 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 79 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
78 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | 80 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ |
@@ -134,6 +136,10 @@
134 | #define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) | 136 | #define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) |
135 | #define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) | 137 | #define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) |
136 | #define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) | 138 | #define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) |
139 | #define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) | ||
140 | #define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) | ||
141 | #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) | ||
142 | #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) | ||
137 | 143 | ||
138 | #endif /* __ASM_I386_CPUFEATURE_H */ | 144 | #endif /* __ASM_I386_CPUFEATURE_H */ |
139 | 145 | ||
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h
index 3cbbecd79016..5252ee0f6d7a 100644
--- a/include/asm-i386/current.h
+++ b/include/asm-i386/current.h
@@ -1,13 +1,14 @@
1 | #ifndef _I386_CURRENT_H | 1 | #ifndef _I386_CURRENT_H |
2 | #define _I386_CURRENT_H | 2 | #define _I386_CURRENT_H |
3 | 3 | ||
4 | #include <linux/thread_info.h> | 4 | #include <asm/pda.h> |
5 | #include <linux/compiler.h> | ||
5 | 6 | ||
6 | struct task_struct; | 7 | struct task_struct; |
7 | 8 | ||
8 | static __always_inline struct task_struct * get_current(void) | 9 | static __always_inline struct task_struct *get_current(void) |
9 | { | 10 | { |
10 | return current_thread_info()->task; | 11 | return read_pda(pcurrent); |
11 | } | 12 | } |
12 | 13 | ||
13 | #define current get_current() | 14 | #define current get_current() |
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h
index b1c7650dc7b9..32d6678d0bbf 100644
--- a/include/asm-i386/delay.h
+++ b/include/asm-i386/delay.h
@@ -7,6 +7,7 @@
7 | * Delay routines calling functions in arch/i386/lib/delay.c | 7 | * Delay routines calling functions in arch/i386/lib/delay.c |
8 | */ | 8 | */ |
9 | 9 | ||
10 | /* Undefined functions to get compile-time errors */ | ||
10 | extern void __bad_udelay(void); | 11 | extern void __bad_udelay(void); |
11 | extern void __bad_ndelay(void); | 12 | extern void __bad_ndelay(void); |
12 | 13 | ||
@@ -15,13 +16,23 @@ extern void __ndelay(unsigned long nsecs);
15 | extern void __const_udelay(unsigned long usecs); | 16 | extern void __const_udelay(unsigned long usecs); |
16 | extern void __delay(unsigned long loops); | 17 | extern void __delay(unsigned long loops); |
17 | 18 | ||
19 | #if defined(CONFIG_PARAVIRT) && !defined(USE_REAL_TIME_DELAY) | ||
20 | #define udelay(n) paravirt_ops.const_udelay((n) * 0x10c7ul) | ||
21 | |||
22 | #define ndelay(n) paravirt_ops.const_udelay((n) * 5ul) | ||
23 | |||
24 | #else /* !PARAVIRT || USE_REAL_TIME_DELAY */ | ||
25 | |||
26 | /* 0x10c7 is 2**32 / 1000000 (rounded up) */ | ||
18 | #define udelay(n) (__builtin_constant_p(n) ? \ | 27 | #define udelay(n) (__builtin_constant_p(n) ? \ |
19 | ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ | 28 | ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ |
20 | __udelay(n)) | 29 | __udelay(n)) |
21 | 30 | ||
31 | /* 0x5 is 2**32 / 1000000000 (rounded up) */ | ||
22 | #define ndelay(n) (__builtin_constant_p(n) ? \ | 32 | #define ndelay(n) (__builtin_constant_p(n) ? \ |
23 | ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ | 33 | ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ |
24 | __ndelay(n)) | 34 | __ndelay(n)) |
35 | #endif | ||
25 | 36 | ||
26 | void use_tsc_delay(void); | 37 | void use_tsc_delay(void); |
27 | 38 | ||
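Both the paravirt and native udelay()/ndelay() paths scale by the same fixed-point constants the new comments describe: 0x10c7 is 2**32/10**6 rounded up, and 5 is 2**32/10**9 rounded up. A quick standalone check of those values:

#include <stdio.h>
#include <stdint.h>

/* Verify the fixed-point scale factors used by udelay()/ndelay(). */
int main(void)
{
	uint64_t two32 = 1ULL << 32;

	printf("%#llx\n", (unsigned long long)((two32 + 999999) / 1000000));		/* 0x10c7 */
	printf("%llu\n", (unsigned long long)((two32 + 999999999) / 1000000000));	/* 5 */
	return 0;
}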
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h
index 5874ef119ffd..f398cc456448 100644
--- a/include/asm-i386/desc.h
+++ b/include/asm-i386/desc.h
@@ -4,8 +4,6 @@
4 | #include <asm/ldt.h> | 4 | #include <asm/ldt.h> |
5 | #include <asm/segment.h> | 5 | #include <asm/segment.h> |
6 | 6 | ||
7 | #define CPU_16BIT_STACK_SIZE 1024 | ||
8 | |||
9 | #ifndef __ASSEMBLY__ | 7 | #ifndef __ASSEMBLY__ |
10 | 8 | ||
11 | #include <linux/preempt.h> | 9 | #include <linux/preempt.h> |
@@ -16,8 +14,6 @@
16 | 14 | ||
17 | extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; | 15 | extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; |
18 | 16 | ||
19 | DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); | ||
20 | |||
21 | struct Xgt_desc_struct { | 17 | struct Xgt_desc_struct { |
22 | unsigned short size; | 18 | unsigned short size; |
23 | unsigned long address __attribute__((packed)); | 19 | unsigned long address __attribute__((packed)); |
@@ -33,11 +29,6 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
33 | return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address; | 29 | return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address; |
34 | } | 30 | } |
35 | 31 | ||
36 | /* | ||
37 | * This is the ldt that every process will get unless we need | ||
38 | * something other than this. | ||
39 | */ | ||
40 | extern struct desc_struct default_ldt[]; | ||
41 | extern struct desc_struct idt_table[]; | 32 | extern struct desc_struct idt_table[]; |
42 | extern void set_intr_gate(unsigned int irq, void * addr); | 33 | extern void set_intr_gate(unsigned int irq, void * addr); |
43 | 34 | ||
@@ -64,8 +55,10 @@ static inline void pack_gate(__u32 *a, __u32 *b,
64 | #define DESCTYPE_DPL3 0x60 /* DPL-3 */ | 55 | #define DESCTYPE_DPL3 0x60 /* DPL-3 */ |
65 | #define DESCTYPE_S 0x10 /* !system */ | 56 | #define DESCTYPE_S 0x10 /* !system */ |
66 | 57 | ||
58 | #ifdef CONFIG_PARAVIRT | ||
59 | #include <asm/paravirt.h> | ||
60 | #else | ||
67 | #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) | 61 | #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) |
68 | #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)) | ||
69 | 62 | ||
70 | #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr)) | 63 | #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr)) |
71 | #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr)) | 64 | #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr)) |
@@ -88,6 +81,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
88 | #undef C | 81 | #undef C |
89 | } | 82 | } |
90 | 83 | ||
84 | #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | ||
85 | #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | ||
86 | #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | ||
87 | |||
91 | static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b) | 88 | static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b) |
92 | { | 89 | { |
93 | __u32 *lp = (__u32 *)((char *)dt + entry*8); | 90 | __u32 *lp = (__u32 *)((char *)dt + entry*8); |
@@ -95,9 +92,25 @@ static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entr
95 | *(lp+1) = entry_b; | 92 | *(lp+1) = entry_b; |
96 | } | 93 | } |
97 | 94 | ||
98 | #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | 95 | #define set_ldt native_set_ldt |
99 | #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | 96 | #endif /* CONFIG_PARAVIRT */ |
100 | #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | 97 | |
98 | static inline fastcall void native_set_ldt(const void *addr, | ||
99 | unsigned int entries) | ||
100 | { | ||
101 | if (likely(entries == 0)) | ||
102 | __asm__ __volatile__("lldt %w0"::"q" (0)); | ||
103 | else { | ||
104 | unsigned cpu = smp_processor_id(); | ||
105 | __u32 a, b; | ||
106 | |||
107 | pack_descriptor(&a, &b, (unsigned long)addr, | ||
108 | entries * sizeof(struct desc_struct) - 1, | ||
109 | DESCTYPE_LDT, 0); | ||
110 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b); | ||
111 | __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); | ||
112 | } | ||
113 | } | ||
101 | 114 | ||
102 | static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg) | 115 | static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg) |
103 | { | 116 | { |
@@ -115,14 +128,6 @@ static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const vo
115 | write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b); | 128 | write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b); |
116 | } | 129 | } |
117 | 130 | ||
118 | static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries) | ||
119 | { | ||
120 | __u32 a, b; | ||
121 | pack_descriptor(&a, &b, (unsigned long)addr, | ||
122 | entries * sizeof(struct desc_struct) - 1, | ||
123 | DESCTYPE_LDT, 0); | ||
124 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b); | ||
125 | } | ||
126 | 131 | ||
127 | #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) | 132 | #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) |
128 | 133 | ||
@@ -153,35 +158,22 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entri
153 | 158 | ||
154 | static inline void clear_LDT(void) | 159 | static inline void clear_LDT(void) |
155 | { | 160 | { |
156 | int cpu = get_cpu(); | 161 | set_ldt(NULL, 0); |
157 | |||
158 | set_ldt_desc(cpu, &default_ldt[0], 5); | ||
159 | load_LDT_desc(); | ||
160 | put_cpu(); | ||
161 | } | 162 | } |
162 | 163 | ||
163 | /* | 164 | /* |
164 | * load one particular LDT into the current CPU | 165 | * load one particular LDT into the current CPU |
165 | */ | 166 | */ |
166 | static inline void load_LDT_nolock(mm_context_t *pc, int cpu) | 167 | static inline void load_LDT_nolock(mm_context_t *pc) |
167 | { | 168 | { |
168 | void *segments = pc->ldt; | 169 | set_ldt(pc->ldt, pc->size); |
169 | int count = pc->size; | ||
170 | |||
171 | if (likely(!count)) { | ||
172 | segments = &default_ldt[0]; | ||
173 | count = 5; | ||
174 | } | ||
175 | |||
176 | set_ldt_desc(cpu, segments, count); | ||
177 | load_LDT_desc(); | ||
178 | } | 170 | } |
179 | 171 | ||
180 | static inline void load_LDT(mm_context_t *pc) | 172 | static inline void load_LDT(mm_context_t *pc) |
181 | { | 173 | { |
182 | int cpu = get_cpu(); | 174 | preempt_disable(); |
183 | load_LDT_nolock(pc, cpu); | 175 | load_LDT_nolock(pc); |
184 | put_cpu(); | 176 | preempt_enable(); |
185 | } | 177 | } |
186 | 178 | ||
187 | static inline unsigned long get_desc_base(unsigned long *desc) | 179 | static inline unsigned long get_desc_base(unsigned long *desc) |
@@ -193,6 +185,29 @@ static inline unsigned long get_desc_base(unsigned long *desc)
193 | return base; | 185 | return base; |
194 | } | 186 | } |
195 | 187 | ||
188 | #else /* __ASSEMBLY__ */ | ||
189 | |||
190 | /* | ||
191 | * GET_DESC_BASE reads the descriptor base of the specified segment. | ||
192 | * | ||
193 | * Args: | ||
194 | * idx - descriptor index | ||
195 | * gdt - GDT pointer | ||
196 | * base - 32bit register to which the base will be written | ||
197 | * lo_w - lo word of the "base" register | ||
198 | * lo_b - lo byte of the "base" register | ||
199 | * hi_b - hi byte of the low word of the "base" register | ||
200 | * | ||
201 | * Example: | ||
202 | * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah) | ||
203 | * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. | ||
204 | */ | ||
205 | #define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ | ||
206 | movb idx*8+4(gdt), lo_b; \ | ||
207 | movb idx*8+7(gdt), hi_b; \ | ||
208 | shll $16, base; \ | ||
209 | movw idx*8+2(gdt), lo_w; | ||
210 | |||
196 | #endif /* !__ASSEMBLY__ */ | 211 | #endif /* !__ASSEMBLY__ */ |
197 | 212 | ||
198 | #endif | 213 | #endif |
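The GET_DESC_BASE assembler macro added at the end of the file reassembles a segment base that the GDT scatters across the descriptor: bits 15:0 live in bytes 2-3, bits 23:16 in byte 4, and bits 31:24 in byte 7. A small C sketch of the same reassembly, with a made-up base value:

#include <stdio.h>
#include <stdint.h>

/* Rebuild a 32-bit base from the three pieces stored in an 8-byte
 * GDT descriptor, mirroring what GET_DESC_BASE does in assembler. */
static uint32_t desc_base(const uint8_t d[8])
{
	return (uint32_t)d[2] | ((uint32_t)d[3] << 8) |
	       ((uint32_t)d[4] << 16) | ((uint32_t)d[7] << 24);
}

int main(void)
{
	uint8_t d[8] = { 0 };
	uint32_t base = 0x12345678;	/* illustrative value */

	d[2] = base & 0xff;
	d[3] = (base >> 8) & 0xff;
	d[4] = (base >> 16) & 0xff;
	d[7] = base >> 24;
	printf("%#x\n", desc_base(d));	/* prints 0x12345678 */
	return 0;
}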
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h
index f7514fb6e8e4..395077aba583 100644
--- a/include/asm-i386/e820.h
+++ b/include/asm-i386/e820.h
@@ -38,6 +38,11 @@ extern struct e820map e820;
38 | 38 | ||
39 | extern int e820_all_mapped(unsigned long start, unsigned long end, | 39 | extern int e820_all_mapped(unsigned long start, unsigned long end, |
40 | unsigned type); | 40 | unsigned type); |
41 | extern void find_max_pfn(void); | ||
42 | extern void register_bootmem_low_pages(unsigned long max_low_pfn); | ||
43 | extern void register_memory(void); | ||
44 | extern void limit_regions(unsigned long long size); | ||
45 | extern void print_memory_map(char *who); | ||
41 | 46 | ||
42 | #endif/*!__ASSEMBLY__*/ | 47 | #endif/*!__ASSEMBLY__*/ |
43 | 48 | ||
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 3a05436f31c0..45d21a0c95bf 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -91,7 +91,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t;
91 | pr_reg[7] = regs->xds; \ | 91 | pr_reg[7] = regs->xds; \ |
92 | pr_reg[8] = regs->xes; \ | 92 | pr_reg[8] = regs->xes; \ |
93 | savesegment(fs,pr_reg[9]); \ | 93 | savesegment(fs,pr_reg[9]); \ |
94 | savesegment(gs,pr_reg[10]); \ | 94 | pr_reg[10] = regs->xgs; \ |
95 | pr_reg[11] = regs->orig_eax; \ | 95 | pr_reg[11] = regs->orig_eax; \ |
96 | pr_reg[12] = regs->eip; \ | 96 | pr_reg[12] = regs->eip; \ |
97 | pr_reg[13] = regs->xcs; \ | 97 | pr_reg[13] = regs->xcs; \ |
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h
index 8ffbb0f07457..fd2be593b06e 100644
--- a/include/asm-i386/genapic.h
+++ b/include/asm-i386/genapic.h
@@ -122,6 +122,6 @@ struct genapic {
122 | APICFUNC(phys_pkg_id) \ | 122 | APICFUNC(phys_pkg_id) \ |
123 | } | 123 | } |
124 | 124 | ||
125 | extern struct genapic *genapic; | 125 | extern struct genapic *genapic, apic_default; |
126 | 126 | ||
127 | #endif | 127 | #endif |
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h
index bc1d6edae1ed..434936c732d6 100644
--- a/include/asm-i386/i387.h
+++ b/include/asm-i386/i387.h
@@ -76,7 +76,9 @@ static inline void __save_init_fpu( struct task_struct *tsk )
76 | 76 | ||
77 | #define __unlazy_fpu( tsk ) do { \ | 77 | #define __unlazy_fpu( tsk ) do { \ |
78 | if (task_thread_info(tsk)->status & TS_USEDFPU) \ | 78 | if (task_thread_info(tsk)->status & TS_USEDFPU) \ |
79 | save_init_fpu( tsk ); \ | 79 | save_init_fpu( tsk ); \ |
80 | else \ | ||
81 | tsk->fpu_counter = 0; \ | ||
80 | } while (0) | 82 | } while (0) |
81 | 83 | ||
82 | #define __clear_fpu( tsk ) \ | 84 | #define __clear_fpu( tsk ) \ |
@@ -118,6 +120,7 @@ static inline void save_init_fpu( struct task_struct *tsk )
118 | extern unsigned short get_fpu_cwd( struct task_struct *tsk ); | 120 | extern unsigned short get_fpu_cwd( struct task_struct *tsk ); |
119 | extern unsigned short get_fpu_swd( struct task_struct *tsk ); | 121 | extern unsigned short get_fpu_swd( struct task_struct *tsk ); |
120 | extern unsigned short get_fpu_mxcsr( struct task_struct *tsk ); | 122 | extern unsigned short get_fpu_mxcsr( struct task_struct *tsk ); |
123 | extern asmlinkage void math_state_restore(void); | ||
121 | 124 | ||
122 | /* | 125 | /* |
123 | * Signal frame handlers... | 126 | * Signal frame handlers... |
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 68df0dc3ab8f..86ff5e83be2f 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -256,11 +256,11 @@ static inline void flush_write_buffers(void)
256 | 256 | ||
257 | #endif /* __KERNEL__ */ | 257 | #endif /* __KERNEL__ */ |
258 | 258 | ||
259 | #ifdef SLOW_IO_BY_JUMPING | 259 | #if defined(CONFIG_PARAVIRT) |
260 | #define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:" | 260 | #include <asm/paravirt.h> |
261 | #else | 261 | #else |
262 | |||
262 | #define __SLOW_DOWN_IO "outb %%al,$0x80;" | 263 | #define __SLOW_DOWN_IO "outb %%al,$0x80;" |
263 | #endif | ||
264 | 264 | ||
265 | static inline void slow_down_io(void) { | 265 | static inline void slow_down_io(void) { |
266 | __asm__ __volatile__( | 266 | __asm__ __volatile__( |
@@ -271,6 +271,8 @@ static inline void slow_down_io(void) {
271 | : : ); | 271 | : : ); |
272 | } | 272 | } |
273 | 273 | ||
274 | #endif | ||
275 | |||
274 | #ifdef CONFIG_X86_NUMAQ | 276 | #ifdef CONFIG_X86_NUMAQ |
275 | extern void *xquad_portio; /* Where the IO area was mapped */ | 277 | extern void *xquad_portio; /* Where the IO area was mapped */ |
276 | #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) | 278 | #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) |
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h
index 331726b41128..11761cdaae19 100644
--- a/include/asm-i386/irq.h
+++ b/include/asm-i386/irq.h
@@ -37,8 +37,13 @@ static __inline__ int irq_canonicalize(int irq)
37 | extern int irqbalance_disable(char *str); | 37 | extern int irqbalance_disable(char *str); |
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | extern void quirk_intel_irqbalance(void); | ||
41 | |||
40 | #ifdef CONFIG_HOTPLUG_CPU | 42 | #ifdef CONFIG_HOTPLUG_CPU |
41 | extern void fixup_irqs(cpumask_t map); | 43 | extern void fixup_irqs(cpumask_t map); |
42 | #endif | 44 | #endif |
43 | 45 | ||
46 | void init_IRQ(void); | ||
47 | void __init native_init_IRQ(void); | ||
48 | |||
44 | #endif /* _ASM_IRQ_H */ | 49 | #endif /* _ASM_IRQ_H */ |
diff --git a/include/asm-i386/irq_regs.h b/include/asm-i386/irq_regs.h
index 3dd9c0b70270..a1b3f7f594a2 100644
--- a/include/asm-i386/irq_regs.h
+++ b/include/asm-i386/irq_regs.h
@@ -1 +1,27 @@
1 | #include <asm-generic/irq_regs.h> | 1 | /* |
2 | * Per-cpu current frame pointer - the location of the last exception frame on | ||
3 | * the stack, stored in the PDA. | ||
4 | * | ||
5 | * Jeremy Fitzhardinge <jeremy@goop.org> | ||
6 | */ | ||
7 | #ifndef _ASM_I386_IRQ_REGS_H | ||
8 | #define _ASM_I386_IRQ_REGS_H | ||
9 | |||
10 | #include <asm/pda.h> | ||
11 | |||
12 | static inline struct pt_regs *get_irq_regs(void) | ||
13 | { | ||
14 | return read_pda(irq_regs); | ||
15 | } | ||
16 | |||
17 | static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) | ||
18 | { | ||
19 | struct pt_regs *old_regs; | ||
20 | |||
21 | old_regs = read_pda(irq_regs); | ||
22 | write_pda(irq_regs, new_regs); | ||
23 | |||
24 | return old_regs; | ||
25 | } | ||
26 | |||
27 | #endif /* _ASM_I386_IRQ_REGS_H */ | ||
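The new get_irq_regs()/set_irq_regs() pair follows a save-and-restore idiom: the entry path stores the new frame pointer and keeps the old one so the exit path can put it back. A userspace sketch of that idiom, with a thread-local pointer standing in for the per-CPU PDA field and a stub struct in place of pt_regs (both illustrative):

#include <stdio.h>

struct regs_stub { int dummy; };

static __thread struct regs_stub *irq_regs_slot;

/* Store the new value and hand back the previous one. */
static struct regs_stub *set_regs(struct regs_stub *new_regs)
{
	struct regs_stub *old_regs = irq_regs_slot;

	irq_regs_slot = new_regs;
	return old_regs;
}

int main(void)
{
	struct regs_stub frame;
	struct regs_stub *old = set_regs(&frame);	/* on interrupt entry */

	/* ... handler body would run here ... */
	set_regs(old);					/* on interrupt exit */
	printf("restored %p\n", (void *)irq_regs_slot);	/* back to NULL */
	return 0;
}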
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h
index e1bdb97c07fa..17b18cf4fe9d 100644
--- a/include/asm-i386/irqflags.h
+++ b/include/asm-i386/irqflags.h
@@ -10,6 +10,9 @@
10 | #ifndef _ASM_IRQFLAGS_H | 10 | #ifndef _ASM_IRQFLAGS_H |
11 | #define _ASM_IRQFLAGS_H | 11 | #define _ASM_IRQFLAGS_H |
12 | 12 | ||
13 | #ifdef CONFIG_PARAVIRT | ||
14 | #include <asm/paravirt.h> | ||
15 | #else | ||
13 | #ifndef __ASSEMBLY__ | 16 | #ifndef __ASSEMBLY__ |
14 | 17 | ||
15 | static inline unsigned long __raw_local_save_flags(void) | 18 | static inline unsigned long __raw_local_save_flags(void) |
@@ -25,9 +28,6 @@ static inline unsigned long __raw_local_save_flags(void)
25 | return flags; | 28 | return flags; |
26 | } | 29 | } |
27 | 30 | ||
28 | #define raw_local_save_flags(flags) \ | ||
29 | do { (flags) = __raw_local_save_flags(); } while (0) | ||
30 | |||
31 | static inline void raw_local_irq_restore(unsigned long flags) | 31 | static inline void raw_local_irq_restore(unsigned long flags) |
32 | { | 32 | { |
33 | __asm__ __volatile__( | 33 | __asm__ __volatile__( |
@@ -66,18 +66,6 @@ static inline void halt(void)
66 | __asm__ __volatile__("hlt": : :"memory"); | 66 | __asm__ __volatile__("hlt": : :"memory"); |
67 | } | 67 | } |
68 | 68 | ||
69 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
70 | { | ||
71 | return !(flags & (1 << 9)); | ||
72 | } | ||
73 | |||
74 | static inline int raw_irqs_disabled(void) | ||
75 | { | ||
76 | unsigned long flags = __raw_local_save_flags(); | ||
77 | |||
78 | return raw_irqs_disabled_flags(flags); | ||
79 | } | ||
80 | |||
81 | /* | 69 | /* |
82 | * For spinlocks, etc: | 70 | * For spinlocks, etc: |
83 | */ | 71 | */ |
@@ -90,9 +78,33 @@ static inline unsigned long __raw_local_irq_save(void)
90 | return flags; | 78 | return flags; |
91 | } | 79 | } |
92 | 80 | ||
81 | #else | ||
82 | #define DISABLE_INTERRUPTS(clobbers) cli | ||
83 | #define ENABLE_INTERRUPTS(clobbers) sti | ||
84 | #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit | ||
85 | #define INTERRUPT_RETURN iret | ||
86 | #define GET_CR0_INTO_EAX movl %cr0, %eax | ||
87 | #endif /* __ASSEMBLY__ */ | ||
88 | #endif /* CONFIG_PARAVIRT */ | ||
89 | |||
90 | #ifndef __ASSEMBLY__ | ||
91 | #define raw_local_save_flags(flags) \ | ||
92 | do { (flags) = __raw_local_save_flags(); } while (0) | ||
93 | |||
93 | #define raw_local_irq_save(flags) \ | 94 | #define raw_local_irq_save(flags) \ |
94 | do { (flags) = __raw_local_irq_save(); } while (0) | 95 | do { (flags) = __raw_local_irq_save(); } while (0) |
95 | 96 | ||
97 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
98 | { | ||
99 | return !(flags & (1 << 9)); | ||
100 | } | ||
101 | |||
102 | static inline int raw_irqs_disabled(void) | ||
103 | { | ||
104 | unsigned long flags = __raw_local_save_flags(); | ||
105 | |||
106 | return raw_irqs_disabled_flags(flags); | ||
107 | } | ||
96 | #endif /* __ASSEMBLY__ */ | 108 | #endif /* __ASSEMBLY__ */ |
97 | 109 | ||
98 | /* | 110 | /* |
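Whichever side of the CONFIG_PARAVIRT split is built, the relocated raw_irqs_disabled_flags() still just tests bit 9 of EFLAGS (the IF flag). A standalone illustration with made-up flag values:

#include <stdio.h>

/* Bit 9 of EFLAGS is IF (interrupts enabled). */
#define X86_EFLAGS_IF	(1UL << 9)

static int irqs_disabled_flags(unsigned long flags)
{
	return !(flags & X86_EFLAGS_IF);
}

int main(void)
{
	printf("%d\n", irqs_disabled_flags(0x246));	/* IF set   -> 0 */
	printf("%d\n", irqs_disabled_flags(0x046));	/* IF clear -> 1 */
	return 0;
}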
diff --git a/include/asm-i386/mach-default/setup_arch.h b/include/asm-i386/mach-default/setup_arch.h
index fb42099e7bd4..605e3ccb991b 100644
--- a/include/asm-i386/mach-default/setup_arch.h
+++ b/include/asm-i386/mach-default/setup_arch.h
@@ -2,4 +2,6 @@
2 | 2 | ||
3 | /* no action for generic */ | 3 | /* no action for generic */ |
4 | 4 | ||
5 | #ifndef ARCH_SETUP | ||
5 | #define ARCH_SETUP | 6 | #define ARCH_SETUP |
7 | #endif | ||
diff --git a/include/asm-i386/math_emu.h b/include/asm-i386/math_emu.h
index 697673b555ce..a4b0aa3320e6 100644
--- a/include/asm-i386/math_emu.h
+++ b/include/asm-i386/math_emu.h
@@ -21,6 +21,7 @@ struct info {
21 | long ___eax; | 21 | long ___eax; |
22 | long ___ds; | 22 | long ___ds; |
23 | long ___es; | 23 | long ___es; |
24 | long ___fs; | ||
24 | long ___orig_eax; | 25 | long ___orig_eax; |
25 | long ___eip; | 26 | long ___eip; |
26 | long ___cs; | 27 | long ___cs; |
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h
index 62b7bf184094..68ff102d6f5e 100644
--- a/include/asm-i386/mmu_context.h
+++ b/include/asm-i386/mmu_context.h
@@ -44,7 +44,7 @@ static inline void switch_mm(struct mm_struct *prev,
44 | * load the LDT, if the LDT is different: | 44 | * load the LDT, if the LDT is different: |
45 | */ | 45 | */ |
46 | if (unlikely(prev->context.ldt != next->context.ldt)) | 46 | if (unlikely(prev->context.ldt != next->context.ldt)) |
47 | load_LDT_nolock(&next->context, cpu); | 47 | load_LDT_nolock(&next->context); |
48 | } | 48 | } |
49 | #ifdef CONFIG_SMP | 49 | #ifdef CONFIG_SMP |
50 | else { | 50 | else { |
@@ -56,14 +56,14 @@ static inline void switch_mm(struct mm_struct *prev,
56 | * tlb flush IPI delivery. We must reload %cr3. | 56 | * tlb flush IPI delivery. We must reload %cr3. |
57 | */ | 57 | */ |
58 | load_cr3(next->pgd); | 58 | load_cr3(next->pgd); |
59 | load_LDT_nolock(&next->context, cpu); | 59 | load_LDT_nolock(&next->context); |
60 | } | 60 | } |
61 | } | 61 | } |
62 | #endif | 62 | #endif |
63 | } | 63 | } |
64 | 64 | ||
65 | #define deactivate_mm(tsk, mm) \ | 65 | #define deactivate_mm(tsk, mm) \ |
66 | asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0)) | 66 | asm("movl %0,%%fs": :"r" (0)); |
67 | 67 | ||
68 | #define activate_mm(prev, next) \ | 68 | #define activate_mm(prev, next) \ |
69 | switch_mm((prev),(next),NULL) | 69 | switch_mm((prev),(next),NULL) |
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h
index 424661d25bd3..02f8f541cbe0 100644
--- a/include/asm-i386/module.h
+++ b/include/asm-i386/module.h
@@ -20,6 +20,8 @@ struct mod_arch_specific
20 | #define MODULE_PROC_FAMILY "586TSC " | 20 | #define MODULE_PROC_FAMILY "586TSC " |
21 | #elif defined CONFIG_M586MMX | 21 | #elif defined CONFIG_M586MMX |
22 | #define MODULE_PROC_FAMILY "586MMX " | 22 | #define MODULE_PROC_FAMILY "586MMX " |
23 | #elif defined CONFIG_MCORE2 | ||
24 | #define MODULE_PROC_FAMILY "CORE2 " | ||
23 | #elif defined CONFIG_M686 | 25 | #elif defined CONFIG_M686 |
24 | #define MODULE_PROC_FAMILY "686 " | 26 | #define MODULE_PROC_FAMILY "686 " |
25 | #elif defined CONFIG_MPENTIUMII | 27 | #elif defined CONFIG_MPENTIUMII |
@@ -60,18 +62,12 @@ struct mod_arch_specific
60 | #error unknown processor family | 62 | #error unknown processor family |
61 | #endif | 63 | #endif |
62 | 64 | ||
63 | #ifdef CONFIG_REGPARM | ||
64 | #define MODULE_REGPARM "REGPARM " | ||
65 | #else | ||
66 | #define MODULE_REGPARM "" | ||
67 | #endif | ||
68 | |||
69 | #ifdef CONFIG_4KSTACKS | 65 | #ifdef CONFIG_4KSTACKS |
70 | #define MODULE_STACKSIZE "4KSTACKS " | 66 | #define MODULE_STACKSIZE "4KSTACKS " |
71 | #else | 67 | #else |
72 | #define MODULE_STACKSIZE "" | 68 | #define MODULE_STACKSIZE "" |
73 | #endif | 69 | #endif |
74 | 70 | ||
75 | #define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE | 71 | #define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE |
76 | 72 | ||
77 | #endif /* _ASM_I386_MODULE_H */ | 73 | #endif /* _ASM_I386_MODULE_H */ |
diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h
index 76feedf85a8a..13bafb16e7af 100644
--- a/include/asm-i386/mpspec_def.h
+++ b/include/asm-i386/mpspec_def.h
@@ -97,7 +97,6 @@ struct mpc_config_bus
97 | #define BUSTYPE_TC "TC" | 97 | #define BUSTYPE_TC "TC" |
98 | #define BUSTYPE_VME "VME" | 98 | #define BUSTYPE_VME "VME" |
99 | #define BUSTYPE_XPRESS "XPRESS" | 99 | #define BUSTYPE_XPRESS "XPRESS" |
100 | #define BUSTYPE_NEC98 "NEC98" | ||
101 | 100 | ||
102 | struct mpc_config_ioapic | 101 | struct mpc_config_ioapic |
103 | { | 102 | { |
@@ -182,7 +181,6 @@ enum mp_bustype {
182 | MP_BUS_EISA, | 181 | MP_BUS_EISA, |
183 | MP_BUS_PCI, | 182 | MP_BUS_PCI, |
184 | MP_BUS_MCA, | 183 | MP_BUS_MCA, |
185 | MP_BUS_NEC98 | ||
186 | }; | 184 | }; |
187 | #endif | 185 | #endif |
188 | 186 | ||
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h
index 62b76cd96957..5679d4993072 100644
--- a/include/asm-i386/msr.h
+++ b/include/asm-i386/msr.h
@@ -1,6 +1,10 @@
1 | #ifndef __ASM_MSR_H | 1 | #ifndef __ASM_MSR_H |
2 | #define __ASM_MSR_H | 2 | #define __ASM_MSR_H |
3 | 3 | ||
4 | #ifdef CONFIG_PARAVIRT | ||
5 | #include <asm/paravirt.h> | ||
6 | #else | ||
7 | |||
4 | /* | 8 | /* |
5 | * Access to machine-specific registers (available on 586 and better only) | 9 | * Access to machine-specific registers (available on 586 and better only) |
6 | * Note: the rd* operations modify the parameters directly (without using | 10 | * Note: the rd* operations modify the parameters directly (without using |
@@ -77,6 +81,7 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
77 | __asm__ __volatile__("rdpmc" \ | 81 | __asm__ __volatile__("rdpmc" \ |
78 | : "=a" (low), "=d" (high) \ | 82 | : "=a" (low), "=d" (high) \ |
79 | : "c" (counter)) | 83 | : "c" (counter)) |
84 | #endif /* !CONFIG_PARAVIRT */ | ||
80 | 85 | ||
81 | /* symbolic names for some interesting MSRs */ | 86 | /* symbolic names for some interesting MSRs */ |
82 | /* Intel defined MSRs. */ | 87 | /* Intel defined MSRs. */ |
@@ -141,6 +146,10 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
141 | #define MSR_IA32_MC0_ADDR 0x402 | 146 | #define MSR_IA32_MC0_ADDR 0x402 |
142 | #define MSR_IA32_MC0_MISC 0x403 | 147 | #define MSR_IA32_MC0_MISC 0x403 |
143 | 148 | ||
149 | #define MSR_IA32_PEBS_ENABLE 0x3f1 | ||
150 | #define MSR_IA32_DS_AREA 0x600 | ||
151 | #define MSR_IA32_PERF_CAPABILITIES 0x345 | ||
152 | |||
144 | /* Pentium IV performance counter MSRs */ | 153 | /* Pentium IV performance counter MSRs */ |
145 | #define MSR_P4_BPU_PERFCTR0 0x300 | 154 | #define MSR_P4_BPU_PERFCTR0 0x300 |
146 | #define MSR_P4_BPU_PERFCTR1 0x301 | 155 | #define MSR_P4_BPU_PERFCTR1 0x301 |
@@ -284,4 +293,13 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val)
284 | #define MSR_TMTA_LRTI_READOUT 0x80868018 | 293 | #define MSR_TMTA_LRTI_READOUT 0x80868018 |
285 | #define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a | 294 | #define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a |
286 | 295 | ||
296 | /* Intel Core-based CPU performance counters */ | ||
297 | #define MSR_CORE_PERF_FIXED_CTR0 0x309 | ||
298 | #define MSR_CORE_PERF_FIXED_CTR1 0x30a | ||
299 | #define MSR_CORE_PERF_FIXED_CTR2 0x30b | ||
300 | #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d | ||
301 | #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e | ||
302 | #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f | ||
303 | #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 | ||
304 | |||
287 | #endif /* __ASM_MSR_H */ | 305 | #endif /* __ASM_MSR_H */ |
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h
index 269d315719ca..b04333ea6f31 100644
--- a/include/asm-i386/nmi.h
+++ b/include/asm-i386/nmi.h
@@ -5,6 +5,9 @@
5 | #define ASM_NMI_H | 5 | #define ASM_NMI_H |
6 | 6 | ||
7 | #include <linux/pm.h> | 7 | #include <linux/pm.h> |
8 | #include <asm/irq.h> | ||
9 | |||
10 | #ifdef ARCH_HAS_NMI_WATCHDOG | ||
8 | 11 | ||
9 | /** | 12 | /** |
10 | * do_nmi_callback | 13 | * do_nmi_callback |
@@ -42,4 +45,9 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *,
42 | void __user *, size_t *, loff_t *); | 45 | void __user *, size_t *, loff_t *); |
43 | extern int unknown_nmi_panic; | 46 | extern int unknown_nmi_panic; |
44 | 47 | ||
48 | void __trigger_all_cpu_backtrace(void); | ||
49 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | ||
50 | |||
51 | #endif | ||
52 | |||
45 | #endif /* ASM_NMI_H */ | 53 | #endif /* ASM_NMI_H */ |
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index f5bf544c729a..fd3f64ace248 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -52,6 +52,7 @@ typedef struct { unsigned long long pgprot; } pgprot_t;
52 | #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) | 52 | #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) |
53 | #define __pmd(x) ((pmd_t) { (x) } ) | 53 | #define __pmd(x) ((pmd_t) { (x) } ) |
54 | #define HPAGE_SHIFT 21 | 54 | #define HPAGE_SHIFT 21 |
55 | #include <asm-generic/pgtable-nopud.h> | ||
55 | #else | 56 | #else |
56 | typedef struct { unsigned long pte_low; } pte_t; | 57 | typedef struct { unsigned long pte_low; } pte_t; |
57 | typedef struct { unsigned long pgd; } pgd_t; | 58 | typedef struct { unsigned long pgd; } pgd_t; |
@@ -59,6 +60,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
59 | #define boot_pte_t pte_t /* or would you rather have a typedef */ | 60 | #define boot_pte_t pte_t /* or would you rather have a typedef */ |
60 | #define pte_val(x) ((x).pte_low) | 61 | #define pte_val(x) ((x).pte_low) |
61 | #define HPAGE_SHIFT 22 | 62 | #define HPAGE_SHIFT 22 |
63 | #include <asm-generic/pgtable-nopmd.h> | ||
62 | #endif | 64 | #endif |
63 | #define PTE_MASK PAGE_MASK | 65 | #define PTE_MASK PAGE_MASK |
64 | 66 | ||
@@ -112,18 +114,18 @@ extern int page_is_ram(unsigned long pagenr);
112 | 114 | ||
113 | #ifdef __ASSEMBLY__ | 115 | #ifdef __ASSEMBLY__ |
114 | #define __PAGE_OFFSET CONFIG_PAGE_OFFSET | 116 | #define __PAGE_OFFSET CONFIG_PAGE_OFFSET |
115 | #define __PHYSICAL_START CONFIG_PHYSICAL_START | ||
116 | #else | 117 | #else |
117 | #define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) | 118 | #define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) |
118 | #define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) | ||
119 | #endif | 119 | #endif |
120 | #define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START) | ||
121 | 120 | ||
122 | 121 | ||
123 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) | 122 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) |
124 | #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) | 123 | #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) |
125 | #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) | 124 | #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) |
126 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) | 125 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) |
126 | /* __pa_symbol should be used for C visible symbols. | ||
127 | This seems to be the official gcc blessed way to do such arithmetic. */ | ||
128 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0)) | ||
127 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | 129 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) |
128 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 130 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
129 | #ifdef CONFIG_FLATMEM | 131 | #ifdef CONFIG_FLATMEM |
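The new include/asm-i386/paravirt.h that follows routes low-level CPU operations through a struct of function pointers, so a hypervisor backend can substitute its own implementations at boot while the native functions fill the table by default. A minimal userspace sketch of that ops-table indirection (all names and values here are illustrative, not the kernel's interface):

#include <stdio.h>

/* A tiny ops table: call sites go through function pointers that a
 * backend could overwrite before use. */
struct demo_ops {
	const char *name;
	unsigned long (*save_fl)(void);
	void (*restore_fl)(unsigned long flags);
};

static unsigned long native_save_fl(void)
{
	return 1UL << 9;		/* pretend IF is set */
}

static void native_restore_fl(unsigned long flags)
{
	(void)flags;			/* nothing to do in the sketch */
}

static struct demo_ops demo_ops = {
	.name		= "native",
	.save_fl	= native_save_fl,
	.restore_fl	= native_restore_fl,
};

#define demo_save_flags(f)	do { (f) = demo_ops.save_fl(); } while (0)

int main(void)
{
	unsigned long flags;

	demo_save_flags(flags);
	printf("%s: flags=%#lx\n", demo_ops.name, flags);
	demo_ops.restore_fl(flags);
	return 0;
}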
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
new file mode 100644
index 000000000000..9f06265065f4
--- /dev/null
+++ b/include/asm-i386/paravirt.h
@@ -0,0 +1,505 @@
1 | #ifndef __ASM_PARAVIRT_H | ||
2 | #define __ASM_PARAVIRT_H | ||
3 | /* Various instructions on x86 need to be replaced for | ||
4 | * para-virtualization: those hooks are defined here. */ | ||
5 | #include <linux/linkage.h> | ||
6 | #include <linux/stringify.h> | ||
7 | #include <asm/page.h> | ||
8 | |||
9 | #ifdef CONFIG_PARAVIRT | ||
10 | /* These are the most performance critical ops, so we want to be able to patch | ||
11 | * callers */ | ||
12 | #define PARAVIRT_IRQ_DISABLE 0 | ||
13 | #define PARAVIRT_IRQ_ENABLE 1 | ||
14 | #define PARAVIRT_RESTORE_FLAGS 2 | ||
15 | #define PARAVIRT_SAVE_FLAGS 3 | ||
16 | #define PARAVIRT_SAVE_FLAGS_IRQ_DISABLE 4 | ||
17 | #define PARAVIRT_INTERRUPT_RETURN 5 | ||
18 | #define PARAVIRT_STI_SYSEXIT 6 | ||
19 | |||
20 | /* Bitmask of what can be clobbered: usually at least eax. */ | ||
21 | #define CLBR_NONE 0x0 | ||
22 | #define CLBR_EAX 0x1 | ||
23 | #define CLBR_ECX 0x2 | ||
24 | #define CLBR_EDX 0x4 | ||
25 | #define CLBR_ANY 0x7 | ||
26 | |||
27 | #ifndef __ASSEMBLY__ | ||
28 | struct thread_struct; | ||
29 | struct Xgt_desc_struct; | ||
30 | struct tss_struct; | ||
31 | struct mm_struct; | ||
32 | struct paravirt_ops | ||
33 | { | ||
34 | unsigned int kernel_rpl; | ||
35 | int paravirt_enabled; | ||
36 | const char *name; | ||
37 | |||
38 | /* | ||
39 | * Patch may replace one of the defined code sequences with arbitrary | ||
40 | * code, subject to the same register constraints. This generally | ||
41 | * means the code is not free to clobber any registers other than EAX. | ||
42 | * The patch function should return the number of bytes of code | ||
43 | * generated, as we nop pad the rest in generic code. | ||
44 | */ | ||
45 | unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len); | ||
46 | |||
47 | void (*arch_setup)(void); | ||
48 | char *(*memory_setup)(void); | ||
49 | void (*init_IRQ)(void); | ||
50 | |||
51 | void (*banner)(void); | ||
52 | |||
53 | unsigned long (*get_wallclock)(void); | ||
54 | int (*set_wallclock)(unsigned long); | ||
55 | void (*time_init)(void); | ||
56 | |||
57 | /* All the function pointers here are declared as "fastcall" | ||
58 | so that we get a specific register-based calling | ||
59 | convention. This makes it easier to implement inline | ||
60 | assembler replacements. */ | ||
61 | |||
62 | void (fastcall *cpuid)(unsigned int *eax, unsigned int *ebx, | ||
63 | unsigned int *ecx, unsigned int *edx); | ||
64 | |||
65 | unsigned long (fastcall *get_debugreg)(int regno); | ||
66 | void (fastcall *set_debugreg)(int regno, unsigned long value); | ||
67 | |||
68 | void (fastcall *clts)(void); | ||
69 | |||
70 | unsigned long (fastcall *read_cr0)(void); | ||
71 | void (fastcall *write_cr0)(unsigned long); | ||
72 | |||
73 | unsigned long (fastcall *read_cr2)(void); | ||
74 | void (fastcall *write_cr2)(unsigned long); | ||
75 | |||
76 | unsigned long (fastcall *read_cr3)(void); | ||
77 | void (fastcall *write_cr3)(unsigned long); | ||
78 | |||
79 | unsigned long (fastcall *read_cr4_safe)(void); | ||
80 | unsigned long (fastcall *read_cr4)(void); | ||
81 | void (fastcall *write_cr4)(unsigned long); | ||
82 | |||
83 | unsigned long (fastcall *save_fl)(void); | ||
84 | void (fastcall *restore_fl)(unsigned long); | ||
85 | void (fastcall *irq_disable)(void); | ||
86 | void (fastcall *irq_enable)(void); | ||
87 | void (fastcall *safe_halt)(void); | ||
88 | void (fastcall *halt)(void); | ||
89 | void (fastcall *wbinvd)(void); | ||
90 | |||
91 | /* err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ | ||
92 | u64 (fastcall *read_msr)(unsigned int msr, int *err); | ||
93 | int (fastcall *write_msr)(unsigned int msr, u64 val); | ||
94 | |||
95 | u64 (fastcall *read_tsc)(void); | ||
96 | u64 (fastcall *read_pmc)(void); | ||
97 | |||
98 | void (fastcall *load_tr_desc)(void); | ||
99 | void (fastcall *load_gdt)(const struct Xgt_desc_struct *); | ||
100 | void (fastcall *load_idt)(const struct Xgt_desc_struct *); | ||
101 | void (fastcall *store_gdt)(struct Xgt_desc_struct *); | ||
102 | void (fastcall *store_idt)(struct Xgt_desc_struct *); | ||
103 | void (fastcall *set_ldt)(const void *desc, unsigned entries); | ||
104 | unsigned long (fastcall *store_tr)(void); | ||
105 | void (fastcall *load_tls)(struct thread_struct *t, unsigned int cpu); | ||
106 | void (fastcall *write_ldt_entry)(void *dt, int entrynum, | ||
107 | u32 low, u32 high); | ||
108 | void (fastcall *write_gdt_entry)(void *dt, int entrynum, | ||
109 | u32 low, u32 high); | ||
110 | void (fastcall *write_idt_entry)(void *dt, int entrynum, | ||
111 | u32 low, u32 high); | ||
112 | void (fastcall *load_esp0)(struct tss_struct *tss, | ||
113 | struct thread_struct *thread); | ||
114 | |||
115 | void (fastcall *set_iopl_mask)(unsigned mask); | ||
116 | |||
117 | void (fastcall *io_delay)(void); | ||
118 | void (*const_udelay)(unsigned long loops); | ||
119 | |||
120 | #ifdef CONFIG_X86_LOCAL_APIC | ||
121 | void (fastcall *apic_write)(unsigned long reg, unsigned long v); | ||
122 | void (fastcall *apic_write_atomic)(unsigned long reg, unsigned long v); | ||
123 | unsigned long (fastcall *apic_read)(unsigned long reg); | ||
124 | #endif | ||
125 | |||
126 | void (fastcall *flush_tlb_user)(void); | ||
127 | void (fastcall *flush_tlb_kernel)(void); | ||
128 | void (fastcall *flush_tlb_single)(u32 addr); | ||
129 | |||
130 | void (fastcall *set_pte)(pte_t *ptep, pte_t pteval); | ||
131 | void (fastcall *set_pte_at)(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval); | ||
132 | void (fastcall *set_pmd)(pmd_t *pmdp, pmd_t pmdval); | ||
133 | void (fastcall *pte_update)(struct mm_struct *mm, u32 addr, pte_t *ptep); | ||
134 | void (fastcall *pte_update_defer)(struct mm_struct *mm, u32 addr, pte_t *ptep); | ||
135 | #ifdef CONFIG_X86_PAE | ||
136 | void (fastcall *set_pte_atomic)(pte_t *ptep, pte_t pteval); | ||
137 | void (fastcall *set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); | ||
138 | void (fastcall *set_pud)(pud_t *pudp, pud_t pudval); | ||
139 | void (fastcall *pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | ||
140 | void (fastcall *pmd_clear)(pmd_t *pmdp); | ||
141 | #endif | ||
142 | |||
143 | /* These two are jmp to, not actually called. */ | ||
144 | void (fastcall *irq_enable_sysexit)(void); | ||
145 | void (fastcall *iret)(void); | ||
146 | }; | ||
147 | |||
148 | /* Mark a paravirt probe function. */ | ||
149 | #define paravirt_probe(fn) \ | ||
150 | static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \ | ||
151 | __attribute__((__section__(".paravirtprobe"))) = fn | ||
152 | |||
153 | extern struct paravirt_ops paravirt_ops; | ||
154 | |||
155 | #define paravirt_enabled() (paravirt_ops.paravirt_enabled) | ||
156 | |||
157 | static inline void load_esp0(struct tss_struct *tss, | ||
158 | struct thread_struct *thread) | ||
159 | { | ||
160 | paravirt_ops.load_esp0(tss, thread); | ||
161 | } | ||
162 | |||
163 | #define ARCH_SETUP paravirt_ops.arch_setup(); | ||
164 | static inline unsigned long get_wallclock(void) | ||
165 | { | ||
166 | return paravirt_ops.get_wallclock(); | ||
167 | } | ||
168 | |||
169 | static inline int set_wallclock(unsigned long nowtime) | ||
170 | { | ||
171 | return paravirt_ops.set_wallclock(nowtime); | ||
172 | } | ||
173 | |||
174 | static inline void do_time_init(void) | ||
175 | { | ||
176 | return paravirt_ops.time_init(); | ||
177 | } | ||
178 | |||
179 | /* The paravirtualized CPUID instruction. */ | ||
180 | static inline void __cpuid(unsigned int *eax, unsigned int *ebx, | ||
181 | unsigned int *ecx, unsigned int *edx) | ||
182 | { | ||
183 | paravirt_ops.cpuid(eax, ebx, ecx, edx); | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * These special macros can be used to get or set a debugging register | ||
188 | */ | ||
189 | #define get_debugreg(var, reg) var = paravirt_ops.get_debugreg(reg) | ||
190 | #define set_debugreg(val, reg) paravirt_ops.set_debugreg(reg, val) | ||
191 | |||
192 | #define clts() paravirt_ops.clts() | ||
193 | |||
194 | #define read_cr0() paravirt_ops.read_cr0() | ||
195 | #define write_cr0(x) paravirt_ops.write_cr0(x) | ||
196 | |||
197 | #define read_cr2() paravirt_ops.read_cr2() | ||
198 | #define write_cr2(x) paravirt_ops.write_cr2(x) | ||
199 | |||
200 | #define read_cr3() paravirt_ops.read_cr3() | ||
201 | #define write_cr3(x) paravirt_ops.write_cr3(x) | ||
202 | |||
203 | #define read_cr4() paravirt_ops.read_cr4() | ||
204 | #define read_cr4_safe(x) paravirt_ops.read_cr4_safe() | ||
205 | #define write_cr4(x) paravirt_ops.write_cr4(x) | ||
206 | |||
207 | static inline void raw_safe_halt(void) | ||
208 | { | ||
209 | paravirt_ops.safe_halt(); | ||
210 | } | ||
211 | |||
212 | static inline void halt(void) | ||
213 | { | ||
214 | paravirt_ops.safe_halt(); | ||
215 | } | ||
216 | #define wbinvd() paravirt_ops.wbinvd() | ||
217 | |||
218 | #define get_kernel_rpl() (paravirt_ops.kernel_rpl) | ||
219 | |||
220 | #define rdmsr(msr,val1,val2) do { \ | ||
221 | int _err; \ | ||
222 | u64 _l = paravirt_ops.read_msr(msr,&_err); \ | ||
223 | val1 = (u32)_l; \ | ||
224 | val2 = _l >> 32; \ | ||
225 | } while(0) | ||
226 | |||
227 | #define wrmsr(msr,val1,val2) do { \ | ||
228 | u64 _l = ((u64)(val2) << 32) | (val1); \ | ||
229 | paravirt_ops.write_msr((msr), _l); \ | ||
230 | } while(0) | ||
231 | |||
232 | #define rdmsrl(msr,val) do { \ | ||
233 | int _err; \ | ||
234 | val = paravirt_ops.read_msr((msr),&_err); \ | ||
235 | } while(0) | ||
236 | |||
237 | #define wrmsrl(msr,val) (paravirt_ops.write_msr((msr),(val))) | ||
238 | #define wrmsr_safe(msr,a,b) ({ \ | ||
239 | u64 _l = ((u64)(b) << 32) | (a); \ | ||
240 | paravirt_ops.write_msr((msr),_l); \ | ||
241 | }) | ||
242 | |||
243 | /* rdmsr with exception handling */ | ||
244 | #define rdmsr_safe(msr,a,b) ({ \ | ||
245 | int _err; \ | ||
246 | u64 _l = paravirt_ops.read_msr(msr,&_err); \ | ||
247 | (*a) = (u32)_l; \ | ||
248 | (*b) = _l >> 32; \ | ||
249 | _err; }) | ||
250 | |||
251 | #define rdtsc(low,high) do { \ | ||
252 | u64 _l = paravirt_ops.read_tsc(); \ | ||
253 | low = (u32)_l; \ | ||
254 | high = _l >> 32; \ | ||
255 | } while(0) | ||
256 | |||
257 | #define rdtscl(low) do { \ | ||
258 | u64 _l = paravirt_ops.read_tsc(); \ | ||
259 | low = (int)_l; \ | ||
260 | } while(0) | ||
261 | |||
262 | #define rdtscll(val) (val = paravirt_ops.read_tsc()) | ||
263 | |||
264 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) | ||
265 | |||
266 | #define rdpmc(counter,low,high) do { \ | ||
267 | u64 _l = paravirt_ops.read_pmc(); \ | ||
268 | low = (u32)_l; \ | ||
269 | high = _l >> 32; \ | ||
270 | } while(0) | ||
271 | |||
272 | #define load_TR_desc() (paravirt_ops.load_tr_desc()) | ||
273 | #define load_gdt(dtr) (paravirt_ops.load_gdt(dtr)) | ||
274 | #define load_idt(dtr) (paravirt_ops.load_idt(dtr)) | ||
275 | #define set_ldt(addr, entries) (paravirt_ops.set_ldt((addr), (entries))) | ||
276 | #define store_gdt(dtr) (paravirt_ops.store_gdt(dtr)) | ||
277 | #define store_idt(dtr) (paravirt_ops.store_idt(dtr)) | ||
278 | #define store_tr(tr) ((tr) = paravirt_ops.store_tr()) | ||
279 | #define load_TLS(t,cpu) (paravirt_ops.load_tls((t),(cpu))) | ||
280 | #define write_ldt_entry(dt, entry, low, high) \ | ||
281 | (paravirt_ops.write_ldt_entry((dt), (entry), (low), (high))) | ||
282 | #define write_gdt_entry(dt, entry, low, high) \ | ||
283 | (paravirt_ops.write_gdt_entry((dt), (entry), (low), (high))) | ||
284 | #define write_idt_entry(dt, entry, low, high) \ | ||
285 | (paravirt_ops.write_idt_entry((dt), (entry), (low), (high))) | ||
286 | #define set_iopl_mask(mask) (paravirt_ops.set_iopl_mask(mask)) | ||
287 | |||
288 | /* The paravirtualized I/O functions */ | ||
289 | static inline void slow_down_io(void) { | ||
290 | paravirt_ops.io_delay(); | ||
291 | #ifdef REALLY_SLOW_IO | ||
292 | paravirt_ops.io_delay(); | ||
293 | paravirt_ops.io_delay(); | ||
294 | paravirt_ops.io_delay(); | ||
295 | #endif | ||
296 | } | ||
297 | |||
298 | #ifdef CONFIG_X86_LOCAL_APIC | ||
299 | /* | ||
300 | * Basic functions accessing APICs. | ||
301 | */ | ||
302 | static inline void apic_write(unsigned long reg, unsigned long v) | ||
303 | { | ||
304 | paravirt_ops.apic_write(reg,v); | ||
305 | } | ||
306 | |||
307 | static inline void apic_write_atomic(unsigned long reg, unsigned long v) | ||
308 | { | ||
309 | paravirt_ops.apic_write_atomic(reg,v); | ||
310 | } | ||
311 | |||
312 | static inline unsigned long apic_read(unsigned long reg) | ||
313 | { | ||
314 | return paravirt_ops.apic_read(reg); | ||
315 | } | ||
316 | #endif | ||
317 | |||
318 | |||
319 | #define __flush_tlb() paravirt_ops.flush_tlb_user() | ||
320 | #define __flush_tlb_global() paravirt_ops.flush_tlb_kernel() | ||
321 | #define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr) | ||
322 | |||
323 | static inline void set_pte(pte_t *ptep, pte_t pteval) | ||
324 | { | ||
325 | paravirt_ops.set_pte(ptep, pteval); | ||
326 | } | ||
327 | |||
328 | static inline void set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval) | ||
329 | { | ||
330 | paravirt_ops.set_pte_at(mm, addr, ptep, pteval); | ||
331 | } | ||
332 | |||
333 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) | ||
334 | { | ||
335 | paravirt_ops.set_pmd(pmdp, pmdval); | ||
336 | } | ||
337 | |||
338 | static inline void pte_update(struct mm_struct *mm, u32 addr, pte_t *ptep) | ||
339 | { | ||
340 | paravirt_ops.pte_update(mm, addr, ptep); | ||
341 | } | ||
342 | |||
343 | static inline void pte_update_defer(struct mm_struct *mm, u32 addr, pte_t *ptep) | ||
344 | { | ||
345 | paravirt_ops.pte_update_defer(mm, addr, ptep); | ||
346 | } | ||
347 | |||
348 | #ifdef CONFIG_X86_PAE | ||
349 | static inline void set_pte_atomic(pte_t *ptep, pte_t pteval) | ||
350 | { | ||
351 | paravirt_ops.set_pte_atomic(ptep, pteval); | ||
352 | } | ||
353 | |||
354 | static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) | ||
355 | { | ||
356 | paravirt_ops.set_pte_present(mm, addr, ptep, pte); | ||
357 | } | ||
358 | |||
359 | static inline void set_pud(pud_t *pudp, pud_t pudval) | ||
360 | { | ||
361 | paravirt_ops.set_pud(pudp, pudval); | ||
362 | } | ||
363 | |||
364 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
365 | { | ||
366 | paravirt_ops.pte_clear(mm, addr, ptep); | ||
367 | } | ||
368 | |||
369 | static inline void pmd_clear(pmd_t *pmdp) | ||
370 | { | ||
371 | paravirt_ops.pmd_clear(pmdp); | ||
372 | } | ||
373 | #endif | ||
374 | |||
375 | /* These all sit in the .parainstructions section to tell us what to patch. */ | ||
376 | struct paravirt_patch { | ||
377 | u8 *instr; /* original instructions */ | ||
378 | u8 instrtype; /* type of this instruction */ | ||
379 | u8 len; /* length of original instruction */ | ||
380 | u16 clobbers; /* what registers you may clobber */ | ||
381 | }; | ||
382 | |||
383 | #define paravirt_alt(insn_string, typenum, clobber) \ | ||
384 | "771:\n\t" insn_string "\n" "772:\n" \ | ||
385 | ".pushsection .parainstructions,\"a\"\n" \ | ||
386 | " .long 771b\n" \ | ||
387 | " .byte " __stringify(typenum) "\n" \ | ||
388 | " .byte 772b-771b\n" \ | ||
389 | " .short " __stringify(clobber) "\n" \ | ||
390 | ".popsection" | ||
391 | |||
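Each paravirt_alt() site above records one struct paravirt_patch entry in the .parainstructions section. As a rough, hypothetical sketch of how a patcher could walk those records (illustration only, not the kernel's actual apply_paravirt() logic; the function names and the NOP-padding policy are invented for the example):

	#include <stdint.h>
	#include <string.h>

	struct paravirt_patch {
		uint8_t  *instr;	/* original instructions */
		uint8_t   instrtype;	/* type of this instruction */
		uint8_t   len;		/* length of original instruction */
		uint16_t  clobbers;	/* what registers you may clobber */
	};

	/* Hypothetical callback: fills insn_buf with up to maxlen replacement
	 * bytes and returns how many it used; 0 keeps the indirect call. */
	typedef unsigned (*patcher_t)(uint8_t type, uint16_t clobbers,
				      void *insn_buf, unsigned maxlen);

	void walk_parainstructions(struct paravirt_patch *start,
				   struct paravirt_patch *end,
				   patcher_t patch)
	{
		struct paravirt_patch *p;
		uint8_t buf[16];

		for (p = start; p < end; p++) {
			unsigned room = p->len < sizeof(buf) ? p->len : sizeof(buf);
			unsigned used = patch(p->instrtype, p->clobbers, buf, room);

			if (used == 0 || used > p->len)
				continue;		/* leave the call site alone */
			memcpy(p->instr, buf, used);
			memset(p->instr + used, 0x90, p->len - used); /* pad with 1-byte NOPs */
		}
	}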
392 | static inline unsigned long __raw_local_save_flags(void) | ||
393 | { | ||
394 | unsigned long f; | ||
395 | |||
396 | __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" | ||
397 | "call *%1;" | ||
398 | "popl %%edx; popl %%ecx", | ||
399 | PARAVIRT_SAVE_FLAGS, CLBR_NONE) | ||
400 | : "=a"(f): "m"(paravirt_ops.save_fl) | ||
401 | : "memory", "cc"); | ||
402 | return f; | ||
403 | } | ||
404 | |||
405 | static inline void raw_local_irq_restore(unsigned long f) | ||
406 | { | ||
407 | __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" | ||
408 | "call *%1;" | ||
409 | "popl %%edx; popl %%ecx", | ||
410 | PARAVIRT_RESTORE_FLAGS, CLBR_EAX) | ||
411 | : "=a"(f) : "m" (paravirt_ops.restore_fl), "0"(f) | ||
412 | : "memory", "cc"); | ||
413 | } | ||
414 | |||
415 | static inline void raw_local_irq_disable(void) | ||
416 | { | ||
417 | __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" | ||
418 | "call *%0;" | ||
419 | "popl %%edx; popl %%ecx", | ||
420 | PARAVIRT_IRQ_DISABLE, CLBR_EAX) | ||
421 | : : "m" (paravirt_ops.irq_disable) | ||
422 | : "memory", "eax", "cc"); | ||
423 | } | ||
424 | |||
425 | static inline void raw_local_irq_enable(void) | ||
426 | { | ||
427 | __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" | ||
428 | "call *%0;" | ||
429 | "popl %%edx; popl %%ecx", | ||
430 | PARAVIRT_IRQ_ENABLE, CLBR_EAX) | ||
431 | : : "m" (paravirt_ops.irq_enable) | ||
432 | : "memory", "eax", "cc"); | ||
433 | } | ||
434 | |||
435 | static inline unsigned long __raw_local_irq_save(void) | ||
436 | { | ||
437 | unsigned long f; | ||
438 | |||
439 | __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" | ||
440 | "call *%1; pushl %%eax;" | ||
441 | "call *%2; popl %%eax;" | ||
442 | "popl %%edx; popl %%ecx", | ||
443 | PARAVIRT_SAVE_FLAGS_IRQ_DISABLE, | ||
444 | CLBR_NONE) | ||
445 | : "=a"(f) | ||
446 | : "m" (paravirt_ops.save_fl), | ||
447 | "m" (paravirt_ops.irq_disable) | ||
448 | : "memory", "cc"); | ||
449 | return f; | ||
450 | } | ||
451 | |||
452 | #define CLI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;" \ | ||
453 | "call *paravirt_ops+%c[irq_disable];" \ | ||
454 | "popl %%edx; popl %%ecx", \ | ||
455 | PARAVIRT_IRQ_DISABLE, CLBR_EAX) | ||
456 | |||
457 | #define STI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;" \ | ||
458 | "call *paravirt_ops+%c[irq_enable];" \ | ||
459 | "popl %%edx; popl %%ecx", \ | ||
460 | PARAVIRT_IRQ_ENABLE, CLBR_EAX) | ||
461 | #define CLI_STI_CLOBBERS , "%eax" | ||
462 | #define CLI_STI_INPUT_ARGS \ | ||
463 | , \ | ||
464 | [irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)), \ | ||
465 | [irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable)) | ||
466 | |||
467 | #else /* __ASSEMBLY__ */ | ||
468 | |||
469 | #define PARA_PATCH(ptype, clobbers, ops) \ | ||
470 | 771:; \ | ||
471 | ops; \ | ||
472 | 772:; \ | ||
473 | .pushsection .parainstructions,"a"; \ | ||
474 | .long 771b; \ | ||
475 | .byte ptype; \ | ||
476 | .byte 772b-771b; \ | ||
477 | .short clobbers; \ | ||
478 | .popsection | ||
479 | |||
480 | #define INTERRUPT_RETURN \ | ||
481 | PARA_PATCH(PARAVIRT_INTERRUPT_RETURN, CLBR_ANY, \ | ||
482 | jmp *%cs:paravirt_ops+PARAVIRT_iret) | ||
483 | |||
484 | #define DISABLE_INTERRUPTS(clobbers) \ | ||
485 | PARA_PATCH(PARAVIRT_IRQ_DISABLE, clobbers, \ | ||
486 | pushl %ecx; pushl %edx; \ | ||
487 | call *paravirt_ops+PARAVIRT_irq_disable; \ | ||
488 | popl %edx; popl %ecx) \ | ||
489 | |||
490 | #define ENABLE_INTERRUPTS(clobbers) \ | ||
491 | PARA_PATCH(PARAVIRT_IRQ_ENABLE, clobbers, \ | ||
492 | pushl %ecx; pushl %edx; \ | ||
493 | call *%cs:paravirt_ops+PARAVIRT_irq_enable; \ | ||
494 | popl %edx; popl %ecx) | ||
495 | |||
496 | #define ENABLE_INTERRUPTS_SYSEXIT \ | ||
497 | PARA_PATCH(PARAVIRT_STI_SYSEXIT, CLBR_ANY, \ | ||
498 | jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit) | ||
499 | |||
500 | #define GET_CR0_INTO_EAX \ | ||
501 | call *paravirt_ops+PARAVIRT_read_cr0 | ||
502 | |||
503 | #endif /* __ASSEMBLY__ */ | ||
504 | #endif /* CONFIG_PARAVIRT */ | ||
505 | #endif /* __ASM_PARAVIRT_H */ | ||
diff --git a/include/asm-i386/pda.h b/include/asm-i386/pda.h new file mode 100644 index 000000000000..2ba2736aa109 --- /dev/null +++ b/include/asm-i386/pda.h | |||
@@ -0,0 +1,100 @@ | |||
1 | /* | ||
2 | Per-processor Data Areas | ||
3 | Jeremy Fitzhardinge <jeremy@goop.org> 2006 | ||
4 | Based on asm-x86_64/pda.h by Andi Kleen. | ||
5 | */ | ||
6 | #ifndef _I386_PDA_H | ||
7 | #define _I386_PDA_H | ||
8 | |||
9 | #include <linux/stddef.h> | ||
10 | #include <linux/types.h> | ||
11 | |||
12 | struct i386_pda | ||
13 | { | ||
14 | struct i386_pda *_pda; /* pointer to self */ | ||
15 | |||
16 | int cpu_number; | ||
17 | struct task_struct *pcurrent; /* current process */ | ||
18 | struct pt_regs *irq_regs; | ||
19 | }; | ||
20 | |||
21 | extern struct i386_pda *_cpu_pda[]; | ||
22 | |||
23 | #define cpu_pda(i) (_cpu_pda[i]) | ||
24 | |||
25 | #define pda_offset(field) offsetof(struct i386_pda, field) | ||
26 | |||
27 | extern void __bad_pda_field(void); | ||
28 | |||
29 | /* This variable is never instantiated. It is only used as a stand-in | ||
30 | for the real per-cpu PDA memory, so that gcc can understand what | ||
31 | memory operations the inline asms() below are performing. This | ||
32 | eliminates the need to make the asms volatile or have memory | ||
33 | clobbers, so gcc can readily analyse them. */ | ||
34 | extern struct i386_pda _proxy_pda; | ||
35 | |||
36 | #define pda_to_op(op,field,val) \ | ||
37 | do { \ | ||
38 | typedef typeof(_proxy_pda.field) T__; \ | ||
39 | if (0) { T__ tmp__; tmp__ = (val); } \ | ||
40 | switch (sizeof(_proxy_pda.field)) { \ | ||
41 | case 1: \ | ||
42 | asm(op "b %1,%%gs:%c2" \ | ||
43 | : "+m" (_proxy_pda.field) \ | ||
44 | :"ri" ((T__)val), \ | ||
45 | "i"(pda_offset(field))); \ | ||
46 | break; \ | ||
47 | case 2: \ | ||
48 | asm(op "w %1,%%gs:%c2" \ | ||
49 | : "+m" (_proxy_pda.field) \ | ||
50 | :"ri" ((T__)val), \ | ||
51 | "i"(pda_offset(field))); \ | ||
52 | break; \ | ||
53 | case 4: \ | ||
54 | asm(op "l %1,%%gs:%c2" \ | ||
55 | : "+m" (_proxy_pda.field) \ | ||
56 | :"ri" ((T__)val), \ | ||
57 | "i"(pda_offset(field))); \ | ||
58 | break; \ | ||
59 | default: __bad_pda_field(); \ | ||
60 | } \ | ||
61 | } while (0) | ||
62 | |||
63 | #define pda_from_op(op,field) \ | ||
64 | ({ \ | ||
65 | typeof(_proxy_pda.field) ret__; \ | ||
66 | switch (sizeof(_proxy_pda.field)) { \ | ||
67 | case 1: \ | ||
68 | asm(op "b %%gs:%c1,%0" \ | ||
69 | : "=r" (ret__) \ | ||
70 | : "i" (pda_offset(field)), \ | ||
71 | "m" (_proxy_pda.field)); \ | ||
72 | break; \ | ||
73 | case 2: \ | ||
74 | asm(op "w %%gs:%c1,%0" \ | ||
75 | : "=r" (ret__) \ | ||
76 | : "i" (pda_offset(field)), \ | ||
77 | "m" (_proxy_pda.field)); \ | ||
78 | break; \ | ||
79 | case 4: \ | ||
80 | asm(op "l %%gs:%c1,%0" \ | ||
81 | : "=r" (ret__) \ | ||
82 | : "i" (pda_offset(field)), \ | ||
83 | "m" (_proxy_pda.field)); \ | ||
84 | break; \ | ||
85 | default: __bad_pda_field(); \ | ||
86 | } \ | ||
87 | ret__; }) | ||
88 | |||
89 | /* Return a pointer to a pda field */ | ||
90 | #define pda_addr(field) \ | ||
91 | ((typeof(_proxy_pda.field) *)((unsigned char *)read_pda(_pda) + \ | ||
92 | pda_offset(field))) | ||
93 | |||
94 | #define read_pda(field) pda_from_op("mov",field) | ||
95 | #define write_pda(field,val) pda_to_op("mov",field,val) | ||
96 | #define add_pda(field,val) pda_to_op("add",field,val) | ||
97 | #define sub_pda(field,val) pda_to_op("sub",field,val) | ||
98 | #define or_pda(field,val) pda_to_op("or",field,val) | ||
99 | |||
100 | #endif /* _I386_PDA_H */ | ||
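A hypothetical usage sketch of the accessors above (function names are made up; this assumes the header is included in kernel code and a PDA has already been installed behind %gs for the running CPU):

	/* Each accessor below compiles to a single %gs-relative mov,
	 * via pda_from_op() / pda_to_op() above. */
	static inline int sketch_smp_processor_id(void)
	{
		return read_pda(cpu_number);
	}

	static inline struct pt_regs *sketch_swap_irq_regs(struct pt_regs *new_regs)
	{
		struct pt_regs *old_regs = read_pda(irq_regs);

		write_pda(irq_regs, new_regs);
		return old_regs;
	}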
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h index 5764afa4b6a4..510ae1d3486c 100644 --- a/include/asm-i386/percpu.h +++ b/include/asm-i386/percpu.h | |||
@@ -1,6 +1,31 @@ | |||
1 | #ifndef __ARCH_I386_PERCPU__ | 1 | #ifndef __ARCH_I386_PERCPU__ |
2 | #define __ARCH_I386_PERCPU__ | 2 | #define __ARCH_I386_PERCPU__ |
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | ||
4 | #include <asm-generic/percpu.h> | 5 | #include <asm-generic/percpu.h> |
6 | #else | ||
7 | |||
8 | /* | ||
9 | * PER_CPU finds an address of a per-cpu variable. | ||
10 | * | ||
11 | * Args: | ||
12 | * var - variable name | ||
13 | * cpu - 32bit register containing the current CPU number | ||
14 | * | ||
15 | * The resulting address is stored in the "cpu" argument. | ||
16 | * | ||
17 | * Example: | ||
18 | * PER_CPU(cpu_gdt_descr, %ebx) | ||
19 | */ | ||
20 | #ifdef CONFIG_SMP | ||
21 | #define PER_CPU(var, cpu) \ | ||
22 | movl __per_cpu_offset(,cpu,4), cpu; \ | ||
23 | addl $per_cpu__/**/var, cpu; | ||
24 | #else /* ! SMP */ | ||
25 | #define PER_CPU(var, cpu) \ | ||
26 | movl $per_cpu__/**/var, cpu; | ||
27 | #endif /* SMP */ | ||
28 | |||
29 | #endif /* !__ASSEMBLY__ */ | ||
5 | 30 | ||
6 | #endif /* __ARCH_I386_PERCPU__ */ | 31 | #endif /* __ARCH_I386_PERCPU__ */ |
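In C terms, the SMP variant of PER_CPU() above computes "link-time address of the variable's boot copy plus this CPU's per-cpu offset". A rough equivalent for illustration only (the helper name is invented; this is not how C code accesses per-cpu data in the kernel):

	extern unsigned long __per_cpu_offset[];	/* one offset per possible CPU */

	static inline void *sketch_per_cpu_ptr(void *boot_copy, int cpu)
	{
		return (char *)boot_copy + __per_cpu_offset[cpu];
	}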
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h index 8d8d3b9ecdb0..38c3fcc0676d 100644 --- a/include/asm-i386/pgtable-2level.h +++ b/include/asm-i386/pgtable-2level.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _I386_PGTABLE_2LEVEL_H | 1 | #ifndef _I386_PGTABLE_2LEVEL_H |
2 | #define _I386_PGTABLE_2LEVEL_H | 2 | #define _I386_PGTABLE_2LEVEL_H |
3 | 3 | ||
4 | #include <asm-generic/pgtable-nopmd.h> | ||
5 | |||
6 | #define pte_ERROR(e) \ | 4 | #define pte_ERROR(e) \ |
7 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) | 5 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) |
8 | #define pgd_ERROR(e) \ | 6 | #define pgd_ERROR(e) \ |
@@ -13,17 +11,19 @@ | |||
13 | * within a page table are directly modified. Thus, the following | 11 | * within a page table are directly modified. Thus, the following |
14 | * hook is made available. | 12 | * hook is made available. |
15 | */ | 13 | */ |
14 | #ifndef CONFIG_PARAVIRT | ||
16 | #define set_pte(pteptr, pteval) (*(pteptr) = pteval) | 15 | #define set_pte(pteptr, pteval) (*(pteptr) = pteval) |
17 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | 16 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) |
17 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) | ||
18 | #endif | ||
19 | |||
18 | #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) | 20 | #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) |
19 | #define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval) | 21 | #define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval) |
20 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) | ||
21 | 22 | ||
22 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) | 23 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) |
23 | #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) | 24 | #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) |
24 | 25 | ||
25 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 26 | #define raw_ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte_low, 0)) |
26 | #define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0)) | ||
27 | 27 | ||
28 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 28 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
29 | #define pte_none(x) (!(x).pte_low) | 29 | #define pte_none(x) (!(x).pte_low) |
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h index c2d701ea35be..7a2318f38303 100644 --- a/include/asm-i386/pgtable-3level.h +++ b/include/asm-i386/pgtable-3level.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _I386_PGTABLE_3LEVEL_H | 1 | #ifndef _I386_PGTABLE_3LEVEL_H |
2 | #define _I386_PGTABLE_3LEVEL_H | 2 | #define _I386_PGTABLE_3LEVEL_H |
3 | 3 | ||
4 | #include <asm-generic/pgtable-nopud.h> | ||
5 | |||
6 | /* | 4 | /* |
7 | * Intel Physical Address Extension (PAE) Mode - three-level page | 5 | * Intel Physical Address Extension (PAE) Mode - three-level page |
8 | * tables on PPro+ CPUs. | 6 | * tables on PPro+ CPUs. |
@@ -44,6 +42,7 @@ static inline int pte_exec_kernel(pte_t pte) | |||
44 | return pte_x(pte); | 42 | return pte_x(pte); |
45 | } | 43 | } |
46 | 44 | ||
45 | #ifndef CONFIG_PARAVIRT | ||
47 | /* Rules for using set_pte: the pte being assigned *must* be | 46 | /* Rules for using set_pte: the pte being assigned *must* be |
48 | * either not present or in a state where the hardware will | 47 | * either not present or in a state where the hardware will |
49 | * not attempt to update the pte. In places where this is | 48 | * not attempt to update the pte. In places where this is |
@@ -81,25 +80,6 @@ static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte | |||
81 | (*(pudptr) = (pudval)) | 80 | (*(pudptr) = (pudval)) |
82 | 81 | ||
83 | /* | 82 | /* |
84 | * Pentium-II erratum A13: in PAE mode we explicitly have to flush | ||
85 | * the TLB via cr3 if the top-level pgd is changed... | ||
86 | * We do not let the generic code free and clear pgd entries due to | ||
87 | * this erratum. | ||
88 | */ | ||
89 | static inline void pud_clear (pud_t * pud) { } | ||
90 | |||
91 | #define pud_page(pud) \ | ||
92 | ((struct page *) __va(pud_val(pud) & PAGE_MASK)) | ||
93 | |||
94 | #define pud_page_vaddr(pud) \ | ||
95 | ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) | ||
96 | |||
97 | |||
98 | /* Find an entry in the second-level page table.. */ | ||
99 | #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ | ||
100 | pmd_index(address)) | ||
101 | |||
102 | /* | ||
103 | * For PTEs and PDEs, we must clear the P-bit first when clearing a page table | 83 | * For PTEs and PDEs, we must clear the P-bit first when clearing a page table |
104 | * entry, so clear the bottom half first and enforce ordering with a compiler | 84 | * entry, so clear the bottom half first and enforce ordering with a compiler |
105 | * barrier. | 85 | * barrier. |
@@ -118,9 +98,28 @@ static inline void pmd_clear(pmd_t *pmd) | |||
118 | smp_wmb(); | 98 | smp_wmb(); |
119 | *(tmp + 1) = 0; | 99 | *(tmp + 1) = 0; |
120 | } | 100 | } |
101 | #endif | ||
102 | |||
103 | /* | ||
104 | * Pentium-II erratum A13: in PAE mode we explicitly have to flush | ||
105 | * the TLB via cr3 if the top-level pgd is changed... | ||
106 | * We do not let the generic code free and clear pgd entries due to | ||
107 | * this erratum. | ||
108 | */ | ||
109 | static inline void pud_clear (pud_t * pud) { } | ||
110 | |||
111 | #define pud_page(pud) \ | ||
112 | ((struct page *) __va(pud_val(pud) & PAGE_MASK)) | ||
113 | |||
114 | #define pud_page_vaddr(pud) \ | ||
115 | ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) | ||
116 | |||
117 | |||
118 | /* Find an entry in the second-level page table.. */ | ||
119 | #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ | ||
120 | pmd_index(address)) | ||
121 | 121 | ||
122 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 122 | static inline pte_t raw_ptep_get_and_clear(pte_t *ptep) |
123 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
124 | { | 123 | { |
125 | pte_t res; | 124 | pte_t res; |
126 | 125 | ||
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index bfee7ddfff53..e6a4723f0eb1 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/processor.h> | 15 | #include <asm/processor.h> |
16 | #include <asm/fixmap.h> | 16 | #include <asm/fixmap.h> |
17 | #include <linux/threads.h> | 17 | #include <linux/threads.h> |
18 | #include <asm/paravirt.h> | ||
18 | 19 | ||
19 | #ifndef _I386_BITOPS_H | 20 | #ifndef _I386_BITOPS_H |
20 | #include <asm/bitops.h> | 21 | #include <asm/bitops.h> |
@@ -246,6 +247,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p | |||
246 | # include <asm/pgtable-2level.h> | 247 | # include <asm/pgtable-2level.h> |
247 | #endif | 248 | #endif |
248 | 249 | ||
250 | #ifndef CONFIG_PARAVIRT | ||
249 | /* | 251 | /* |
250 | * Rules for using pte_update - it must be called after any PTE update which | 252 | * Rules for using pte_update - it must be called after any PTE update which |
251 | * has not been done using the set_pte / clear_pte interfaces. It is used by | 253 | * has not been done using the set_pte / clear_pte interfaces. It is used by |
@@ -261,7 +263,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p | |||
261 | */ | 263 | */ |
262 | #define pte_update(mm, addr, ptep) do { } while (0) | 264 | #define pte_update(mm, addr, ptep) do { } while (0) |
263 | #define pte_update_defer(mm, addr, ptep) do { } while (0) | 265 | #define pte_update_defer(mm, addr, ptep) do { } while (0) |
264 | 266 | #endif | |
265 | 267 | ||
266 | /* | 268 | /* |
267 | * We only update the dirty/accessed state if we set | 269 | * We only update the dirty/accessed state if we set |
@@ -275,7 +277,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p | |||
275 | do { \ | 277 | do { \ |
276 | if (dirty) { \ | 278 | if (dirty) { \ |
277 | (ptep)->pte_low = (entry).pte_low; \ | 279 | (ptep)->pte_low = (entry).pte_low; \ |
278 | pte_update_defer((vma)->vm_mm, (addr), (ptep)); \ | 280 | pte_update_defer((vma)->vm_mm, (address), (ptep)); \ |
279 | flush_tlb_page(vma, address); \ | 281 | flush_tlb_page(vma, address); \ |
280 | } \ | 282 | } \ |
281 | } while (0) | 283 | } while (0) |
@@ -305,7 +307,7 @@ do { \ | |||
305 | __dirty = pte_dirty(*(ptep)); \ | 307 | __dirty = pte_dirty(*(ptep)); \ |
306 | if (__dirty) { \ | 308 | if (__dirty) { \ |
307 | clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \ | 309 | clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \ |
308 | pte_update_defer((vma)->vm_mm, (addr), (ptep)); \ | 310 | pte_update_defer((vma)->vm_mm, (address), (ptep)); \ |
309 | flush_tlb_page(vma, address); \ | 311 | flush_tlb_page(vma, address); \ |
310 | } \ | 312 | } \ |
311 | __dirty; \ | 313 | __dirty; \ |
@@ -318,12 +320,20 @@ do { \ | |||
318 | __young = pte_young(*(ptep)); \ | 320 | __young = pte_young(*(ptep)); \ |
319 | if (__young) { \ | 321 | if (__young) { \ |
320 | clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \ | 322 | clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \ |
321 | pte_update_defer((vma)->vm_mm, (addr), (ptep)); \ | 323 | pte_update_defer((vma)->vm_mm, (address), (ptep)); \ |
322 | flush_tlb_page(vma, address); \ | 324 | flush_tlb_page(vma, address); \ |
323 | } \ | 325 | } \ |
324 | __young; \ | 326 | __young; \ |
325 | }) | 327 | }) |
326 | 328 | ||
329 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
330 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
331 | { | ||
332 | pte_t pte = raw_ptep_get_and_clear(ptep); | ||
333 | pte_update(mm, addr, ptep); | ||
334 | return pte; | ||
335 | } | ||
336 | |||
327 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | 337 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL |
328 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) | 338 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) |
329 | { | 339 | { |
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index e0ddca94d50c..a52d65440429 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/threads.h> | 20 | #include <linux/threads.h> |
21 | #include <asm/percpu.h> | 21 | #include <asm/percpu.h> |
22 | #include <linux/cpumask.h> | 22 | #include <linux/cpumask.h> |
23 | #include <linux/init.h> | ||
23 | 24 | ||
24 | /* flag for disabling the tsc */ | 25 | /* flag for disabling the tsc */ |
25 | extern int tsc_disable; | 26 | extern int tsc_disable; |
@@ -72,6 +73,7 @@ struct cpuinfo_x86 { | |||
72 | #endif | 73 | #endif |
73 | unsigned char x86_max_cores; /* cpuid returned max cores value */ | 74 | unsigned char x86_max_cores; /* cpuid returned max cores value */ |
74 | unsigned char apicid; | 75 | unsigned char apicid; |
76 | unsigned short x86_clflush_size; | ||
75 | #ifdef CONFIG_SMP | 77 | #ifdef CONFIG_SMP |
76 | unsigned char booted_cores; /* number of cores as seen by OS */ | 78 | unsigned char booted_cores; /* number of cores as seen by OS */ |
77 | __u8 phys_proc_id; /* Physical processor id. */ | 79 | __u8 phys_proc_id; /* Physical processor id. */ |
@@ -111,6 +113,8 @@ extern struct cpuinfo_x86 cpu_data[]; | |||
111 | extern int cpu_llc_id[NR_CPUS]; | 113 | extern int cpu_llc_id[NR_CPUS]; |
112 | extern char ignore_fpu_irq; | 114 | extern char ignore_fpu_irq; |
113 | 115 | ||
116 | void __init cpu_detect(struct cpuinfo_x86 *c); | ||
117 | |||
114 | extern void identify_cpu(struct cpuinfo_x86 *); | 118 | extern void identify_cpu(struct cpuinfo_x86 *); |
115 | extern void print_cpu_info(struct cpuinfo_x86 *); | 119 | extern void print_cpu_info(struct cpuinfo_x86 *); |
116 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | 120 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); |
@@ -143,8 +147,8 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {} | |||
143 | #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ | 147 | #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ |
144 | #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ | 148 | #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ |
145 | 149 | ||
146 | static inline void __cpuid(unsigned int *eax, unsigned int *ebx, | 150 | static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx, |
147 | unsigned int *ecx, unsigned int *edx) | 151 | unsigned int *ecx, unsigned int *edx) |
148 | { | 152 | { |
149 | /* ecx is often an input as well as an output. */ | 153 | /* ecx is often an input as well as an output. */ |
150 | __asm__("cpuid" | 154 | __asm__("cpuid" |
@@ -155,59 +159,6 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx, | |||
155 | : "0" (*eax), "2" (*ecx)); | 159 | : "0" (*eax), "2" (*ecx)); |
156 | } | 160 | } |
157 | 161 | ||
158 | /* | ||
159 | * Generic CPUID function | ||
160 | * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx | ||
161 | * resulting in stale register contents being returned. | ||
162 | */ | ||
163 | static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) | ||
164 | { | ||
165 | *eax = op; | ||
166 | *ecx = 0; | ||
167 | __cpuid(eax, ebx, ecx, edx); | ||
168 | } | ||
169 | |||
170 | /* Some CPUID calls want 'count' to be placed in ecx */ | ||
171 | static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, | ||
172 | int *edx) | ||
173 | { | ||
174 | *eax = op; | ||
175 | *ecx = count; | ||
176 | __cpuid(eax, ebx, ecx, edx); | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * CPUID functions returning a single datum | ||
181 | */ | ||
182 | static inline unsigned int cpuid_eax(unsigned int op) | ||
183 | { | ||
184 | unsigned int eax, ebx, ecx, edx; | ||
185 | |||
186 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
187 | return eax; | ||
188 | } | ||
189 | static inline unsigned int cpuid_ebx(unsigned int op) | ||
190 | { | ||
191 | unsigned int eax, ebx, ecx, edx; | ||
192 | |||
193 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
194 | return ebx; | ||
195 | } | ||
196 | static inline unsigned int cpuid_ecx(unsigned int op) | ||
197 | { | ||
198 | unsigned int eax, ebx, ecx, edx; | ||
199 | |||
200 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
201 | return ecx; | ||
202 | } | ||
203 | static inline unsigned int cpuid_edx(unsigned int op) | ||
204 | { | ||
205 | unsigned int eax, ebx, ecx, edx; | ||
206 | |||
207 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
208 | return edx; | ||
209 | } | ||
210 | |||
211 | #define load_cr3(pgdir) write_cr3(__pa(pgdir)) | 162 | #define load_cr3(pgdir) write_cr3(__pa(pgdir)) |
212 | 163 | ||
213 | /* | 164 | /* |
@@ -473,6 +424,7 @@ struct thread_struct { | |||
473 | .vm86_info = NULL, \ | 424 | .vm86_info = NULL, \ |
474 | .sysenter_cs = __KERNEL_CS, \ | 425 | .sysenter_cs = __KERNEL_CS, \ |
475 | .io_bitmap_ptr = NULL, \ | 426 | .io_bitmap_ptr = NULL, \ |
427 | .gs = __KERNEL_PDA, \ | ||
476 | } | 428 | } |
477 | 429 | ||
478 | /* | 430 | /* |
@@ -489,18 +441,9 @@ struct thread_struct { | |||
489 | .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ | 441 | .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ |
490 | } | 442 | } |
491 | 443 | ||
492 | static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) | ||
493 | { | ||
494 | tss->esp0 = thread->esp0; | ||
495 | /* This can only happen when SEP is enabled, no need to test "SEP"arately */ | ||
496 | if (unlikely(tss->ss1 != thread->sysenter_cs)) { | ||
497 | tss->ss1 = thread->sysenter_cs; | ||
498 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | ||
499 | } | ||
500 | } | ||
501 | |||
502 | #define start_thread(regs, new_eip, new_esp) do { \ | 444 | #define start_thread(regs, new_eip, new_esp) do { \ |
503 | __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \ | 445 | __asm__("movl %0,%%fs": :"r" (0)); \ |
446 | regs->xgs = 0; \ | ||
504 | set_fs(USER_DS); \ | 447 | set_fs(USER_DS); \ |
505 | regs->xds = __USER_DS; \ | 448 | regs->xds = __USER_DS; \ |
506 | regs->xes = __USER_DS; \ | 449 | regs->xes = __USER_DS; \ |
@@ -510,33 +453,6 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa | |||
510 | regs->esp = new_esp; \ | 453 | regs->esp = new_esp; \ |
511 | } while (0) | 454 | } while (0) |
512 | 455 | ||
513 | /* | ||
514 | * These special macros can be used to get or set a debugging register | ||
515 | */ | ||
516 | #define get_debugreg(var, register) \ | ||
517 | __asm__("movl %%db" #register ", %0" \ | ||
518 | :"=r" (var)) | ||
519 | #define set_debugreg(value, register) \ | ||
520 | __asm__("movl %0,%%db" #register \ | ||
521 | : /* no output */ \ | ||
522 | :"r" (value)) | ||
523 | |||
524 | /* | ||
525 | * Set IOPL bits in EFLAGS from given mask | ||
526 | */ | ||
527 | static inline void set_iopl_mask(unsigned mask) | ||
528 | { | ||
529 | unsigned int reg; | ||
530 | __asm__ __volatile__ ("pushfl;" | ||
531 | "popl %0;" | ||
532 | "andl %1, %0;" | ||
533 | "orl %2, %0;" | ||
534 | "pushl %0;" | ||
535 | "popfl" | ||
536 | : "=&r" (reg) | ||
537 | : "i" (~X86_EFLAGS_IOPL), "r" (mask)); | ||
538 | } | ||
539 | |||
540 | /* Forward declaration, a strange C thing */ | 456 | /* Forward declaration, a strange C thing */ |
541 | struct task_struct; | 457 | struct task_struct; |
542 | struct mm_struct; | 458 | struct mm_struct; |
@@ -628,6 +544,105 @@ static inline void rep_nop(void) | |||
628 | 544 | ||
629 | #define cpu_relax() rep_nop() | 545 | #define cpu_relax() rep_nop() |
630 | 546 | ||
547 | #ifdef CONFIG_PARAVIRT | ||
548 | #include <asm/paravirt.h> | ||
549 | #else | ||
550 | #define paravirt_enabled() 0 | ||
551 | #define __cpuid native_cpuid | ||
552 | |||
553 | static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) | ||
554 | { | ||
555 | tss->esp0 = thread->esp0; | ||
556 | /* This can only happen when SEP is enabled, no need to test "SEP"arately */ | ||
557 | if (unlikely(tss->ss1 != thread->sysenter_cs)) { | ||
558 | tss->ss1 = thread->sysenter_cs; | ||
559 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | ||
560 | } | ||
561 | } | ||
562 | |||
563 | /* | ||
564 | * These special macros can be used to get or set a debugging register | ||
565 | */ | ||
566 | #define get_debugreg(var, register) \ | ||
567 | __asm__("movl %%db" #register ", %0" \ | ||
568 | :"=r" (var)) | ||
569 | #define set_debugreg(value, register) \ | ||
570 | __asm__("movl %0,%%db" #register \ | ||
571 | : /* no output */ \ | ||
572 | :"r" (value)) | ||
573 | |||
574 | #define set_iopl_mask native_set_iopl_mask | ||
575 | #endif /* CONFIG_PARAVIRT */ | ||
576 | |||
577 | /* | ||
578 | * Set IOPL bits in EFLAGS from given mask | ||
579 | */ | ||
580 | static fastcall inline void native_set_iopl_mask(unsigned mask) | ||
581 | { | ||
582 | unsigned int reg; | ||
583 | __asm__ __volatile__ ("pushfl;" | ||
584 | "popl %0;" | ||
585 | "andl %1, %0;" | ||
586 | "orl %2, %0;" | ||
587 | "pushl %0;" | ||
588 | "popfl" | ||
589 | : "=&r" (reg) | ||
590 | : "i" (~X86_EFLAGS_IOPL), "r" (mask)); | ||
591 | } | ||
592 | |||
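native_set_iopl_mask() above is a read-modify-write of the IOPL field in EFLAGS: clear bits 12-13, then OR in the requested mask. The flag arithmetic on its own, as a stand-alone check (the starting EFLAGS value is an arbitrary example):

	#include <stdio.h>

	#define X86_EFLAGS_IOPL 0x00003000	/* IOPL bit mask, bits 12-13 */

	int main(void)
	{
		unsigned int eflags = 0x00000246;	/* arbitrary example value */
		unsigned int mask   = 0x00003000;	/* request IOPL 3 */

		eflags = (eflags & ~X86_EFLAGS_IOPL) | mask;
		printf("%#x\n", eflags);		/* prints 0x3246 */
		return 0;
	}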
593 | /* | ||
594 | * Generic CPUID function | ||
595 | * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx | ||
596 | * resulting in stale register contents being returned. | ||
597 | */ | ||
598 | static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) | ||
599 | { | ||
600 | *eax = op; | ||
601 | *ecx = 0; | ||
602 | __cpuid(eax, ebx, ecx, edx); | ||
603 | } | ||
604 | |||
605 | /* Some CPUID calls want 'count' to be placed in ecx */ | ||
606 | static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, | ||
607 | int *edx) | ||
608 | { | ||
609 | *eax = op; | ||
610 | *ecx = count; | ||
611 | __cpuid(eax, ebx, ecx, edx); | ||
612 | } | ||
613 | |||
614 | /* | ||
615 | * CPUID functions returning a single datum | ||
616 | */ | ||
617 | static inline unsigned int cpuid_eax(unsigned int op) | ||
618 | { | ||
619 | unsigned int eax, ebx, ecx, edx; | ||
620 | |||
621 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
622 | return eax; | ||
623 | } | ||
624 | static inline unsigned int cpuid_ebx(unsigned int op) | ||
625 | { | ||
626 | unsigned int eax, ebx, ecx, edx; | ||
627 | |||
628 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
629 | return ebx; | ||
630 | } | ||
631 | static inline unsigned int cpuid_ecx(unsigned int op) | ||
632 | { | ||
633 | unsigned int eax, ebx, ecx, edx; | ||
634 | |||
635 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
636 | return ecx; | ||
637 | } | ||
638 | static inline unsigned int cpuid_edx(unsigned int op) | ||
639 | { | ||
640 | unsigned int eax, ebx, ecx, edx; | ||
641 | |||
642 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
643 | return edx; | ||
644 | } | ||
645 | |||
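The helpers re-added above are thin wrappers around the cpuid instruction. A stand-alone user-space illustration of the same calling pattern (x86, GNU C; the wrapper name is made up, the constraint string mirrors __cpuid/native_cpuid above; leaf 0 returns the vendor string in ebx, edx, ecx):

	#include <stdio.h>
	#include <string.h>

	static void my_cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx,
			     unsigned int *ecx, unsigned int *edx)
	{
		*eax = op;
		*ecx = 0;	/* clear ecx, as the generic cpuid() above does */
		__asm__("cpuid"
			: "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
			: "0" (*eax), "2" (*ecx));
	}

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;
		char vendor[13];

		my_cpuid(0, &eax, &ebx, &ecx, &edx);
		memcpy(vendor + 0, &ebx, 4);
		memcpy(vendor + 4, &edx, 4);
		memcpy(vendor + 8, &ecx, 4);
		vendor[12] = '\0';
		printf("max leaf %u, vendor %s\n", eax, vendor);
		return 0;
	}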
631 | /* generic versions from gas */ | 646 | /* generic versions from gas */ |
632 | #define GENERIC_NOP1 ".byte 0x90\n" | 647 | #define GENERIC_NOP1 ".byte 0x90\n" |
633 | #define GENERIC_NOP2 ".byte 0x89,0xf6\n" | 648 | #define GENERIC_NOP2 ".byte 0x89,0xf6\n" |
@@ -727,4 +742,7 @@ extern unsigned long boot_option_idle_override; | |||
727 | extern void enable_sep_cpu(void); | 742 | extern void enable_sep_cpu(void); |
728 | extern int sysenter_setup(void); | 743 | extern int sysenter_setup(void); |
729 | 744 | ||
745 | extern int init_gdt(int cpu, struct task_struct *idle); | ||
746 | extern void secondary_cpu_init(void); | ||
747 | |||
730 | #endif /* __ASM_I386_PROCESSOR_H */ | 748 | #endif /* __ASM_I386_PROCESSOR_H */ |
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h index d505f501077a..bdbc894339b4 100644 --- a/include/asm-i386/ptrace.h +++ b/include/asm-i386/ptrace.h | |||
@@ -16,6 +16,8 @@ struct pt_regs { | |||
16 | long eax; | 16 | long eax; |
17 | int xds; | 17 | int xds; |
18 | int xes; | 18 | int xes; |
19 | /* int xfs; */ | ||
20 | int xgs; | ||
19 | long orig_eax; | 21 | long orig_eax; |
20 | long eip; | 22 | long eip; |
21 | int xcs; | 23 | int xcs; |
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h index b7ab59685ba7..3c796af33776 100644 --- a/include/asm-i386/segment.h +++ b/include/asm-i386/segment.h | |||
@@ -39,7 +39,7 @@ | |||
39 | * 25 - APM BIOS support | 39 | * 25 - APM BIOS support |
40 | * | 40 | * |
41 | * 26 - ESPFIX small SS | 41 | * 26 - ESPFIX small SS |
42 | * 27 - unused | 42 | * 27 - PDA [ per-cpu private data area ] |
43 | * 28 - unused | 43 | * 28 - unused |
44 | * 29 - unused | 44 | * 29 - unused |
45 | * 30 - unused | 45 | * 30 - unused |
@@ -74,6 +74,9 @@ | |||
74 | #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) | 74 | #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) |
75 | #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) | 75 | #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) |
76 | 76 | ||
77 | #define GDT_ENTRY_PDA (GDT_ENTRY_KERNEL_BASE + 15) | ||
78 | #define __KERNEL_PDA (GDT_ENTRY_PDA * 8) | ||
79 | |||
77 | #define GDT_ENTRY_DOUBLEFAULT_TSS 31 | 80 | #define GDT_ENTRY_DOUBLEFAULT_TSS 31 |
78 | 81 | ||
79 | /* | 82 | /* |
@@ -128,5 +131,7 @@ | |||
128 | #define SEGMENT_LDT 0x4 | 131 | #define SEGMENT_LDT 0x4 |
129 | #define SEGMENT_GDT 0x0 | 132 | #define SEGMENT_GDT 0x0 |
130 | 133 | ||
134 | #ifndef CONFIG_PARAVIRT | ||
131 | #define get_kernel_rpl() 0 | 135 | #define get_kernel_rpl() 0 |
132 | #endif | 136 | #endif |
137 | #endif | ||
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h index c5b504bfbaad..67659dbaf120 100644 --- a/include/asm-i386/setup.h +++ b/include/asm-i386/setup.h | |||
@@ -70,6 +70,7 @@ extern unsigned char boot_params[PARAM_SIZE]; | |||
70 | struct e820entry; | 70 | struct e820entry; |
71 | 71 | ||
72 | char * __init machine_specific_memory_setup(void); | 72 | char * __init machine_specific_memory_setup(void); |
73 | char *memory_setup(void); | ||
73 | 74 | ||
74 | int __init copy_e820_map(struct e820entry * biosmap, int nr_map); | 75 | int __init copy_e820_map(struct e820entry * biosmap, int nr_map); |
75 | int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); | 76 | int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); |
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h index bd59c1508e71..64fe624c02ca 100644 --- a/include/asm-i386/smp.h +++ b/include/asm-i386/smp.h | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/threads.h> | 9 | #include <linux/threads.h> |
10 | #include <linux/cpumask.h> | 10 | #include <linux/cpumask.h> |
11 | #include <asm/pda.h> | ||
11 | #endif | 12 | #endif |
12 | 13 | ||
13 | #ifdef CONFIG_X86_LOCAL_APIC | 14 | #ifdef CONFIG_X86_LOCAL_APIC |
@@ -56,7 +57,7 @@ extern void cpu_uninit(void); | |||
56 | * from the initial startup. We map APIC_BASE very early in page_setup(), | 57 | * from the initial startup. We map APIC_BASE very early in page_setup(), |
57 | * so this is correct in the x86 case. | 58 | * so this is correct in the x86 case. |
58 | */ | 59 | */ |
59 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 60 | #define raw_smp_processor_id() (read_pda(cpu_number)) |
60 | 61 | ||
61 | extern cpumask_t cpu_callout_map; | 62 | extern cpumask_t cpu_callout_map; |
62 | extern cpumask_t cpu_callin_map; | 63 | extern cpumask_t cpu_callin_map; |
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index c18b71fae6b3..d3bcebed60ca 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h | |||
@@ -7,8 +7,14 @@ | |||
7 | #include <asm/processor.h> | 7 | #include <asm/processor.h> |
8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
9 | 9 | ||
10 | #ifdef CONFIG_PARAVIRT | ||
11 | #include <asm/paravirt.h> | ||
12 | #else | ||
10 | #define CLI_STRING "cli" | 13 | #define CLI_STRING "cli" |
11 | #define STI_STRING "sti" | 14 | #define STI_STRING "sti" |
15 | #define CLI_STI_CLOBBERS | ||
16 | #define CLI_STI_INPUT_ARGS | ||
17 | #endif /* CONFIG_PARAVIRT */ | ||
12 | 18 | ||
13 | /* | 19 | /* |
14 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 20 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
@@ -53,25 +59,28 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla | |||
53 | { | 59 | { |
54 | asm volatile( | 60 | asm volatile( |
55 | "\n1:\t" | 61 | "\n1:\t" |
56 | LOCK_PREFIX " ; decb %0\n\t" | 62 | LOCK_PREFIX " ; decb %[slock]\n\t" |
57 | "jns 5f\n" | 63 | "jns 5f\n" |
58 | "2:\t" | 64 | "2:\t" |
59 | "testl $0x200, %1\n\t" | 65 | "testl $0x200, %[flags]\n\t" |
60 | "jz 4f\n\t" | 66 | "jz 4f\n\t" |
61 | STI_STRING "\n" | 67 | STI_STRING "\n" |
62 | "3:\t" | 68 | "3:\t" |
63 | "rep;nop\n\t" | 69 | "rep;nop\n\t" |
64 | "cmpb $0, %0\n\t" | 70 | "cmpb $0, %[slock]\n\t" |
65 | "jle 3b\n\t" | 71 | "jle 3b\n\t" |
66 | CLI_STRING "\n\t" | 72 | CLI_STRING "\n\t" |
67 | "jmp 1b\n" | 73 | "jmp 1b\n" |
68 | "4:\t" | 74 | "4:\t" |
69 | "rep;nop\n\t" | 75 | "rep;nop\n\t" |
70 | "cmpb $0, %0\n\t" | 76 | "cmpb $0, %[slock]\n\t" |
71 | "jg 1b\n\t" | 77 | "jg 1b\n\t" |
72 | "jmp 4b\n" | 78 | "jmp 4b\n" |
73 | "5:\n\t" | 79 | "5:\n\t" |
74 | : "+m" (lock->slock) : "r" (flags) : "memory"); | 80 | : [slock] "+m" (lock->slock) |
81 | : [flags] "r" (flags) | ||
82 | CLI_STI_INPUT_ARGS | ||
83 | : "memory" CLI_STI_CLOBBERS); | ||
75 | } | 84 | } |
76 | #endif | 85 | #endif |
77 | 86 | ||
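The hunk above switches the spinlock asm from positional operands (%0, %1) to named operands so that extra paravirt inputs can be appended via CLI_STI_INPUT_ARGS without renumbering everything. A tiny stand-alone example of the named-operand syntax itself (a toy increment, unrelated to spinlocks):

	#include <stdio.h>

	static void toy_inc(unsigned int *v)
	{
		asm volatile("incl %[val]"	/* %[val] names the operand below */
			     : [val] "+m" (*v)
			     : /* no inputs here; the spinlock code appends extra
				  inputs at this position via CLI_STI_INPUT_ARGS */
			     : "memory");
	}

	int main(void)
	{
		unsigned int n = 41;

		toy_inc(&n);
		printf("%u\n", n);	/* prints 42 */
		return 0;
	}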
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h index c1da5caafaf7..8dbaafe611ff 100644 --- a/include/asm-i386/suspend.h +++ b/include/asm-i386/suspend.h | |||
@@ -12,12 +12,8 @@ static inline int arch_prepare_suspend(void) { return 0; } | |||
12 | struct saved_context { | 12 | struct saved_context { |
13 | u16 es, fs, gs, ss; | 13 | u16 es, fs, gs, ss; |
14 | unsigned long cr0, cr2, cr3, cr4; | 14 | unsigned long cr0, cr2, cr3, cr4; |
15 | u16 gdt_pad; | 15 | struct Xgt_desc_struct gdt; |
16 | u16 gdt_limit; | 16 | struct Xgt_desc_struct idt; |
17 | unsigned long gdt_base; | ||
18 | u16 idt_pad; | ||
19 | u16 idt_limit; | ||
20 | unsigned long idt_base; | ||
21 | u16 ldt; | 17 | u16 ldt; |
22 | u16 tss; | 18 | u16 tss; |
23 | unsigned long tr; | 19 | unsigned long tr; |
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index a6dabbcd6e6a..a6d20d9a1a30 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h | |||
@@ -88,6 +88,9 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | |||
88 | #define savesegment(seg, value) \ | 88 | #define savesegment(seg, value) \ |
89 | asm volatile("mov %%" #seg ",%0":"=rm" (value)) | 89 | asm volatile("mov %%" #seg ",%0":"=rm" (value)) |
90 | 90 | ||
91 | #ifdef CONFIG_PARAVIRT | ||
92 | #include <asm/paravirt.h> | ||
93 | #else | ||
91 | #define read_cr0() ({ \ | 94 | #define read_cr0() ({ \ |
92 | unsigned int __dummy; \ | 95 | unsigned int __dummy; \ |
93 | __asm__ __volatile__( \ | 96 | __asm__ __volatile__( \ |
@@ -139,17 +142,18 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | |||
139 | #define write_cr4(x) \ | 142 | #define write_cr4(x) \ |
140 | __asm__ __volatile__("movl %0,%%cr4": :"r" (x)) | 143 | __asm__ __volatile__("movl %0,%%cr4": :"r" (x)) |
141 | 144 | ||
142 | /* | 145 | #define wbinvd() \ |
143 | * Clear and set 'TS' bit respectively | 146 | __asm__ __volatile__ ("wbinvd": : :"memory") |
144 | */ | 147 | |
148 | /* Clear the 'TS' bit */ | ||
145 | #define clts() __asm__ __volatile__ ("clts") | 149 | #define clts() __asm__ __volatile__ ("clts") |
150 | #endif/* CONFIG_PARAVIRT */ | ||
151 | |||
152 | /* Set the 'TS' bit */ | ||
146 | #define stts() write_cr0(8 | read_cr0()) | 153 | #define stts() write_cr0(8 | read_cr0()) |
147 | 154 | ||
148 | #endif /* __KERNEL__ */ | 155 | #endif /* __KERNEL__ */ |
149 | 156 | ||
150 | #define wbinvd() \ | ||
151 | __asm__ __volatile__ ("wbinvd": : :"memory") | ||
152 | |||
153 | static inline unsigned long get_limit(unsigned long segment) | 157 | static inline unsigned long get_limit(unsigned long segment) |
154 | { | 158 | { |
155 | unsigned long __limit; | 159 | unsigned long __limit; |
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h index 54d6d7aea938..46d32ad92082 100644 --- a/include/asm-i386/thread_info.h +++ b/include/asm-i386/thread_info.h | |||
@@ -95,15 +95,7 @@ static inline struct thread_info *current_thread_info(void) | |||
95 | 95 | ||
96 | /* thread information allocation */ | 96 | /* thread information allocation */ |
97 | #ifdef CONFIG_DEBUG_STACK_USAGE | 97 | #ifdef CONFIG_DEBUG_STACK_USAGE |
98 | #define alloc_thread_info(tsk) \ | 98 | #define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL) |
99 | ({ \ | ||
100 | struct thread_info *ret; \ | ||
101 | \ | ||
102 | ret = kmalloc(THREAD_SIZE, GFP_KERNEL); \ | ||
103 | if (ret) \ | ||
104 | memset(ret, 0, THREAD_SIZE); \ | ||
105 | ret; \ | ||
106 | }) | ||
107 | #else | 99 | #else |
108 | #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) | 100 | #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) |
109 | #endif | 101 | #endif |
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h new file mode 100644 index 000000000000..ea8065af825a --- /dev/null +++ b/include/asm-i386/time.h | |||
@@ -0,0 +1,41 @@ | |||
1 | #ifndef _ASMi386_TIME_H | ||
2 | #define _ASMi386_TIME_H | ||
3 | |||
4 | #include <linux/efi.h> | ||
5 | #include "mach_time.h" | ||
6 | |||
7 | static inline unsigned long native_get_wallclock(void) | ||
8 | { | ||
9 | unsigned long retval; | ||
10 | |||
11 | if (efi_enabled) | ||
12 | retval = efi_get_time(); | ||
13 | else | ||
14 | retval = mach_get_cmos_time(); | ||
15 | |||
16 | return retval; | ||
17 | } | ||
18 | |||
19 | static inline int native_set_wallclock(unsigned long nowtime) | ||
20 | { | ||
21 | int retval; | ||
22 | |||
23 | if (efi_enabled) | ||
24 | retval = efi_set_rtc_mmss(nowtime); | ||
25 | else | ||
26 | retval = mach_set_rtc_mmss(nowtime); | ||
27 | |||
28 | return retval; | ||
29 | } | ||
30 | |||
31 | #ifdef CONFIG_PARAVIRT | ||
32 | #include <asm/paravirt.h> | ||
33 | #else /* !CONFIG_PARAVIRT */ | ||
34 | |||
35 | #define get_wallclock() native_get_wallclock() | ||
36 | #define set_wallclock(x) native_set_wallclock(x) | ||
37 | #define do_time_init() time_init_hook() | ||
38 | |||
39 | #endif /* CONFIG_PARAVIRT */ | ||
40 | |||
41 | #endif | ||
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h index 360648b0f2b3..4dd82840d53b 100644 --- a/include/asm-i386/tlbflush.h +++ b/include/asm-i386/tlbflush.h | |||
@@ -4,7 +4,15 @@ | |||
4 | #include <linux/mm.h> | 4 | #include <linux/mm.h> |
5 | #include <asm/processor.h> | 5 | #include <asm/processor.h> |
6 | 6 | ||
7 | #define __flush_tlb() \ | 7 | #ifdef CONFIG_PARAVIRT |
8 | #include <asm/paravirt.h> | ||
9 | #else | ||
10 | #define __flush_tlb() __native_flush_tlb() | ||
11 | #define __flush_tlb_global() __native_flush_tlb_global() | ||
12 | #define __flush_tlb_single(addr) __native_flush_tlb_single(addr) | ||
13 | #endif | ||
14 | |||
15 | #define __native_flush_tlb() \ | ||
8 | do { \ | 16 | do { \ |
9 | unsigned int tmpreg; \ | 17 | unsigned int tmpreg; \ |
10 | \ | 18 | \ |
@@ -19,7 +27,7 @@ | |||
19 | * Global pages have to be flushed a bit differently. Not a real | 27 | * Global pages have to be flushed a bit differently. Not a real |
20 | * performance problem because this does not happen often. | 28 | * performance problem because this does not happen often. |
21 | */ | 29 | */ |
22 | #define __flush_tlb_global() \ | 30 | #define __native_flush_tlb_global() \ |
23 | do { \ | 31 | do { \ |
24 | unsigned int tmpreg, cr4, cr4_orig; \ | 32 | unsigned int tmpreg, cr4, cr4_orig; \ |
25 | \ | 33 | \ |
@@ -36,6 +44,9 @@ | |||
36 | : "memory"); \ | 44 | : "memory"); \ |
37 | } while (0) | 45 | } while (0) |
38 | 46 | ||
47 | #define __native_flush_tlb_single(addr) \ | ||
48 | __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory") | ||
49 | |||
39 | # define __flush_tlb_all() \ | 50 | # define __flush_tlb_all() \ |
40 | do { \ | 51 | do { \ |
41 | if (cpu_has_pge) \ | 52 | if (cpu_has_pge) \ |
@@ -46,9 +57,6 @@ | |||
46 | 57 | ||
47 | #define cpu_has_invlpg (boot_cpu_data.x86 > 3) | 58 | #define cpu_has_invlpg (boot_cpu_data.x86 > 3) |
48 | 59 | ||
49 | #define __flush_tlb_single(addr) \ | ||
50 | __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory") | ||
51 | |||
52 | #ifdef CONFIG_X86_INVLPG | 60 | #ifdef CONFIG_X86_INVLPG |
53 | # define __flush_tlb_one(addr) __flush_tlb_single(addr) | 61 | # define __flush_tlb_one(addr) __flush_tlb_single(addr) |
54 | #else | 62 | #else |
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h index 5031d693b89d..aa2c931e30db 100644 --- a/include/asm-i386/unwind.h +++ b/include/asm-i386/unwind.h | |||
@@ -71,6 +71,7 @@ static inline void arch_unw_init_blocked(struct unwind_frame_info *info) | |||
71 | info->regs.xss = __KERNEL_DS; | 71 | info->regs.xss = __KERNEL_DS; |
72 | info->regs.xds = __USER_DS; | 72 | info->regs.xds = __USER_DS; |
73 | info->regs.xes = __USER_DS; | 73 | info->regs.xes = __USER_DS; |
74 | info->regs.xgs = __KERNEL_PDA; | ||
74 | } | 75 | } |
75 | 76 | ||
76 | extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *, | 77 | extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *, |
@@ -78,17 +79,13 @@ extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *, | |||
78 | void *arg), | 79 | void *arg), |
79 | void *arg); | 80 | void *arg); |
80 | 81 | ||
81 | static inline int arch_unw_user_mode(const struct unwind_frame_info *info) | 82 | static inline int arch_unw_user_mode(/*const*/ struct unwind_frame_info *info) |
82 | { | 83 | { |
83 | #if 0 /* This can only work when selector register and EFLAGS saves/restores | 84 | return user_mode_vm(&info->regs) |
84 | are properly annotated (and tracked in UNW_REGISTER_INFO). */ | 85 | || info->regs.eip < PAGE_OFFSET |
85 | return user_mode_vm(&info->regs); | ||
86 | #else | ||
87 | return info->regs.eip < PAGE_OFFSET | ||
88 | || (info->regs.eip >= __fix_to_virt(FIX_VDSO) | 86 | || (info->regs.eip >= __fix_to_virt(FIX_VDSO) |
89 | && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE) | 87 | && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE) |
90 | || info->regs.esp < PAGE_OFFSET; | 88 | || info->regs.esp < PAGE_OFFSET; |
91 | #endif | ||
92 | } | 89 | } |
93 | 90 | ||
94 | #else | 91 | #else |
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h index 952fd6957380..a5edf517b992 100644 --- a/include/asm-i386/vm86.h +++ b/include/asm-i386/vm86.h | |||
@@ -145,26 +145,13 @@ struct vm86plus_struct { | |||
145 | * at the end of the structure. Look at ptrace.h to see the "normal" | 145 | * at the end of the structure. Look at ptrace.h to see the "normal" |
146 | * setup. For user space layout see 'struct vm86_regs' above. | 146 | * setup. For user space layout see 'struct vm86_regs' above. |
147 | */ | 147 | */ |
148 | #include <asm/ptrace.h> | ||
148 | 149 | ||
149 | struct kernel_vm86_regs { | 150 | struct kernel_vm86_regs { |
150 | /* | 151 | /* |
151 | * normal regs, with special meaning for the segment descriptors.. | 152 | * normal regs, with special meaning for the segment descriptors.. |
152 | */ | 153 | */ |
153 | long ebx; | 154 | struct pt_regs pt; |
154 | long ecx; | ||
155 | long edx; | ||
156 | long esi; | ||
157 | long edi; | ||
158 | long ebp; | ||
159 | long eax; | ||
160 | long __null_ds; | ||
161 | long __null_es; | ||
162 | long orig_eax; | ||
163 | long eip; | ||
164 | unsigned short cs, __csh; | ||
165 | long eflags; | ||
166 | long esp; | ||
167 | unsigned short ss, __ssh; | ||
168 | /* | 155 | /* |
169 | * these are specific to v86 mode: | 156 | * these are specific to v86 mode: |
170 | */ | 157 | */ |
diff --git a/include/asm-x86_64/Kbuild b/include/asm-x86_64/Kbuild index 763521358fb8..ebd7117782a6 100644 --- a/include/asm-x86_64/Kbuild +++ b/include/asm-x86_64/Kbuild | |||
@@ -6,7 +6,6 @@ ALTARCHDEF := defined __i386__ | |||
6 | 6 | ||
7 | header-y += boot.h | 7 | header-y += boot.h |
8 | header-y += bootsetup.h | 8 | header-y += bootsetup.h |
9 | header-y += cpufeature.h | ||
10 | header-y += debugreg.h | 9 | header-y += debugreg.h |
11 | header-y += ldt.h | 10 | header-y += ldt.h |
12 | header-y += msr.h | 11 | header-y += msr.h |
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h index a584826cc570..a6657b4f3e0e 100644 --- a/include/asm-x86_64/alternative.h +++ b/include/asm-x86_64/alternative.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <linux/stddef.h> | ||
7 | #include <asm/cpufeature.h> | 8 | #include <asm/cpufeature.h> |
8 | 9 | ||
9 | struct alt_instr { | 10 | struct alt_instr { |
@@ -133,4 +134,15 @@ static inline void alternatives_smp_switch(int smp) {} | |||
133 | #define LOCK_PREFIX "" | 134 | #define LOCK_PREFIX "" |
134 | #endif | 135 | #endif |
135 | 136 | ||
137 | struct paravirt_patch; | ||
138 | #ifdef CONFIG_PARAVIRT | ||
139 | void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end); | ||
140 | #else | ||
141 | static inline void | ||
142 | apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end) | ||
143 | {} | ||
144 | #define __start_parainstructions NULL | ||
145 | #define __stop_parainstructions NULL | ||
146 | #endif | ||
147 | |||
136 | #endif /* _X86_64_ALTERNATIVE_H */ | 148 | #endif /* _X86_64_ALTERNATIVE_H */ |
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h index 93849f7abc24..706ca4b60000 100644 --- a/include/asm-x86_64/atomic.h +++ b/include/asm-x86_64/atomic.h | |||
@@ -189,9 +189,9 @@ static __inline__ int atomic_add_return(int i, atomic_t *v) | |||
189 | { | 189 | { |
190 | int __i = i; | 190 | int __i = i; |
191 | __asm__ __volatile__( | 191 | __asm__ __volatile__( |
192 | LOCK_PREFIX "xaddl %0, %1;" | 192 | LOCK_PREFIX "xaddl %0, %1" |
193 | :"=r"(i) | 193 | :"+r" (i), "+m" (v->counter) |
194 | :"m"(v->counter), "0"(i)); | 194 | : : "memory"); |
195 | return i + __i; | 195 | return i + __i; |
196 | } | 196 | } |
197 | 197 | ||
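For reference, the xadd idiom whose constraints the hunk above corrects: xadd adds the register into memory and leaves the old memory value in the register, so the new counter value is that old value plus i. A stand-alone sketch of the idiom (x86, GNU C; not the kernel's atomic_t API):

	#include <stdio.h>

	static int sketch_add_return(int i, volatile int *counter)
	{
		int old = i;

		__asm__ __volatile__("lock; xaddl %0, %1"
				     : "+r" (old), "+m" (*counter)
				     : : "memory");
		return old + i;		/* xadd left the previous counter value in 'old' */
	}

	int main(void)
	{
		volatile int counter = 5;

		printf("%d\n", sketch_add_return(3, &counter));	/* prints 8 */
		printf("%d\n", counter);			/* prints 8 */
		return 0;
	}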
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h index 6b93f5a3a5c8..7ee900645719 100644 --- a/include/asm-x86_64/calgary.h +++ b/include/asm-x86_64/calgary.h | |||
@@ -51,6 +51,8 @@ struct iommu_table { | |||
51 | #define TCE_TABLE_SIZE_4M 6 | 51 | #define TCE_TABLE_SIZE_4M 6 |
52 | #define TCE_TABLE_SIZE_8M 7 | 52 | #define TCE_TABLE_SIZE_8M 7 |
53 | 53 | ||
54 | extern int use_calgary; | ||
55 | |||
54 | #ifdef CONFIG_CALGARY_IOMMU | 56 | #ifdef CONFIG_CALGARY_IOMMU |
55 | extern int calgary_iommu_init(void); | 57 | extern int calgary_iommu_init(void); |
56 | extern void detect_calgary(void); | 58 | extern void detect_calgary(void); |
diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h index ee792faaca01..0b3c686139f1 100644 --- a/include/asm-x86_64/cpufeature.h +++ b/include/asm-x86_64/cpufeature.h | |||
@@ -29,7 +29,7 @@ | |||
29 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ | 29 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ |
30 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ | 30 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ |
31 | #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ | 31 | #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ |
32 | #define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */ | 32 | #define X86_FEATURE_DS (0*32+21) /* Debug Store */ |
33 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ | 33 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ |
34 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ | 34 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ |
35 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ | 35 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ |
@@ -68,6 +68,8 @@ | |||
68 | #define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */ | 68 | #define X86_FEATURE_FXSAVE_LEAK (3*32+7) /* FIP/FOP/FDP leaks through FXSAVE */ |
69 | #define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */ | 69 | #define X86_FEATURE_UP (3*32+8) /* SMP kernel running on UP */ |
70 | #define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */ | 70 | #define X86_FEATURE_ARCH_PERFMON (3*32+9) /* Intel Architectural PerfMon */ |
71 | #define X86_FEATURE_PEBS (3*32+10) /* Precise-Event Based Sampling */ | ||
72 | #define X86_FEATURE_BTS (3*32+11) /* Branch Trace Store */ | ||
71 | 73 | ||
72 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 74 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
73 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | 75 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ |
@@ -112,5 +114,8 @@ | |||
112 | #define cpu_has_cyrix_arr 0 | 114 | #define cpu_has_cyrix_arr 0 |
113 | #define cpu_has_centaur_mcr 0 | 115 | #define cpu_has_centaur_mcr 0 |
114 | #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) | 116 | #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) |
117 | #define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) | ||
118 | #define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) | ||
119 | #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) | ||
115 | 120 | ||
116 | #endif /* __ASM_X8664_CPUFEATURE_H */ | 121 | #endif /* __ASM_X8664_CPUFEATURE_H */ |
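The new DS/PEBS/BTS bits live in word 3, the Linux-defined feature word, so they can be tested with the cpu_has_* helpers added at the bottom of this header before any of the related MSRs are touched. A hedged usage sketch (the function name is invented for illustration):

/* Sketch: PEBS needs the Debug Store save area as well as the PEBS bit itself. */
static int sketch_pebs_usable(void)
{
	return cpu_has_ds && cpu_has_pebs;
}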
diff --git a/include/asm-x86_64/delay.h b/include/asm-x86_64/delay.h index 65f64acc5319..c2669f1f5529 100644 --- a/include/asm-x86_64/delay.h +++ b/include/asm-x86_64/delay.h | |||
@@ -7,18 +7,21 @@ | |||
7 | * Delay routines calling functions in arch/x86_64/lib/delay.c | 7 | * Delay routines calling functions in arch/x86_64/lib/delay.c |
8 | */ | 8 | */ |
9 | 9 | ||
10 | /* Undefined functions to get compile-time errors */ | ||
10 | extern void __bad_udelay(void); | 11 | extern void __bad_udelay(void); |
11 | extern void __bad_ndelay(void); | 12 | extern void __bad_ndelay(void); |
12 | 13 | ||
13 | extern void __udelay(unsigned long usecs); | 14 | extern void __udelay(unsigned long usecs); |
14 | extern void __ndelay(unsigned long usecs); | 15 | extern void __ndelay(unsigned long nsecs); |
15 | extern void __const_udelay(unsigned long usecs); | 16 | extern void __const_udelay(unsigned long usecs); |
16 | extern void __delay(unsigned long loops); | 17 | extern void __delay(unsigned long loops); |
17 | 18 | ||
19 | /* 0x10c7 is 2**32 / 1000000 (rounded up) */ | ||
18 | #define udelay(n) (__builtin_constant_p(n) ? \ | 20 | #define udelay(n) (__builtin_constant_p(n) ? \ |
19 | ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c6ul)) : \ | 21 | ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ |
20 | __udelay(n)) | 22 | __udelay(n)) |
21 | 23 | ||
24 | /* 0x5 is 2**32 / 1000000000 (rounded up) */ | ||
22 | #define ndelay(n) (__builtin_constant_p(n) ? \ | 25 | #define ndelay(n) (__builtin_constant_p(n) ? \ |
23 | ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ | 26 | ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ |
24 | __ndelay(n)) | 27 | __ndelay(n)) |
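The new comments pin down the magic constants: 2^32/10^6 rounded up is 0x10c7 and 2^32/10^9 rounded up is 5, so multiplying the requested microseconds or nanoseconds by them yields a 32.32 fixed-point fraction of a second for __const_udelay() to scale, and rounding up guarantees the delay is never shorter than requested. A small standalone check of the arithmetic (ordinary userspace C, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned long long two32 = 1ULL << 32;

	/* 2^32 / 10^6 = 4294 remainder 967296, so the old 0x10c6 (4294)
	 * rounds down and 0x10c7 (4295) is the smallest safe multiplier. */
	printf("2^32 / 10^6 = %llu r %llu\n", two32 / 1000000, two32 % 1000000);
	printf("0x10c6 = %d, 0x10c7 = %d\n", 0x10c6, 0x10c7);
	return 0;
}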
diff --git a/include/asm-x86_64/desc.h b/include/asm-x86_64/desc.h index eb7723a46790..913d6ac00033 100644 --- a/include/asm-x86_64/desc.h +++ b/include/asm-x86_64/desc.h | |||
@@ -9,64 +9,13 @@ | |||
9 | 9 | ||
10 | #include <linux/string.h> | 10 | #include <linux/string.h> |
11 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
12 | #include <asm/desc_defs.h> | ||
12 | 13 | ||
13 | #include <asm/segment.h> | 14 | #include <asm/segment.h> |
14 | #include <asm/mmu.h> | 15 | #include <asm/mmu.h> |
15 | 16 | ||
16 | // 8 byte segment descriptor | ||
17 | struct desc_struct { | ||
18 | u16 limit0; | ||
19 | u16 base0; | ||
20 | unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1; | ||
21 | unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8; | ||
22 | } __attribute__((packed)); | ||
23 | |||
24 | struct n_desc_struct { | ||
25 | unsigned int a,b; | ||
26 | }; | ||
27 | |||
28 | extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; | 17 | extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; |
29 | 18 | ||
30 | enum { | ||
31 | GATE_INTERRUPT = 0xE, | ||
32 | GATE_TRAP = 0xF, | ||
33 | GATE_CALL = 0xC, | ||
34 | }; | ||
35 | |||
36 | // 16byte gate | ||
37 | struct gate_struct { | ||
38 | u16 offset_low; | ||
39 | u16 segment; | ||
40 | unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1; | ||
41 | u16 offset_middle; | ||
42 | u32 offset_high; | ||
43 | u32 zero1; | ||
44 | } __attribute__((packed)); | ||
45 | |||
46 | #define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) | ||
47 | #define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF) | ||
48 | #define PTR_HIGH(x) ((unsigned long)(x) >> 32) | ||
49 | |||
50 | enum { | ||
51 | DESC_TSS = 0x9, | ||
52 | DESC_LDT = 0x2, | ||
53 | }; | ||
54 | |||
55 | // LDT or TSS descriptor in the GDT. 16 bytes. | ||
56 | struct ldttss_desc { | ||
57 | u16 limit0; | ||
58 | u16 base0; | ||
59 | unsigned base1 : 8, type : 5, dpl : 2, p : 1; | ||
60 | unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8; | ||
61 | u32 base3; | ||
62 | u32 zero1; | ||
63 | } __attribute__((packed)); | ||
64 | |||
65 | struct desc_ptr { | ||
66 | unsigned short size; | ||
67 | unsigned long address; | ||
68 | } __attribute__((packed)) ; | ||
69 | |||
70 | #define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8)) | 19 | #define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8)) |
71 | #define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8)) | 20 | #define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8)) |
72 | #define clear_LDT() asm volatile("lldt %w0"::"r" (0)) | 21 | #define clear_LDT() asm volatile("lldt %w0"::"r" (0)) |
diff --git a/include/asm-x86_64/desc_defs.h b/include/asm-x86_64/desc_defs.h new file mode 100644 index 000000000000..089004070099 --- /dev/null +++ b/include/asm-x86_64/desc_defs.h | |||
@@ -0,0 +1,69 @@ | |||
1 | /* Written 2000 by Andi Kleen */ | ||
2 | #ifndef __ARCH_DESC_DEFS_H | ||
3 | #define __ARCH_DESC_DEFS_H | ||
4 | |||
5 | /* | ||
6 | * Segment descriptor structure definitions, usable from both x86_64 and i386 | ||
7 | * archs. | ||
8 | */ | ||
9 | |||
10 | #ifndef __ASSEMBLY__ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | |||
14 | // 8 byte segment descriptor | ||
15 | struct desc_struct { | ||
16 | u16 limit0; | ||
17 | u16 base0; | ||
18 | unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1; | ||
19 | unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8; | ||
20 | } __attribute__((packed)); | ||
21 | |||
22 | struct n_desc_struct { | ||
23 | unsigned int a,b; | ||
24 | }; | ||
25 | |||
26 | enum { | ||
27 | GATE_INTERRUPT = 0xE, | ||
28 | GATE_TRAP = 0xF, | ||
29 | GATE_CALL = 0xC, | ||
30 | }; | ||
31 | |||
32 | // 16byte gate | ||
33 | struct gate_struct { | ||
34 | u16 offset_low; | ||
35 | u16 segment; | ||
36 | unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1; | ||
37 | u16 offset_middle; | ||
38 | u32 offset_high; | ||
39 | u32 zero1; | ||
40 | } __attribute__((packed)); | ||
41 | |||
42 | #define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF) | ||
43 | #define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF) | ||
44 | #define PTR_HIGH(x) ((unsigned long)(x) >> 32) | ||
45 | |||
46 | enum { | ||
47 | DESC_TSS = 0x9, | ||
48 | DESC_LDT = 0x2, | ||
49 | }; | ||
50 | |||
51 | // LDT or TSS descriptor in the GDT. 16 bytes. | ||
52 | struct ldttss_desc { | ||
53 | u16 limit0; | ||
54 | u16 base0; | ||
55 | unsigned base1 : 8, type : 5, dpl : 2, p : 1; | ||
56 | unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8; | ||
57 | u32 base3; | ||
58 | u32 zero1; | ||
59 | } __attribute__((packed)); | ||
60 | |||
61 | struct desc_ptr { | ||
62 | unsigned short size; | ||
63 | unsigned long address; | ||
64 | } __attribute__((packed)) ; | ||
65 | |||
66 | |||
67 | #endif /* !__ASSEMBLY__ */ | ||
68 | |||
69 | #endif | ||
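The new desc_defs.h only relocates the descriptor layouts out of desc.h so other headers can use them without dragging in the GDT/TSS helpers. As a hedged illustration of how the pieces fit together, here is a simplified sketch of packing a 64-bit handler address into a 16-byte interrupt gate with the PTR_* macros; the in-tree equivalent of this logic lives in desc.h, and the function below is illustrative only:

/* Sketch: lay out an x86-64 IDT entry from a handler address. */
static void sketch_pack_gate(struct gate_struct *g, void *handler,
			     u16 segment, unsigned ist, unsigned dpl)
{
	g->offset_low    = PTR_LOW(handler);
	g->segment       = segment;
	g->ist           = ist;
	g->zero0         = 0;
	g->type          = GATE_INTERRUPT;
	g->dpl           = dpl;
	g->p             = 1;			/* present */
	g->offset_middle = PTR_MIDDLE(handler);
	g->offset_high   = PTR_HIGH(handler);
	g->zero1         = 0;
}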
diff --git a/include/asm-x86_64/genapic.h b/include/asm-x86_64/genapic.h index a0e9a4b93484..b80f4bb5f273 100644 --- a/include/asm-x86_64/genapic.h +++ b/include/asm-x86_64/genapic.h | |||
@@ -30,6 +30,6 @@ struct genapic { | |||
30 | }; | 30 | }; |
31 | 31 | ||
32 | 32 | ||
33 | extern struct genapic *genapic; | 33 | extern struct genapic *genapic, *genapic_force, apic_flat; |
34 | 34 | ||
35 | #endif | 35 | #endif |
diff --git a/include/asm-x86_64/msr.h b/include/asm-x86_64/msr.h index 37e194169fac..952783d35c7b 100644 --- a/include/asm-x86_64/msr.h +++ b/include/asm-x86_64/msr.h | |||
@@ -169,8 +169,8 @@ static inline unsigned int cpuid_edx(unsigned int op) | |||
169 | #define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ | 169 | #define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */ |
170 | #define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */ | 170 | #define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */ |
171 | #define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ | 171 | #define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */ |
172 | #define MSR_FS_BASE 0xc0000100 /* 64bit GS base */ | 172 | #define MSR_FS_BASE 0xc0000100 /* 64bit FS base */ |
173 | #define MSR_GS_BASE 0xc0000101 /* 64bit FS base */ | 173 | #define MSR_GS_BASE 0xc0000101 /* 64bit GS base */ |
174 | #define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */ | 174 | #define MSR_KERNEL_GS_BASE 0xc0000102 /* SwapGS GS shadow (or USER_GS from kernel) */ |
175 | /* EFER bits: */ | 175 | /* EFER bits: */ |
176 | #define _EFER_SCE 0 /* SYSCALL/SYSRET */ | 176 | #define _EFER_SCE 0 /* SYSCALL/SYSRET */ |
@@ -210,6 +210,10 @@ static inline unsigned int cpuid_edx(unsigned int op) | |||
210 | #define MSR_IA32_LASTINTFROMIP 0x1dd | 210 | #define MSR_IA32_LASTINTFROMIP 0x1dd |
211 | #define MSR_IA32_LASTINTTOIP 0x1de | 211 | #define MSR_IA32_LASTINTTOIP 0x1de |
212 | 212 | ||
213 | #define MSR_IA32_PEBS_ENABLE 0x3f1 | ||
214 | #define MSR_IA32_DS_AREA 0x600 | ||
215 | #define MSR_IA32_PERF_CAPABILITIES 0x345 | ||
216 | |||
213 | #define MSR_MTRRfix64K_00000 0x250 | 217 | #define MSR_MTRRfix64K_00000 0x250 |
214 | #define MSR_MTRRfix16K_80000 0x258 | 218 | #define MSR_MTRRfix16K_80000 0x258 |
215 | #define MSR_MTRRfix16K_A0000 0x259 | 219 | #define MSR_MTRRfix16K_A0000 0x259 |
@@ -407,4 +411,13 @@ static inline unsigned int cpuid_edx(unsigned int op) | |||
407 | #define MSR_P4_U2L_ESCR0 0x3b0 | 411 | #define MSR_P4_U2L_ESCR0 0x3b0 |
408 | #define MSR_P4_U2L_ESCR1 0x3b1 | 412 | #define MSR_P4_U2L_ESCR1 0x3b1 |
409 | 413 | ||
414 | /* Intel Core-based CPU performance counters */ | ||
415 | #define MSR_CORE_PERF_FIXED_CTR0 0x309 | ||
416 | #define MSR_CORE_PERF_FIXED_CTR1 0x30a | ||
417 | #define MSR_CORE_PERF_FIXED_CTR2 0x30b | ||
418 | #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d | ||
419 | #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e | ||
420 | #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f | ||
421 | #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 | ||
422 | |||
410 | #endif | 423 | #endif |
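The additions above cover the Debug Store / PEBS machinery and the architectural fixed performance counters on Core-based CPUs. A hedged sketch of programming fixed counter 0 with the rdmsr/wrmsr helpers already in this header; the bit layout (enable bits 0-1 of FIXED_CTR_CTRL, bit 32 of GLOBAL_CTRL) is taken from Intel's architectural perfmon documentation and is an assumption here, not something this patch defines:

/* Sketch: count retired instructions on fixed counter 0 in rings 0 and 3. */
static void sketch_enable_fixed_ctr0(void)
{
	unsigned int lo, hi;

	rdmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, lo, hi);
	lo |= 0x3;		/* enable OS + USR counting for counter 0 (assumed layout) */
	wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, lo, hi);

	rdmsr(MSR_CORE_PERF_GLOBAL_CTRL, lo, hi);
	hi |= 1;		/* EN_FIXED_CTR0 is bit 32, i.e. bit 0 of the high half */
	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, lo, hi);
}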
diff --git a/include/asm-x86_64/nmi.h b/include/asm-x86_64/nmi.h index f367d4014b42..72375e7d32a8 100644 --- a/include/asm-x86_64/nmi.h +++ b/include/asm-x86_64/nmi.h | |||
@@ -77,4 +77,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, | |||
77 | 77 | ||
78 | extern int unknown_nmi_panic; | 78 | extern int unknown_nmi_panic; |
79 | 79 | ||
80 | void __trigger_all_cpu_backtrace(void); | ||
81 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | ||
82 | |||
80 | #endif /* ASM_NMI_H */ | 83 | #endif /* ASM_NMI_H */ |
diff --git a/include/asm-x86_64/pci-direct.h b/include/asm-x86_64/pci-direct.h index eba9cb471df3..6823fa4f1afa 100644 --- a/include/asm-x86_64/pci-direct.h +++ b/include/asm-x86_64/pci-direct.h | |||
@@ -10,6 +10,7 @@ extern u32 read_pci_config(u8 bus, u8 slot, u8 func, u8 offset); | |||
10 | extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset); | 10 | extern u8 read_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset); |
11 | extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset); | 11 | extern u16 read_pci_config_16(u8 bus, u8 slot, u8 func, u8 offset); |
12 | extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val); | 12 | extern void write_pci_config(u8 bus, u8 slot, u8 func, u8 offset, u32 val); |
13 | extern void write_pci_config_byte(u8 bus, u8 slot, u8 func, u8 offset, u8 val); | ||
13 | 14 | ||
14 | extern int early_pci_allowed(void); | 15 | extern int early_pci_allowed(void); |
15 | 16 | ||
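write_pci_config_byte() completes the set of early config-space accessors that work before the PCI subsystem is initialised. A hedged usage sketch in the early-quirk style these helpers are meant for; the register offset 0x40 is a made-up example:

/* Sketch: set a single config-space bit on a device during early boot. */
static void sketch_early_quirk(u8 bus, u8 slot, u8 func)
{
	u8 val;

	if (!early_pci_allowed())
		return;
	val = read_pci_config_byte(bus, slot, func, 0x40);
	write_pci_config_byte(bus, slot, func, 0x40, val | 0x01);
}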
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h index 0555c1c4d8fa..59901c690a0d 100644 --- a/include/asm-x86_64/pgtable.h +++ b/include/asm-x86_64/pgtable.h | |||
@@ -221,20 +221,19 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long | |||
221 | #define __S110 PAGE_SHARED_EXEC | 221 | #define __S110 PAGE_SHARED_EXEC |
222 | #define __S111 PAGE_SHARED_EXEC | 222 | #define __S111 PAGE_SHARED_EXEC |
223 | 223 | ||
224 | static inline unsigned long pgd_bad(pgd_t pgd) | 224 | static inline unsigned long pgd_bad(pgd_t pgd) |
225 | { | 225 | { |
226 | unsigned long val = pgd_val(pgd); | 226 | return pgd_val(pgd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); |
227 | val &= ~PTE_MASK; | 227 | } |
228 | val &= ~(_PAGE_USER | _PAGE_DIRTY); | ||
229 | return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED); | ||
230 | } | ||
231 | 228 | ||
232 | static inline unsigned long pud_bad(pud_t pud) | 229 | static inline unsigned long pud_bad(pud_t pud) |
233 | { | 230 | { |
234 | unsigned long val = pud_val(pud); | 231 | return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); |
235 | val &= ~PTE_MASK; | 232 | } |
236 | val &= ~(_PAGE_USER | _PAGE_DIRTY); | 233 | |
237 | return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED); | 234 | static inline unsigned long pmd_bad(pmd_t pmd) |
235 | { | ||
236 | return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER); | ||
238 | } | 237 | } |
239 | 238 | ||
240 | #define pte_none(x) (!pte_val(x)) | 239 | #define pte_none(x) (!pte_val(x)) |
@@ -347,7 +346,6 @@ static inline int pmd_large(pmd_t pte) { | |||
347 | #define pmd_none(x) (!pmd_val(x)) | 346 | #define pmd_none(x) (!pmd_val(x)) |
348 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) | 347 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) |
349 | #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) | 348 | #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) |
350 | #define pmd_bad(x) ((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE ) | ||
351 | #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) | 349 | #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot))) |
352 | #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) | 350 | #define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) |
353 | 351 | ||
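The rewritten pgd_bad() and pud_bad(), and the new inline pmd_bad(), now share one rule: mask off the physical-address bits (PTE_MASK) and the flags a kernel page-table entry may legitimately carry (_KERNPG_TABLE plus _PAGE_USER); any bit left standing marks the entry as bad. A sketch of that rule spelled out, using only symbols defined in this header:

/* Sketch: the common idea behind the unified *_bad() checks. */
static inline unsigned long sketch_entry_bad(unsigned long val)
{
	unsigned long allowed = PTE_MASK	/* physical address bits          */
			      | _KERNPG_TABLE	/* flags of a valid kernel table  */
			      | _PAGE_USER;	/* user bit is also acceptable    */

	return val & ~allowed;			/* nonzero means a corrupt entry  */
}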
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h index cef17e0f828c..76552d72804c 100644 --- a/include/asm-x86_64/processor.h +++ b/include/asm-x86_64/processor.h | |||
@@ -475,6 +475,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) | |||
475 | : :"a" (eax), "c" (ecx)); | 475 | : :"a" (eax), "c" (ecx)); |
476 | } | 476 | } |
477 | 477 | ||
478 | static inline void __sti_mwait(unsigned long eax, unsigned long ecx) | ||
479 | { | ||
480 | /* "mwait %eax,%ecx;" */ | ||
481 | asm volatile( | ||
482 | "sti; .byte 0x0f,0x01,0xc9;" | ||
483 | : :"a" (eax), "c" (ecx)); | ||
484 | } | ||
485 | |||
478 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); | 486 | extern void mwait_idle_with_hints(unsigned long eax, unsigned long ecx); |
479 | 487 | ||
480 | #define stack_current() \ | 488 | #define stack_current() \ |
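__sti_mwait() emits the mwait opcode (0x0f 0x01 0xc9) directly behind an sti so that mwait executes in the one-instruction interrupt shadow; an interrupt arriving between enabling interrupts and entering mwait can therefore no longer put the CPU to sleep on a stale monitor. A hedged sketch of the idle pattern it is intended for (simplified; the real user is the x86-64 mwait idle code, and the monitored variable here is only illustrative):

/* Sketch: monitor/mwait idle with the sti/mwait race closed.
 * Interrupts are assumed to be disabled on entry. */
static void sketch_mwait_idle(void)
{
	volatile int wakeup_flag = 0;

	__monitor((const void *)&wakeup_flag, 0, 0);	/* arm the monitor            */
	if (!need_resched())
		__sti_mwait(0, 0);			/* sti + mwait, back to back  */
	else
		local_irq_enable();
}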
diff --git a/include/asm-x86_64/proto.h b/include/asm-x86_64/proto.h index e72cfcdf5344..6d324b838972 100644 --- a/include/asm-x86_64/proto.h +++ b/include/asm-x86_64/proto.h | |||
@@ -61,7 +61,6 @@ extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn); | |||
61 | extern unsigned long numa_free_all_bootmem(void); | 61 | extern unsigned long numa_free_all_bootmem(void); |
62 | 62 | ||
63 | extern void reserve_bootmem_generic(unsigned long phys, unsigned len); | 63 | extern void reserve_bootmem_generic(unsigned long phys, unsigned len); |
64 | extern void free_bootmem_generic(unsigned long phys, unsigned len); | ||
65 | 64 | ||
66 | extern void load_gs_index(unsigned gs); | 65 | extern void load_gs_index(unsigned gs); |
67 | 66 | ||
@@ -88,6 +87,7 @@ extern void syscall32_cpu_init(void); | |||
88 | extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); | 87 | extern void setup_node_bootmem(int nodeid, unsigned long start, unsigned long end); |
89 | 88 | ||
90 | extern void early_quirks(void); | 89 | extern void early_quirks(void); |
90 | extern void quirk_intel_irqbalance(void); | ||
91 | extern void check_efer(void); | 91 | extern void check_efer(void); |
92 | 92 | ||
93 | extern int unhandled_signal(struct task_struct *tsk, int sig); | 93 | extern int unhandled_signal(struct task_struct *tsk, int sig); |
diff --git a/include/asm-x86_64/rio.h b/include/asm-x86_64/rio.h new file mode 100644 index 000000000000..c7350f6d2015 --- /dev/null +++ b/include/asm-x86_64/rio.h | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * Derived from include/asm-i386/mach-summit/mach_mpparse.h | ||
3 | * and include/asm-i386/mach-default/bios_ebda.h | ||
4 | * | ||
5 | * Author: Laurent Vivier <Laurent.Vivier@bull.net> | ||
6 | */ | ||
7 | |||
8 | #ifndef __ASM_RIO_H | ||
9 | #define __ASM_RIO_H | ||
10 | |||
11 | #define RIO_TABLE_VERSION 3 | ||
12 | |||
13 | struct rio_table_hdr { | ||
14 | u8 version; /* Version number of this data structure */ | ||
15 | u8 num_scal_dev; /* # of Scalability devices */ | ||
16 | u8 num_rio_dev; /* # of RIO I/O devices */ | ||
17 | } __attribute__((packed)); | ||
18 | |||
19 | struct scal_detail { | ||
20 | u8 node_id; /* Scalability Node ID */ | ||
21 | u32 CBAR; /* Address of 1MB register space */ | ||
22 | u8 port0node; /* Node ID port connected to: 0xFF=None */ | ||
23 | u8 port0port; /* Port num port connected to: 0,1,2, or */ | ||
24 | /* 0xFF=None */ | ||
25 | u8 port1node; /* Node ID port connected to: 0xFF = None */ | ||
26 | u8 port1port; /* Port num port connected to: 0,1,2, or */ | ||
27 | /* 0xFF=None */ | ||
28 | u8 port2node; /* Node ID port connected to: 0xFF = None */ | ||
29 | u8 port2port; /* Port num port connected to: 0,1,2, or */ | ||
30 | /* 0xFF=None */ | ||
31 | u8 chassis_num; /* 1 based Chassis number (1 = boot node) */ | ||
32 | } __attribute__((packed)); | ||
33 | |||
34 | struct rio_detail { | ||
35 | u8 node_id; /* RIO Node ID */ | ||
36 | u32 BBAR; /* Address of 1MB register space */ | ||
37 | u8 type; /* Type of device */ | ||
38 | u8 owner_id; /* Node ID of Hurricane that owns this */ | ||
39 | /* node */ | ||
40 | u8 port0node; /* Node ID port connected to: 0xFF=None */ | ||
41 | u8 port0port; /* Port num port connected to: 0,1,2, or */ | ||
42 | /* 0xFF=None */ | ||
43 | u8 port1node; /* Node ID port connected to: 0xFF=None */ | ||
44 | u8 port1port; /* Port num port connected to: 0,1,2, or */ | ||
45 | /* 0xFF=None */ | ||
46 | u8 first_slot; /* Lowest slot number below this Calgary */ | ||
47 | u8 status; /* Bit 0 = 1 : the XAPIC is used */ | ||
48 | /* = 0 : the XAPIC is not used, ie: */ | ||
49 | /* ints fwded to another XAPIC */ | ||
50 | /* Bits1:7 Reserved */ | ||
51 | u8 WP_index; /* instance index - lower ones have */ | ||
52 | /* lower slot numbers/PCI bus numbers */ | ||
53 | u8 chassis_num; /* 1 based Chassis number */ | ||
54 | } __attribute__((packed)); | ||
55 | |||
56 | enum { | ||
57 | HURR_SCALABILTY = 0, /* Hurricane Scalability info */ | ||
58 | HURR_RIOIB = 2, /* Hurricane RIOIB info */ | ||
59 | COMPAT_CALGARY = 4, /* Compatibility Calgary */ | ||
60 | ALT_CALGARY = 5, /* Second Planar Calgary */ | ||
61 | }; | ||
62 | |||
63 | /* | ||
64 | * there is a real-mode segmented pointer pointing to the | ||
65 | * 4K EBDA area at 0x40E. | ||
66 | */ | ||
67 | static inline unsigned long get_bios_ebda(void) | ||
68 | { | ||
69 | unsigned long address = *(unsigned short *)phys_to_virt(0x40EUL); | ||
70 | address <<= 4; | ||
71 | return address; | ||
72 | } | ||
73 | |||
74 | #endif /* __ASM_RIO_H */ | ||
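For reference, get_bios_ebda() turns the real-mode segment word stored at physical 0x40E into a linear address by shifting it left four bits. As a worked example with an assumed value: if that word reads 0x9FC0, the Extended BIOS Data Area starts at 0x9FC0 << 4 = 0x9FC00, just below the 640 KiB boundary.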
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index f1bdd500d7ac..e17b9ec42e98 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h | |||
@@ -82,11 +82,6 @@ extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */ | |||
82 | extern u8 x86_cpu_to_log_apicid[NR_CPUS]; | 82 | extern u8 x86_cpu_to_log_apicid[NR_CPUS]; |
83 | extern u8 bios_cpu_apicid[]; | 83 | extern u8 bios_cpu_apicid[]; |
84 | 84 | ||
85 | static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask) | ||
86 | { | ||
87 | return cpus_addr(cpumask)[0]; | ||
88 | } | ||
89 | |||
90 | static inline int cpu_present_to_apicid(int mps_cpu) | 85 | static inline int cpu_present_to_apicid(int mps_cpu) |
91 | { | 86 | { |
92 | if (mps_cpu < NR_CPUS) | 87 | if (mps_cpu < NR_CPUS) |
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h index 05ef097ba55b..88bf981e73cf 100644 --- a/include/asm-x86_64/spinlock.h +++ b/include/asm-x86_64/spinlock.h | |||
@@ -36,7 +36,34 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) | |||
36 | "2:\t" : "=m" (lock->slock) : : "memory"); | 36 | "2:\t" : "=m" (lock->slock) : : "memory"); |
37 | } | 37 | } |
38 | 38 | ||
39 | #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) | 39 | /* |
40 | * Same as __raw_spin_lock, but reenable interrupts during spinning. | ||
41 | */ | ||
42 | #ifndef CONFIG_PROVE_LOCKING | ||
43 | static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) | ||
44 | { | ||
45 | asm volatile( | ||
46 | "\n1:\t" | ||
47 | LOCK_PREFIX " ; decl %0\n\t" | ||
48 | "jns 5f\n" | ||
49 | "testl $0x200, %1\n\t" /* interrupts were disabled? */ | ||
50 | "jz 4f\n\t" | ||
51 | "sti\n" | ||
52 | "3:\t" | ||
53 | "rep;nop\n\t" | ||
54 | "cmpl $0, %0\n\t" | ||
55 | "jle 3b\n\t" | ||
56 | "cli\n\t" | ||
57 | "jmp 1b\n" | ||
58 | "4:\t" | ||
59 | "rep;nop\n\t" | ||
60 | "cmpl $0, %0\n\t" | ||
61 | "jg 1b\n\t" | ||
62 | "jmp 4b\n" | ||
63 | "5:\n\t" | ||
64 | : "+m" (lock->slock) : "r" ((unsigned)flags) : "memory"); | ||
65 | } | ||
66 | #endif | ||
40 | 67 | ||
41 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) | 68 | static inline int __raw_spin_trylock(raw_spinlock_t *lock) |
42 | { | 69 | { |
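The new __raw_spin_lock_flags() spins with interrupts re-enabled whenever the caller's saved flags show they were on (bit 9 of EFLAGS, the IF flag, tested via the 0x200 mask), and disables them again before retrying the locked decrement; under CONFIG_PROVE_LOCKING the generic fallback is kept so lockdep sees the simple path. A C-level paraphrase of the assembly, as a reading aid only; locked_dec_is_negative() is a hypothetical stand-in for the "lock; decl / jns" pair:

/* Sketch (pseudologic): what the asm above implements. */
static void sketch_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	while (locked_dec_is_negative(&lock->slock)) {	/* hypothetical helper */
		if (flags & 0x200)			/* interrupts were on  */
			local_irq_enable();		/* sti                 */
		while ((int)lock->slock <= 0)		/* rep;nop busy wait   */
			cpu_relax();
		if (flags & 0x200)
			local_irq_disable();		/* cli before retrying */
	}
}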
diff --git a/include/asm-x86_64/stacktrace.h b/include/asm-x86_64/stacktrace.h index 5eb9799bef76..6f0b54594307 100644 --- a/include/asm-x86_64/stacktrace.h +++ b/include/asm-x86_64/stacktrace.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _ASM_STACKTRACE_H | 1 | #ifndef _ASM_STACKTRACE_H |
2 | #define _ASM_STACKTRACE_H 1 | 2 | #define _ASM_STACKTRACE_H 1 |
3 | 3 | ||
4 | extern int kstack_depth_to_print; | ||
5 | |||
4 | /* Generic stack tracer with callbacks */ | 6 | /* Generic stack tracer with callbacks */ |
5 | 7 | ||
6 | struct stacktrace_ops { | 8 | struct stacktrace_ops { |
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h index 2e7ff10fd775..2f6349e48717 100644 --- a/include/asm-x86_64/unwind.h +++ b/include/asm-x86_64/unwind.h | |||
@@ -87,14 +87,10 @@ extern int arch_unwind_init_running(struct unwind_frame_info *, | |||
87 | 87 | ||
88 | static inline int arch_unw_user_mode(const struct unwind_frame_info *info) | 88 | static inline int arch_unw_user_mode(const struct unwind_frame_info *info) |
89 | { | 89 | { |
90 | #if 0 /* This can only work when selector register saves/restores | 90 | return user_mode(&info->regs) |
91 | are properly annotated (and tracked in UNW_REGISTER_INFO). */ | 91 | || (long)info->regs.rip >= 0 |
92 | return user_mode(&info->regs); | ||
93 | #else | ||
94 | return (long)info->regs.rip >= 0 | ||
95 | || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END) | 92 | || (info->regs.rip >= VSYSCALL_START && info->regs.rip < VSYSCALL_END) |
96 | || (long)info->regs.rsp >= 0; | 93 | || (long)info->regs.rsp >= 0; |
97 | #endif | ||
98 | } | 94 | } |
99 | 95 | ||
100 | #else | 96 | #else |
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h index 01d1c17e2849..05cb8dd200de 100644 --- a/include/asm-x86_64/vsyscall.h +++ b/include/asm-x86_64/vsyscall.h | |||
@@ -10,6 +10,7 @@ enum vsyscall_num { | |||
10 | #define VSYSCALL_START (-10UL << 20) | 10 | #define VSYSCALL_START (-10UL << 20) |
11 | #define VSYSCALL_SIZE 1024 | 11 | #define VSYSCALL_SIZE 1024 |
12 | #define VSYSCALL_END (-2UL << 20) | 12 | #define VSYSCALL_END (-2UL << 20) |
13 | #define VSYSCALL_MAPPED_PAGES 1 | ||
13 | #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr)) | 14 | #define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr)) |
14 | 15 | ||
15 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
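For orientation, VSYSCALL_START = -10UL << 20 works out to 0xffffffffff600000 and VSYSCALL_END = -2UL << 20 to 0xffffffffffe00000, with each vsyscall slot 1024 bytes wide, so VSYSCALL_ADDR(0) is 0xffffffffff600000 and VSYSCALL_ADDR(1) is 0xffffffffff600400. The new VSYSCALL_MAPPED_PAGES records how many 4 KiB pages of that region are actually backed by a mapping (one).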
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index bf00ce6ecadf..bfb520212d71 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | struct cpu { | 29 | struct cpu { |
30 | int node_id; /* The node which contains the CPU */ | 30 | int node_id; /* The node which contains the CPU */ |
31 | int no_control; /* Should the sysfs control file be created? */ | 31 | int hotpluggable; /* creates sysfs control file if hotpluggable */ |
32 | struct sys_device sysdev; | 32 | struct sys_device sysdev; |
33 | }; | 33 | }; |
34 | 34 | ||
diff --git a/include/linux/efi.h b/include/linux/efi.h index 66d621dbcb6c..df1c91855f0e 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -300,8 +300,9 @@ extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, | |||
300 | extern int __init efi_uart_console_only (void); | 300 | extern int __init efi_uart_console_only (void); |
301 | extern void efi_initialize_iomem_resources(struct resource *code_resource, | 301 | extern void efi_initialize_iomem_resources(struct resource *code_resource, |
302 | struct resource *data_resource); | 302 | struct resource *data_resource); |
303 | extern unsigned long __init efi_get_time(void); | 303 | extern unsigned long efi_get_time(void); |
304 | extern int __init efi_set_rtc_mmss(unsigned long nowtime); | 304 | extern int __init efi_set_rtc_mmss(unsigned long nowtime); |
305 | extern int is_available_memory(efi_memory_desc_t * md); | ||
305 | extern struct efi_memory_map memmap; | 306 | extern struct efi_memory_map memmap; |
306 | 307 | ||
307 | /** | 308 | /** |
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index e16904e28c3a..acb4ed130247 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h | |||
@@ -15,9 +15,14 @@ | |||
15 | * disables interrupts for a long time. This call is stateless. | 15 | * disables interrupts for a long time. This call is stateless. |
16 | */ | 16 | */ |
17 | #ifdef ARCH_HAS_NMI_WATCHDOG | 17 | #ifdef ARCH_HAS_NMI_WATCHDOG |
18 | #include <asm/nmi.h> | ||
18 | extern void touch_nmi_watchdog(void); | 19 | extern void touch_nmi_watchdog(void); |
19 | #else | 20 | #else |
20 | # define touch_nmi_watchdog() touch_softlockup_watchdog() | 21 | # define touch_nmi_watchdog() touch_softlockup_watchdog() |
21 | #endif | 22 | #endif |
22 | 23 | ||
24 | #ifndef trigger_all_cpu_backtrace | ||
25 | #define trigger_all_cpu_backtrace() do { } while (0) | ||
26 | #endif | ||
27 | |||
23 | #endif | 28 | #endif |
diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h index 2925e66a6732..b02308ee7667 100644 --- a/include/linux/screen_info.h +++ b/include/linux/screen_info.h | |||
@@ -42,7 +42,8 @@ struct screen_info { | |||
42 | u16 pages; /* 0x32 */ | 42 | u16 pages; /* 0x32 */ |
43 | u16 vesa_attributes; /* 0x34 */ | 43 | u16 vesa_attributes; /* 0x34 */ |
44 | u32 capabilities; /* 0x36 */ | 44 | u32 capabilities; /* 0x36 */ |
45 | /* 0x3a -- 0x3f reserved for future expansion */ | 45 | /* 0x3a -- 0x3b reserved for future expansion */ |
46 | /* 0x3c -- 0x3f micro stack for relocatable kernels */ | ||
46 | }; | 47 | }; |
47 | 48 | ||
48 | extern struct screen_info screen_info; | 49 | extern struct screen_info screen_info; |
diff --git a/include/linux/start_kernel.h b/include/linux/start_kernel.h new file mode 100644 index 000000000000..d3e5f2756545 --- /dev/null +++ b/include/linux/start_kernel.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _LINUX_START_KERNEL_H | ||
2 | #define _LINUX_START_KERNEL_H | ||
3 | |||
4 | #include <linux/linkage.h> | ||
5 | #include <linux/init.h> | ||
6 | |||
7 | /* Define the prototype for start_kernel here, rather than cluttering | ||
8 | up something else. */ | ||
9 | |||
10 | extern asmlinkage void __init start_kernel(void); | ||
11 | |||
12 | #endif /* _LINUX_START_KERNEL_H */ | ||
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 76c3fe325101..975c963e5789 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h | |||
@@ -78,7 +78,7 @@ static inline unsigned long __copy_from_user_nocache(void *to, | |||
78 | \ | 78 | \ |
79 | set_fs(KERNEL_DS); \ | 79 | set_fs(KERNEL_DS); \ |
80 | pagefault_disable(); \ | 80 | pagefault_disable(); \ |
81 | ret = __get_user(retval, addr); \ | 81 | ret = __get_user(retval, (__force typeof(retval) __user *)(addr)); \ |
82 | pagefault_enable(); \ | 82 | pagefault_enable(); \ |
83 | set_fs(old_fs); \ | 83 | set_fs(old_fs); \ |
84 | ret; \ | 84 | ret; \ |
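This is the hunk resolved by hand in the merge: the macro still funnels through __get_user(), but the added __force ... __user cast tells sparse that handing a kernel pointer to a user-access primitive is intentional here, since the surrounding code has already widened the access limit with set_fs(KERNEL_DS). A function-style paraphrase of the idea, as a sketch only:

/* Sketch: read a long from a kernel address via the user-access machinery,
 * with the address-space cast that keeps sparse quiet. */
static long sketch_probe_kernel_long(const long *kaddr, long *result)
{
	mm_segment_t old_fs = get_fs();
	long ret;

	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __get_user(*result, (__force const long __user *)kaddr);
	pagefault_enable();
	set_fs(old_fs);
	return ret;
}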