diff options
author | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-07 11:59:11 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.osdl.org> | 2006-12-07 11:59:11 -0500 |
commit | 4522d58275f124105819723e24e912c8e5bf3cdd (patch) | |
tree | b92c29014fadffe049c1925676037f0092b8d112 /include/asm-i386 | |
parent | 6cf24f031bc97cb5a7c9df3b6e73c45b628b2b28 (diff) | |
parent | 64a26a731235b59c9d73bbe82c1f896d57400d37 (diff) |
Merge branch 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6
* 'for-linus' of git://one.firstfloor.org/home/andi/git/linux-2.6: (156 commits)
[PATCH] x86-64: Export smp_call_function_single
[PATCH] i386: Clean up smp_tune_scheduling()
[PATCH] unwinder: move .eh_frame to RODATA
[PATCH] unwinder: fully support linker generated .eh_frame_hdr section
[PATCH] x86-64: don't use set_irq_regs()
[PATCH] x86-64: check vector in setup_ioapic_dest to verify if need setup_IO_APIC_irq
[PATCH] x86-64: Make ix86 default to HIGHMEM4G instead of NOHIGHMEM
[PATCH] i386: replace kmalloc+memset with kzalloc
[PATCH] x86-64: remove remaining pc98 code
[PATCH] x86-64: remove unused variable
[PATCH] x86-64: Fix constraints in atomic_add_return()
[PATCH] x86-64: fix asm constraints in i386 atomic_add_return
[PATCH] x86-64: Correct documentation for bzImage protocol v2.05
[PATCH] x86-64: replace kmalloc+memset with kzalloc in MTRR code
[PATCH] x86-64: Fix numaq build error
[PATCH] x86-64: include/asm-x86_64/cpufeature.h isn't a userspace header
[PATCH] unwinder: Add debugging output to the Dwarf2 unwinder
[PATCH] x86-64: Clarify error message in GART code
[PATCH] x86-64: Fix interrupt race in idle callback (3rd try)
[PATCH] x86-64: Remove unwind stack pointer alignment forcing again
...
Fixed conflict in include/linux/uaccess.h manually
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/asm-i386')
45 files changed, 1114 insertions, 272 deletions
diff --git a/include/asm-i386/alternative.h b/include/asm-i386/alternative.h index b01a7ec409ce..b8fa9557c532 100644 --- a/include/asm-i386/alternative.h +++ b/include/asm-i386/alternative.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | #include <asm/types.h> | 6 | #include <asm/types.h> |
7 | 7 | #include <linux/stddef.h> | |
8 | #include <linux/types.h> | 8 | #include <linux/types.h> |
9 | 9 | ||
10 | struct alt_instr { | 10 | struct alt_instr { |
@@ -118,4 +118,15 @@ static inline void alternatives_smp_switch(int smp) {} | |||
118 | #define LOCK_PREFIX "" | 118 | #define LOCK_PREFIX "" |
119 | #endif | 119 | #endif |
120 | 120 | ||
121 | struct paravirt_patch; | ||
122 | #ifdef CONFIG_PARAVIRT | ||
123 | void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end); | ||
124 | #else | ||
125 | static inline void | ||
126 | apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end) | ||
127 | {} | ||
128 | #define __start_parainstructions NULL | ||
129 | #define __stop_parainstructions NULL | ||
130 | #endif | ||
131 | |||
121 | #endif /* _I386_ALTERNATIVE_H */ | 132 | #endif /* _I386_ALTERNATIVE_H */ |
diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h index b9529578fc37..41a44319905f 100644 --- a/include/asm-i386/apic.h +++ b/include/asm-i386/apic.h | |||
@@ -37,18 +37,27 @@ extern void generic_apic_probe(void); | |||
37 | /* | 37 | /* |
38 | * Basic functions accessing APICs. | 38 | * Basic functions accessing APICs. |
39 | */ | 39 | */ |
40 | #ifdef CONFIG_PARAVIRT | ||
41 | #include <asm/paravirt.h> | ||
42 | #else | ||
43 | #define apic_write native_apic_write | ||
44 | #define apic_write_atomic native_apic_write_atomic | ||
45 | #define apic_read native_apic_read | ||
46 | #endif | ||
40 | 47 | ||
41 | static __inline void apic_write(unsigned long reg, unsigned long v) | 48 | static __inline fastcall void native_apic_write(unsigned long reg, |
49 | unsigned long v) | ||
42 | { | 50 | { |
43 | *((volatile unsigned long *)(APIC_BASE+reg)) = v; | 51 | *((volatile unsigned long *)(APIC_BASE+reg)) = v; |
44 | } | 52 | } |
45 | 53 | ||
46 | static __inline void apic_write_atomic(unsigned long reg, unsigned long v) | 54 | static __inline fastcall void native_apic_write_atomic(unsigned long reg, |
55 | unsigned long v) | ||
47 | { | 56 | { |
48 | xchg((volatile unsigned long *)(APIC_BASE+reg), v); | 57 | xchg((volatile unsigned long *)(APIC_BASE+reg), v); |
49 | } | 58 | } |
50 | 59 | ||
51 | static __inline unsigned long apic_read(unsigned long reg) | 60 | static __inline fastcall unsigned long native_apic_read(unsigned long reg) |
52 | { | 61 | { |
53 | return *((volatile unsigned long *)(APIC_BASE+reg)); | 62 | return *((volatile unsigned long *)(APIC_BASE+reg)); |
54 | } | 63 | } |
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h index a6c024e2506f..c57441bb2905 100644 --- a/include/asm-i386/atomic.h +++ b/include/asm-i386/atomic.h | |||
@@ -187,9 +187,9 @@ static __inline__ int atomic_add_return(int i, atomic_t *v) | |||
187 | /* Modern 486+ processor */ | 187 | /* Modern 486+ processor */ |
188 | __i = i; | 188 | __i = i; |
189 | __asm__ __volatile__( | 189 | __asm__ __volatile__( |
190 | LOCK_PREFIX "xaddl %0, %1;" | 190 | LOCK_PREFIX "xaddl %0, %1" |
191 | :"=r"(i) | 191 | :"+r" (i), "+m" (v->counter) |
192 | :"m"(v->counter), "0"(i)); | 192 | : : "memory"); |
193 | return i + __i; | 193 | return i + __i; |
194 | 194 | ||
195 | #ifdef CONFIG_M386 | 195 | #ifdef CONFIG_M386 |
diff --git a/include/asm-i386/boot.h b/include/asm-i386/boot.h index 96b228e6e79c..8ce79a6fa891 100644 --- a/include/asm-i386/boot.h +++ b/include/asm-i386/boot.h | |||
@@ -12,4 +12,8 @@ | |||
12 | #define EXTENDED_VGA 0xfffe /* 80x50 mode */ | 12 | #define EXTENDED_VGA 0xfffe /* 80x50 mode */ |
13 | #define ASK_VGA 0xfffd /* ask for it at bootup */ | 13 | #define ASK_VGA 0xfffd /* ask for it at bootup */ |
14 | 14 | ||
15 | #endif | 15 | /* Physical address where kernel should be loaded. */ | ||
16 | #define LOAD_PHYSICAL_ADDR ((0x100000 + CONFIG_PHYSICAL_ALIGN - 1) \ | ||
17 | & ~(CONFIG_PHYSICAL_ALIGN - 1)) | ||
18 | |||
19 | #endif /* _LINUX_BOOT_H */ | ||
diff --git a/include/asm-i386/bugs.h b/include/asm-i386/bugs.h index 592ffeeda45e..38f1aebbbdb5 100644 --- a/include/asm-i386/bugs.h +++ b/include/asm-i386/bugs.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <asm/processor.h> | 21 | #include <asm/processor.h> |
22 | #include <asm/i387.h> | 22 | #include <asm/i387.h> |
23 | #include <asm/msr.h> | 23 | #include <asm/msr.h> |
24 | #include <asm/paravirt.h> | ||
24 | 25 | ||
25 | static int __init no_halt(char *s) | 26 | static int __init no_halt(char *s) |
26 | { | 27 | { |
@@ -91,6 +92,9 @@ static void __init check_fpu(void) | |||
91 | 92 | ||
92 | static void __init check_hlt(void) | 93 | static void __init check_hlt(void) |
93 | { | 94 | { |
95 | if (paravirt_enabled()) | ||
96 | return; | ||
97 | |||
94 | printk(KERN_INFO "Checking 'hlt' instruction... "); | 98 | printk(KERN_INFO "Checking 'hlt' instruction... "); |
95 | if (!boot_cpu_data.hlt_works_ok) { | 99 | if (!boot_cpu_data.hlt_works_ok) { |
96 | printk("disabled\n"); | 100 | printk("disabled\n"); |
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h index b1bc7b1b64b0..9d914e1e4aad 100644 --- a/include/asm-i386/cpu.h +++ b/include/asm-i386/cpu.h | |||
@@ -13,6 +13,9 @@ struct i386_cpu { | |||
13 | extern int arch_register_cpu(int num); | 13 | extern int arch_register_cpu(int num); |
14 | #ifdef CONFIG_HOTPLUG_CPU | 14 | #ifdef CONFIG_HOTPLUG_CPU |
15 | extern void arch_unregister_cpu(int); | 15 | extern void arch_unregister_cpu(int); |
16 | extern int enable_cpu_hotplug; | ||
17 | #else | ||
18 | #define enable_cpu_hotplug 0 | ||
16 | #endif | 19 | #endif |
17 | 20 | ||
18 | DECLARE_PER_CPU(int, cpu_state); | 21 | DECLARE_PER_CPU(int, cpu_state); |
diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h index d314ebb3d59e..3f92b94e0d75 100644 --- a/include/asm-i386/cpufeature.h +++ b/include/asm-i386/cpufeature.h | |||
@@ -31,7 +31,7 @@ | |||
31 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ | 31 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ |
32 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ | 32 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ |
33 | #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ | 33 | #define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */ |
34 | #define X86_FEATURE_DTES (0*32+21) /* Debug Trace Store */ | 34 | #define X86_FEATURE_DS (0*32+21) /* Debug Store */ |
35 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ | 35 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ |
36 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ | 36 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ |
37 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ | 37 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */ |
@@ -73,6 +73,8 @@ | |||
73 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ | 73 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ |
74 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ | 74 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* FXSAVE leaks FOP/FIP/FOP */ |
75 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ | 75 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ |
76 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ | ||
77 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ | ||
76 | 78 | ||
77 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 79 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
78 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ | 80 | #define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */ |
@@ -134,6 +136,10 @@ | |||
134 | #define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) | 136 | #define cpu_has_phe_enabled boot_cpu_has(X86_FEATURE_PHE_EN) |
135 | #define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) | 137 | #define cpu_has_pmm boot_cpu_has(X86_FEATURE_PMM) |
136 | #define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) | 138 | #define cpu_has_pmm_enabled boot_cpu_has(X86_FEATURE_PMM_EN) |
139 | #define cpu_has_ds boot_cpu_has(X86_FEATURE_DS) | ||
140 | #define cpu_has_pebs boot_cpu_has(X86_FEATURE_PEBS) | ||
141 | #define cpu_has_clflush boot_cpu_has(X86_FEATURE_CLFLSH) | ||
142 | #define cpu_has_bts boot_cpu_has(X86_FEATURE_BTS) | ||
137 | 143 | ||
138 | #endif /* __ASM_I386_CPUFEATURE_H */ | 144 | #endif /* __ASM_I386_CPUFEATURE_H */ |
139 | 145 | ||
diff --git a/include/asm-i386/current.h b/include/asm-i386/current.h index 3cbbecd79016..5252ee0f6d7a 100644 --- a/include/asm-i386/current.h +++ b/include/asm-i386/current.h | |||
@@ -1,13 +1,14 @@ | |||
1 | #ifndef _I386_CURRENT_H | 1 | #ifndef _I386_CURRENT_H |
2 | #define _I386_CURRENT_H | 2 | #define _I386_CURRENT_H |
3 | 3 | ||
4 | #include <linux/thread_info.h> | 4 | #include <asm/pda.h> |
5 | #include <linux/compiler.h> | ||
5 | 6 | ||
6 | struct task_struct; | 7 | struct task_struct; |
7 | 8 | ||
8 | static __always_inline struct task_struct * get_current(void) | 9 | static __always_inline struct task_struct *get_current(void) |
9 | { | 10 | { |
10 | return current_thread_info()->task; | 11 | return read_pda(pcurrent); |
11 | } | 12 | } |
12 | 13 | ||
13 | #define current get_current() | 14 | #define current get_current() |
diff --git a/include/asm-i386/delay.h b/include/asm-i386/delay.h index b1c7650dc7b9..32d6678d0bbf 100644 --- a/include/asm-i386/delay.h +++ b/include/asm-i386/delay.h | |||
@@ -7,6 +7,7 @@ | |||
7 | * Delay routines calling functions in arch/i386/lib/delay.c | 7 | * Delay routines calling functions in arch/i386/lib/delay.c |
8 | */ | 8 | */ |
9 | 9 | ||
10 | /* Undefined functions to get compile-time errors */ | ||
10 | extern void __bad_udelay(void); | 11 | extern void __bad_udelay(void); |
11 | extern void __bad_ndelay(void); | 12 | extern void __bad_ndelay(void); |
12 | 13 | ||
@@ -15,13 +16,23 @@ extern void __ndelay(unsigned long nsecs); | |||
15 | extern void __const_udelay(unsigned long usecs); | 16 | extern void __const_udelay(unsigned long usecs); |
16 | extern void __delay(unsigned long loops); | 17 | extern void __delay(unsigned long loops); |
17 | 18 | ||
19 | #if defined(CONFIG_PARAVIRT) && !defined(USE_REAL_TIME_DELAY) | ||
20 | #define udelay(n) paravirt_ops.const_udelay((n) * 0x10c7ul) | ||
21 | |||
22 | #define ndelay(n) paravirt_ops.const_udelay((n) * 5ul) | ||
23 | |||
24 | #else /* !PARAVIRT || USE_REAL_TIME_DELAY */ | ||
25 | |||
26 | /* 0x10c7 is 2**32 / 1000000 (rounded up) */ | ||
18 | #define udelay(n) (__builtin_constant_p(n) ? \ | 27 | #define udelay(n) (__builtin_constant_p(n) ? \ |
19 | ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ | 28 | ((n) > 20000 ? __bad_udelay() : __const_udelay((n) * 0x10c7ul)) : \ |
20 | __udelay(n)) | 29 | __udelay(n)) |
21 | 30 | ||
31 | /* 0x5 is 2**32 / 1000000000 (rounded up) */ | ||
22 | #define ndelay(n) (__builtin_constant_p(n) ? \ | 32 | #define ndelay(n) (__builtin_constant_p(n) ? \ |
23 | ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ | 33 | ((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \ |
24 | __ndelay(n)) | 34 | __ndelay(n)) |
35 | #endif | ||
25 | 36 | ||
26 | void use_tsc_delay(void); | 37 | void use_tsc_delay(void); |
27 | 38 | ||
diff --git a/include/asm-i386/desc.h b/include/asm-i386/desc.h index 5874ef119ffd..f398cc456448 100644 --- a/include/asm-i386/desc.h +++ b/include/asm-i386/desc.h | |||
@@ -4,8 +4,6 @@ | |||
4 | #include <asm/ldt.h> | 4 | #include <asm/ldt.h> |
5 | #include <asm/segment.h> | 5 | #include <asm/segment.h> |
6 | 6 | ||
7 | #define CPU_16BIT_STACK_SIZE 1024 | ||
8 | |||
9 | #ifndef __ASSEMBLY__ | 7 | #ifndef __ASSEMBLY__ |
10 | 8 | ||
11 | #include <linux/preempt.h> | 9 | #include <linux/preempt.h> |
@@ -16,8 +14,6 @@ | |||
16 | 14 | ||
17 | extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; | 15 | extern struct desc_struct cpu_gdt_table[GDT_ENTRIES]; |
18 | 16 | ||
19 | DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]); | ||
20 | |||
21 | struct Xgt_desc_struct { | 17 | struct Xgt_desc_struct { |
22 | unsigned short size; | 18 | unsigned short size; |
23 | unsigned long address __attribute__((packed)); | 19 | unsigned long address __attribute__((packed)); |
@@ -33,11 +29,6 @@ static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) | |||
33 | return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address; | 29 | return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address; |
34 | } | 30 | } |
35 | 31 | ||
36 | /* | ||
37 | * This is the ldt that every process will get unless we need | ||
38 | * something other than this. | ||
39 | */ | ||
40 | extern struct desc_struct default_ldt[]; | ||
41 | extern struct desc_struct idt_table[]; | 32 | extern struct desc_struct idt_table[]; |
42 | extern void set_intr_gate(unsigned int irq, void * addr); | 33 | extern void set_intr_gate(unsigned int irq, void * addr); |
43 | 34 | ||
@@ -64,8 +55,10 @@ static inline void pack_gate(__u32 *a, __u32 *b, | |||
64 | #define DESCTYPE_DPL3 0x60 /* DPL-3 */ | 55 | #define DESCTYPE_DPL3 0x60 /* DPL-3 */ |
65 | #define DESCTYPE_S 0x10 /* !system */ | 56 | #define DESCTYPE_S 0x10 /* !system */ |
66 | 57 | ||
58 | #ifdef CONFIG_PARAVIRT | ||
59 | #include <asm/paravirt.h> | ||
60 | #else | ||
67 | #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) | 61 | #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) |
68 | #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)) | ||
69 | 62 | ||
70 | #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr)) | 63 | #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr)) |
71 | #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr)) | 64 | #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr)) |
@@ -88,6 +81,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned int cpu) | |||
88 | #undef C | 81 | #undef C |
89 | } | 82 | } |
90 | 83 | ||
84 | #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | ||
85 | #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | ||
86 | #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | ||
87 | |||
91 | static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b) | 88 | static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entry_b) |
92 | { | 89 | { |
93 | __u32 *lp = (__u32 *)((char *)dt + entry*8); | 90 | __u32 *lp = (__u32 *)((char *)dt + entry*8); |
@@ -95,9 +92,25 @@ static inline void write_dt_entry(void *dt, int entry, __u32 entry_a, __u32 entr | |||
95 | *(lp+1) = entry_b; | 92 | *(lp+1) = entry_b; |
96 | } | 93 | } |
97 | 94 | ||
98 | #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | 95 | #define set_ldt native_set_ldt |
99 | #define write_gdt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | 96 | #endif /* CONFIG_PARAVIRT */ |
100 | #define write_idt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) | 97 | |
98 | static inline fastcall void native_set_ldt(const void *addr, | ||
99 | unsigned int entries) | ||
100 | { | ||
101 | if (likely(entries == 0)) | ||
102 | __asm__ __volatile__("lldt %w0"::"q" (0)); | ||
103 | else { | ||
104 | unsigned cpu = smp_processor_id(); | ||
105 | __u32 a, b; | ||
106 | |||
107 | pack_descriptor(&a, &b, (unsigned long)addr, | ||
108 | entries * sizeof(struct desc_struct) - 1, | ||
109 | DESCTYPE_LDT, 0); | ||
110 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b); | ||
111 | __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)); | ||
112 | } | ||
113 | } | ||
101 | 114 | ||
102 | static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg) | 115 | static inline void _set_gate(int gate, unsigned int type, void *addr, unsigned short seg) |
103 | { | 116 | { |
@@ -115,14 +128,6 @@ static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const vo | |||
115 | write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b); | 128 | write_gdt_entry(get_cpu_gdt_table(cpu), entry, a, b); |
116 | } | 129 | } |
117 | 130 | ||
118 | static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entries) | ||
119 | { | ||
120 | __u32 a, b; | ||
121 | pack_descriptor(&a, &b, (unsigned long)addr, | ||
122 | entries * sizeof(struct desc_struct) - 1, | ||
123 | DESCTYPE_LDT, 0); | ||
124 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_LDT, a, b); | ||
125 | } | ||
126 | 131 | ||
127 | #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) | 132 | #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr) |
128 | 133 | ||
@@ -153,35 +158,22 @@ static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int entri | |||
153 | 158 | ||
154 | static inline void clear_LDT(void) | 159 | static inline void clear_LDT(void) |
155 | { | 160 | { |
156 | int cpu = get_cpu(); | 161 | set_ldt(NULL, 0); |
157 | |||
158 | set_ldt_desc(cpu, &default_ldt[0], 5); | ||
159 | load_LDT_desc(); | ||
160 | put_cpu(); | ||
161 | } | 162 | } |
162 | 163 | ||
163 | /* | 164 | /* |
164 | * load one particular LDT into the current CPU | 165 | * load one particular LDT into the current CPU |
165 | */ | 166 | */ |
166 | static inline void load_LDT_nolock(mm_context_t *pc, int cpu) | 167 | static inline void load_LDT_nolock(mm_context_t *pc) |
167 | { | 168 | { |
168 | void *segments = pc->ldt; | 169 | set_ldt(pc->ldt, pc->size); |
169 | int count = pc->size; | ||
170 | |||
171 | if (likely(!count)) { | ||
172 | segments = &default_ldt[0]; | ||
173 | count = 5; | ||
174 | } | ||
175 | |||
176 | set_ldt_desc(cpu, segments, count); | ||
177 | load_LDT_desc(); | ||
178 | } | 170 | } |
179 | 171 | ||
180 | static inline void load_LDT(mm_context_t *pc) | 172 | static inline void load_LDT(mm_context_t *pc) |
181 | { | 173 | { |
182 | int cpu = get_cpu(); | 174 | preempt_disable(); |
183 | load_LDT_nolock(pc, cpu); | 175 | load_LDT_nolock(pc); |
184 | put_cpu(); | 176 | preempt_enable(); |
185 | } | 177 | } |
186 | 178 | ||
187 | static inline unsigned long get_desc_base(unsigned long *desc) | 179 | static inline unsigned long get_desc_base(unsigned long *desc) |
@@ -193,6 +185,29 @@ static inline unsigned long get_desc_base(unsigned long *desc) | |||
193 | return base; | 185 | return base; |
194 | } | 186 | } |
195 | 187 | ||
188 | #else /* __ASSEMBLY__ */ | ||
189 | |||
190 | /* | ||
191 | * GET_DESC_BASE reads the descriptor base of the specified segment. | ||
192 | * | ||
193 | * Args: | ||
194 | * idx - descriptor index | ||
195 | * gdt - GDT pointer | ||
196 | * base - 32bit register to which the base will be written | ||
197 | * lo_w - lo word of the "base" register | ||
198 | * lo_b - lo byte of the "base" register | ||
199 | * hi_b - hi byte of the low word of the "base" register | ||
200 | * | ||
201 | * Example: | ||
202 | * GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah) | ||
203 | * Will read the base address of GDT_ENTRY_ESPFIX_SS and put it into %eax. | ||
204 | */ | ||
205 | #define GET_DESC_BASE(idx, gdt, base, lo_w, lo_b, hi_b) \ | ||
206 | movb idx*8+4(gdt), lo_b; \ | ||
207 | movb idx*8+7(gdt), hi_b; \ | ||
208 | shll $16, base; \ | ||
209 | movw idx*8+2(gdt), lo_w; | ||
210 | |||
196 | #endif /* !__ASSEMBLY__ */ | 211 | #endif /* !__ASSEMBLY__ */ |
197 | 212 | ||
198 | #endif | 213 | #endif |
diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h index f7514fb6e8e4..395077aba583 100644 --- a/include/asm-i386/e820.h +++ b/include/asm-i386/e820.h | |||
@@ -38,6 +38,11 @@ extern struct e820map e820; | |||
38 | 38 | ||
39 | extern int e820_all_mapped(unsigned long start, unsigned long end, | 39 | extern int e820_all_mapped(unsigned long start, unsigned long end, |
40 | unsigned type); | 40 | unsigned type); |
41 | extern void find_max_pfn(void); | ||
42 | extern void register_bootmem_low_pages(unsigned long max_low_pfn); | ||
43 | extern void register_memory(void); | ||
44 | extern void limit_regions(unsigned long long size); | ||
45 | extern void print_memory_map(char *who); | ||
41 | 46 | ||
42 | #endif/*!__ASSEMBLY__*/ | 47 | #endif/*!__ASSEMBLY__*/ |
43 | 48 | ||
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h index 3a05436f31c0..45d21a0c95bf 100644 --- a/include/asm-i386/elf.h +++ b/include/asm-i386/elf.h | |||
@@ -91,7 +91,7 @@ typedef struct user_fxsr_struct elf_fpxregset_t; | |||
91 | pr_reg[7] = regs->xds; \ | 91 | pr_reg[7] = regs->xds; \ |
92 | pr_reg[8] = regs->xes; \ | 92 | pr_reg[8] = regs->xes; \ |
93 | savesegment(fs,pr_reg[9]); \ | 93 | savesegment(fs,pr_reg[9]); \ |
94 | savesegment(gs,pr_reg[10]); \ | 94 | pr_reg[10] = regs->xgs; \ |
95 | pr_reg[11] = regs->orig_eax; \ | 95 | pr_reg[11] = regs->orig_eax; \ |
96 | pr_reg[12] = regs->eip; \ | 96 | pr_reg[12] = regs->eip; \ |
97 | pr_reg[13] = regs->xcs; \ | 97 | pr_reg[13] = regs->xcs; \ |
diff --git a/include/asm-i386/genapic.h b/include/asm-i386/genapic.h index 8ffbb0f07457..fd2be593b06e 100644 --- a/include/asm-i386/genapic.h +++ b/include/asm-i386/genapic.h | |||
@@ -122,6 +122,6 @@ struct genapic { | |||
122 | APICFUNC(phys_pkg_id) \ | 122 | APICFUNC(phys_pkg_id) \ |
123 | } | 123 | } |
124 | 124 | ||
125 | extern struct genapic *genapic; | 125 | extern struct genapic *genapic, apic_default; |
126 | 126 | ||
127 | #endif | 127 | #endif |
diff --git a/include/asm-i386/i387.h b/include/asm-i386/i387.h index bc1d6edae1ed..434936c732d6 100644 --- a/include/asm-i386/i387.h +++ b/include/asm-i386/i387.h | |||
@@ -76,7 +76,9 @@ static inline void __save_init_fpu( struct task_struct *tsk ) | |||
76 | 76 | ||
77 | #define __unlazy_fpu( tsk ) do { \ | 77 | #define __unlazy_fpu( tsk ) do { \ |
78 | if (task_thread_info(tsk)->status & TS_USEDFPU) \ | 78 | if (task_thread_info(tsk)->status & TS_USEDFPU) \ |
79 | save_init_fpu( tsk ); \ | 79 | save_init_fpu( tsk ); \ |
80 | else \ | ||
81 | tsk->fpu_counter = 0; \ | ||
80 | } while (0) | 82 | } while (0) |
81 | 83 | ||
82 | #define __clear_fpu( tsk ) \ | 84 | #define __clear_fpu( tsk ) \ |
@@ -118,6 +120,7 @@ static inline void save_init_fpu( struct task_struct *tsk ) | |||
118 | extern unsigned short get_fpu_cwd( struct task_struct *tsk ); | 120 | extern unsigned short get_fpu_cwd( struct task_struct *tsk ); |
119 | extern unsigned short get_fpu_swd( struct task_struct *tsk ); | 121 | extern unsigned short get_fpu_swd( struct task_struct *tsk ); |
120 | extern unsigned short get_fpu_mxcsr( struct task_struct *tsk ); | 122 | extern unsigned short get_fpu_mxcsr( struct task_struct *tsk ); |
123 | extern asmlinkage void math_state_restore(void); | ||
121 | 124 | ||
122 | /* | 125 | /* |
123 | * Signal frame handlers... | 126 | * Signal frame handlers... |
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h index 68df0dc3ab8f..86ff5e83be2f 100644 --- a/include/asm-i386/io.h +++ b/include/asm-i386/io.h | |||
@@ -256,11 +256,11 @@ static inline void flush_write_buffers(void) | |||
256 | 256 | ||
257 | #endif /* __KERNEL__ */ | 257 | #endif /* __KERNEL__ */ |
258 | 258 | ||
259 | #ifdef SLOW_IO_BY_JUMPING | 259 | #if defined(CONFIG_PARAVIRT) |
260 | #define __SLOW_DOWN_IO "jmp 1f; 1: jmp 1f; 1:" | 260 | #include <asm/paravirt.h> |
261 | #else | 261 | #else |
262 | |||
262 | #define __SLOW_DOWN_IO "outb %%al,$0x80;" | 263 | #define __SLOW_DOWN_IO "outb %%al,$0x80;" |
263 | #endif | ||
264 | 264 | ||
265 | static inline void slow_down_io(void) { | 265 | static inline void slow_down_io(void) { |
266 | __asm__ __volatile__( | 266 | __asm__ __volatile__( |
@@ -271,6 +271,8 @@ static inline void slow_down_io(void) { | |||
271 | : : ); | 271 | : : ); |
272 | } | 272 | } |
273 | 273 | ||
274 | #endif | ||
275 | |||
274 | #ifdef CONFIG_X86_NUMAQ | 276 | #ifdef CONFIG_X86_NUMAQ |
275 | extern void *xquad_portio; /* Where the IO area was mapped */ | 277 | extern void *xquad_portio; /* Where the IO area was mapped */ |
276 | #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) | 278 | #define XQUAD_PORT_ADDR(port, quad) (xquad_portio + (XQUAD_PORTIO_QUAD*quad) + port) |
diff --git a/include/asm-i386/irq.h b/include/asm-i386/irq.h index 331726b41128..11761cdaae19 100644 --- a/include/asm-i386/irq.h +++ b/include/asm-i386/irq.h | |||
@@ -37,8 +37,13 @@ static __inline__ int irq_canonicalize(int irq) | |||
37 | extern int irqbalance_disable(char *str); | 37 | extern int irqbalance_disable(char *str); |
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | extern void quirk_intel_irqbalance(void); | ||
41 | |||
40 | #ifdef CONFIG_HOTPLUG_CPU | 42 | #ifdef CONFIG_HOTPLUG_CPU |
41 | extern void fixup_irqs(cpumask_t map); | 43 | extern void fixup_irqs(cpumask_t map); |
42 | #endif | 44 | #endif |
43 | 45 | ||
46 | void init_IRQ(void); | ||
47 | void __init native_init_IRQ(void); | ||
48 | |||
44 | #endif /* _ASM_IRQ_H */ | 49 | #endif /* _ASM_IRQ_H */ |
diff --git a/include/asm-i386/irq_regs.h b/include/asm-i386/irq_regs.h index 3dd9c0b70270..a1b3f7f594a2 100644 --- a/include/asm-i386/irq_regs.h +++ b/include/asm-i386/irq_regs.h | |||
@@ -1 +1,27 @@ | |||
1 | #include <asm-generic/irq_regs.h> | 1 | /* |
2 | * Per-cpu current frame pointer - the location of the last exception frame on | ||
3 | * the stack, stored in the PDA. | ||
4 | * | ||
5 | * Jeremy Fitzhardinge <jeremy@goop.org> | ||
6 | */ | ||
7 | #ifndef _ASM_I386_IRQ_REGS_H | ||
8 | #define _ASM_I386_IRQ_REGS_H | ||
9 | |||
10 | #include <asm/pda.h> | ||
11 | |||
12 | static inline struct pt_regs *get_irq_regs(void) | ||
13 | { | ||
14 | return read_pda(irq_regs); | ||
15 | } | ||
16 | |||
17 | static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs) | ||
18 | { | ||
19 | struct pt_regs *old_regs; | ||
20 | |||
21 | old_regs = read_pda(irq_regs); | ||
22 | write_pda(irq_regs, new_regs); | ||
23 | |||
24 | return old_regs; | ||
25 | } | ||
26 | |||
27 | #endif /* _ASM_I386_IRQ_REGS_H */ | ||
diff --git a/include/asm-i386/irqflags.h b/include/asm-i386/irqflags.h index e1bdb97c07fa..17b18cf4fe9d 100644 --- a/include/asm-i386/irqflags.h +++ b/include/asm-i386/irqflags.h | |||
@@ -10,6 +10,9 @@ | |||
10 | #ifndef _ASM_IRQFLAGS_H | 10 | #ifndef _ASM_IRQFLAGS_H |
11 | #define _ASM_IRQFLAGS_H | 11 | #define _ASM_IRQFLAGS_H |
12 | 12 | ||
13 | #ifdef CONFIG_PARAVIRT | ||
14 | #include <asm/paravirt.h> | ||
15 | #else | ||
13 | #ifndef __ASSEMBLY__ | 16 | #ifndef __ASSEMBLY__ |
14 | 17 | ||
15 | static inline unsigned long __raw_local_save_flags(void) | 18 | static inline unsigned long __raw_local_save_flags(void) |
@@ -25,9 +28,6 @@ static inline unsigned long __raw_local_save_flags(void) | |||
25 | return flags; | 28 | return flags; |
26 | } | 29 | } |
27 | 30 | ||
28 | #define raw_local_save_flags(flags) \ | ||
29 | do { (flags) = __raw_local_save_flags(); } while (0) | ||
30 | |||
31 | static inline void raw_local_irq_restore(unsigned long flags) | 31 | static inline void raw_local_irq_restore(unsigned long flags) |
32 | { | 32 | { |
33 | __asm__ __volatile__( | 33 | __asm__ __volatile__( |
@@ -66,18 +66,6 @@ static inline void halt(void) | |||
66 | __asm__ __volatile__("hlt": : :"memory"); | 66 | __asm__ __volatile__("hlt": : :"memory"); |
67 | } | 67 | } |
68 | 68 | ||
69 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
70 | { | ||
71 | return !(flags & (1 << 9)); | ||
72 | } | ||
73 | |||
74 | static inline int raw_irqs_disabled(void) | ||
75 | { | ||
76 | unsigned long flags = __raw_local_save_flags(); | ||
77 | |||
78 | return raw_irqs_disabled_flags(flags); | ||
79 | } | ||
80 | |||
81 | /* | 69 | /* |
82 | * For spinlocks, etc: | 70 | * For spinlocks, etc: |
83 | */ | 71 | */ |
@@ -90,9 +78,33 @@ static inline unsigned long __raw_local_irq_save(void) | |||
90 | return flags; | 78 | return flags; |
91 | } | 79 | } |
92 | 80 | ||
81 | #else | ||
82 | #define DISABLE_INTERRUPTS(clobbers) cli | ||
83 | #define ENABLE_INTERRUPTS(clobbers) sti | ||
84 | #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit | ||
85 | #define INTERRUPT_RETURN iret | ||
86 | #define GET_CR0_INTO_EAX movl %cr0, %eax | ||
87 | #endif /* __ASSEMBLY__ */ | ||
88 | #endif /* CONFIG_PARAVIRT */ | ||
89 | |||
90 | #ifndef __ASSEMBLY__ | ||
91 | #define raw_local_save_flags(flags) \ | ||
92 | do { (flags) = __raw_local_save_flags(); } while (0) | ||
93 | |||
93 | #define raw_local_irq_save(flags) \ | 94 | #define raw_local_irq_save(flags) \ |
94 | do { (flags) = __raw_local_irq_save(); } while (0) | 95 | do { (flags) = __raw_local_irq_save(); } while (0) |
95 | 96 | ||
97 | static inline int raw_irqs_disabled_flags(unsigned long flags) | ||
98 | { | ||
99 | return !(flags & (1 << 9)); | ||
100 | } | ||
101 | |||
102 | static inline int raw_irqs_disabled(void) | ||
103 | { | ||
104 | unsigned long flags = __raw_local_save_flags(); | ||
105 | |||
106 | return raw_irqs_disabled_flags(flags); | ||
107 | } | ||
96 | #endif /* __ASSEMBLY__ */ | 108 | #endif /* __ASSEMBLY__ */ |
97 | 109 | ||
98 | /* | 110 | /* |
diff --git a/include/asm-i386/mach-default/setup_arch.h b/include/asm-i386/mach-default/setup_arch.h index fb42099e7bd4..605e3ccb991b 100644 --- a/include/asm-i386/mach-default/setup_arch.h +++ b/include/asm-i386/mach-default/setup_arch.h | |||
@@ -2,4 +2,6 @@ | |||
2 | 2 | ||
3 | /* no action for generic */ | 3 | /* no action for generic */ |
4 | 4 | ||
5 | #ifndef ARCH_SETUP | ||
5 | #define ARCH_SETUP | 6 | #define ARCH_SETUP |
7 | #endif | ||
diff --git a/include/asm-i386/math_emu.h b/include/asm-i386/math_emu.h index 697673b555ce..a4b0aa3320e6 100644 --- a/include/asm-i386/math_emu.h +++ b/include/asm-i386/math_emu.h | |||
@@ -21,6 +21,7 @@ struct info { | |||
21 | long ___eax; | 21 | long ___eax; |
22 | long ___ds; | 22 | long ___ds; |
23 | long ___es; | 23 | long ___es; |
24 | long ___fs; | ||
24 | long ___orig_eax; | 25 | long ___orig_eax; |
25 | long ___eip; | 26 | long ___eip; |
26 | long ___cs; | 27 | long ___cs; |
diff --git a/include/asm-i386/mmu_context.h b/include/asm-i386/mmu_context.h index 62b7bf184094..68ff102d6f5e 100644 --- a/include/asm-i386/mmu_context.h +++ b/include/asm-i386/mmu_context.h | |||
@@ -44,7 +44,7 @@ static inline void switch_mm(struct mm_struct *prev, | |||
44 | * load the LDT, if the LDT is different: | 44 | * load the LDT, if the LDT is different: |
45 | */ | 45 | */ |
46 | if (unlikely(prev->context.ldt != next->context.ldt)) | 46 | if (unlikely(prev->context.ldt != next->context.ldt)) |
47 | load_LDT_nolock(&next->context, cpu); | 47 | load_LDT_nolock(&next->context); |
48 | } | 48 | } |
49 | #ifdef CONFIG_SMP | 49 | #ifdef CONFIG_SMP |
50 | else { | 50 | else { |
@@ -56,14 +56,14 @@ static inline void switch_mm(struct mm_struct *prev, | |||
56 | * tlb flush IPI delivery. We must reload %cr3. | 56 | * tlb flush IPI delivery. We must reload %cr3. |
57 | */ | 57 | */ |
58 | load_cr3(next->pgd); | 58 | load_cr3(next->pgd); |
59 | load_LDT_nolock(&next->context, cpu); | 59 | load_LDT_nolock(&next->context); |
60 | } | 60 | } |
61 | } | 61 | } |
62 | #endif | 62 | #endif |
63 | } | 63 | } |
64 | 64 | ||
65 | #define deactivate_mm(tsk, mm) \ | 65 | #define deactivate_mm(tsk, mm) \ |
66 | asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0)) | 66 | asm("movl %0,%%fs": :"r" (0)); |
67 | 67 | ||
68 | #define activate_mm(prev, next) \ | 68 | #define activate_mm(prev, next) \ |
69 | switch_mm((prev),(next),NULL) | 69 | switch_mm((prev),(next),NULL) |
diff --git a/include/asm-i386/module.h b/include/asm-i386/module.h index 424661d25bd3..02f8f541cbe0 100644 --- a/include/asm-i386/module.h +++ b/include/asm-i386/module.h | |||
@@ -20,6 +20,8 @@ struct mod_arch_specific | |||
20 | #define MODULE_PROC_FAMILY "586TSC " | 20 | #define MODULE_PROC_FAMILY "586TSC " |
21 | #elif defined CONFIG_M586MMX | 21 | #elif defined CONFIG_M586MMX |
22 | #define MODULE_PROC_FAMILY "586MMX " | 22 | #define MODULE_PROC_FAMILY "586MMX " |
23 | #elif defined CONFIG_MCORE2 | ||
24 | #define MODULE_PROC_FAMILY "CORE2 " | ||
23 | #elif defined CONFIG_M686 | 25 | #elif defined CONFIG_M686 |
24 | #define MODULE_PROC_FAMILY "686 " | 26 | #define MODULE_PROC_FAMILY "686 " |
25 | #elif defined CONFIG_MPENTIUMII | 27 | #elif defined CONFIG_MPENTIUMII |
@@ -60,18 +62,12 @@ struct mod_arch_specific | |||
60 | #error unknown processor family | 62 | #error unknown processor family |
61 | #endif | 63 | #endif |
62 | 64 | ||
63 | #ifdef CONFIG_REGPARM | ||
64 | #define MODULE_REGPARM "REGPARM " | ||
65 | #else | ||
66 | #define MODULE_REGPARM "" | ||
67 | #endif | ||
68 | |||
69 | #ifdef CONFIG_4KSTACKS | 65 | #ifdef CONFIG_4KSTACKS |
70 | #define MODULE_STACKSIZE "4KSTACKS " | 66 | #define MODULE_STACKSIZE "4KSTACKS " |
71 | #else | 67 | #else |
72 | #define MODULE_STACKSIZE "" | 68 | #define MODULE_STACKSIZE "" |
73 | #endif | 69 | #endif |
74 | 70 | ||
75 | #define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE | 71 | #define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE |
76 | 72 | ||
77 | #endif /* _ASM_I386_MODULE_H */ | 73 | #endif /* _ASM_I386_MODULE_H */ |
diff --git a/include/asm-i386/mpspec_def.h b/include/asm-i386/mpspec_def.h index 76feedf85a8a..13bafb16e7af 100644 --- a/include/asm-i386/mpspec_def.h +++ b/include/asm-i386/mpspec_def.h | |||
@@ -97,7 +97,6 @@ struct mpc_config_bus | |||
97 | #define BUSTYPE_TC "TC" | 97 | #define BUSTYPE_TC "TC" |
98 | #define BUSTYPE_VME "VME" | 98 | #define BUSTYPE_VME "VME" |
99 | #define BUSTYPE_XPRESS "XPRESS" | 99 | #define BUSTYPE_XPRESS "XPRESS" |
100 | #define BUSTYPE_NEC98 "NEC98" | ||
101 | 100 | ||
102 | struct mpc_config_ioapic | 101 | struct mpc_config_ioapic |
103 | { | 102 | { |
@@ -182,7 +181,6 @@ enum mp_bustype { | |||
182 | MP_BUS_EISA, | 181 | MP_BUS_EISA, |
183 | MP_BUS_PCI, | 182 | MP_BUS_PCI, |
184 | MP_BUS_MCA, | 183 | MP_BUS_MCA, |
185 | MP_BUS_NEC98 | ||
186 | }; | 184 | }; |
187 | #endif | 185 | #endif |
188 | 186 | ||
diff --git a/include/asm-i386/msr.h b/include/asm-i386/msr.h index 62b76cd96957..5679d4993072 100644 --- a/include/asm-i386/msr.h +++ b/include/asm-i386/msr.h | |||
@@ -1,6 +1,10 @@ | |||
1 | #ifndef __ASM_MSR_H | 1 | #ifndef __ASM_MSR_H |
2 | #define __ASM_MSR_H | 2 | #define __ASM_MSR_H |
3 | 3 | ||
4 | #ifdef CONFIG_PARAVIRT | ||
5 | #include <asm/paravirt.h> | ||
6 | #else | ||
7 | |||
4 | /* | 8 | /* |
5 | * Access to machine-specific registers (available on 586 and better only) | 9 | * Access to machine-specific registers (available on 586 and better only) |
6 | * Note: the rd* operations modify the parameters directly (without using | 10 | * Note: the rd* operations modify the parameters directly (without using |
@@ -77,6 +81,7 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val) | |||
77 | __asm__ __volatile__("rdpmc" \ | 81 | __asm__ __volatile__("rdpmc" \ |
78 | : "=a" (low), "=d" (high) \ | 82 | : "=a" (low), "=d" (high) \ |
79 | : "c" (counter)) | 83 | : "c" (counter)) |
84 | #endif /* !CONFIG_PARAVIRT */ | ||
80 | 85 | ||
81 | /* symbolic names for some interesting MSRs */ | 86 | /* symbolic names for some interesting MSRs */ |
82 | /* Intel defined MSRs. */ | 87 | /* Intel defined MSRs. */ |
@@ -141,6 +146,10 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val) | |||
141 | #define MSR_IA32_MC0_ADDR 0x402 | 146 | #define MSR_IA32_MC0_ADDR 0x402 |
142 | #define MSR_IA32_MC0_MISC 0x403 | 147 | #define MSR_IA32_MC0_MISC 0x403 |
143 | 148 | ||
149 | #define MSR_IA32_PEBS_ENABLE 0x3f1 | ||
150 | #define MSR_IA32_DS_AREA 0x600 | ||
151 | #define MSR_IA32_PERF_CAPABILITIES 0x345 | ||
152 | |||
144 | /* Pentium IV performance counter MSRs */ | 153 | /* Pentium IV performance counter MSRs */ |
145 | #define MSR_P4_BPU_PERFCTR0 0x300 | 154 | #define MSR_P4_BPU_PERFCTR0 0x300 |
146 | #define MSR_P4_BPU_PERFCTR1 0x301 | 155 | #define MSR_P4_BPU_PERFCTR1 0x301 |
@@ -284,4 +293,13 @@ static inline void wrmsrl (unsigned long msr, unsigned long long val) | |||
284 | #define MSR_TMTA_LRTI_READOUT 0x80868018 | 293 | #define MSR_TMTA_LRTI_READOUT 0x80868018 |
285 | #define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a | 294 | #define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a |
286 | 295 | ||
296 | /* Intel Core-based CPU performance counters */ | ||
297 | #define MSR_CORE_PERF_FIXED_CTR0 0x309 | ||
298 | #define MSR_CORE_PERF_FIXED_CTR1 0x30a | ||
299 | #define MSR_CORE_PERF_FIXED_CTR2 0x30b | ||
300 | #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d | ||
301 | #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e | ||
302 | #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f | ||
303 | #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 | ||
304 | |||
287 | #endif /* __ASM_MSR_H */ | 305 | #endif /* __ASM_MSR_H */ |
diff --git a/include/asm-i386/nmi.h b/include/asm-i386/nmi.h index 269d315719ca..b04333ea6f31 100644 --- a/include/asm-i386/nmi.h +++ b/include/asm-i386/nmi.h | |||
@@ -5,6 +5,9 @@ | |||
5 | #define ASM_NMI_H | 5 | #define ASM_NMI_H |
6 | 6 | ||
7 | #include <linux/pm.h> | 7 | #include <linux/pm.h> |
8 | #include <asm/irq.h> | ||
9 | |||
10 | #ifdef ARCH_HAS_NMI_WATCHDOG | ||
8 | 11 | ||
9 | /** | 12 | /** |
10 | * do_nmi_callback | 13 | * do_nmi_callback |
@@ -42,4 +45,9 @@ extern int proc_nmi_enabled(struct ctl_table *, int , struct file *, | |||
42 | void __user *, size_t *, loff_t *); | 45 | void __user *, size_t *, loff_t *); |
43 | extern int unknown_nmi_panic; | 46 | extern int unknown_nmi_panic; |
44 | 47 | ||
48 | void __trigger_all_cpu_backtrace(void); | ||
49 | #define trigger_all_cpu_backtrace() __trigger_all_cpu_backtrace() | ||
50 | |||
51 | #endif | ||
52 | |||
45 | #endif /* ASM_NMI_H */ | 53 | #endif /* ASM_NMI_H */ |
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h index f5bf544c729a..fd3f64ace248 100644 --- a/include/asm-i386/page.h +++ b/include/asm-i386/page.h | |||
@@ -52,6 +52,7 @@ typedef struct { unsigned long long pgprot; } pgprot_t; | |||
52 | #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) | 52 | #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) |
53 | #define __pmd(x) ((pmd_t) { (x) } ) | 53 | #define __pmd(x) ((pmd_t) { (x) } ) |
54 | #define HPAGE_SHIFT 21 | 54 | #define HPAGE_SHIFT 21 |
55 | #include <asm-generic/pgtable-nopud.h> | ||
55 | #else | 56 | #else |
56 | typedef struct { unsigned long pte_low; } pte_t; | 57 | typedef struct { unsigned long pte_low; } pte_t; |
57 | typedef struct { unsigned long pgd; } pgd_t; | 58 | typedef struct { unsigned long pgd; } pgd_t; |
@@ -59,6 +60,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
59 | #define boot_pte_t pte_t /* or would you rather have a typedef */ | 60 | #define boot_pte_t pte_t /* or would you rather have a typedef */ |
60 | #define pte_val(x) ((x).pte_low) | 61 | #define pte_val(x) ((x).pte_low) |
61 | #define HPAGE_SHIFT 22 | 62 | #define HPAGE_SHIFT 22 |
63 | #include <asm-generic/pgtable-nopmd.h> | ||
62 | #endif | 64 | #endif |
63 | #define PTE_MASK PAGE_MASK | 65 | #define PTE_MASK PAGE_MASK |
64 | 66 | ||
@@ -112,18 +114,18 @@ extern int page_is_ram(unsigned long pagenr); | |||
112 | 114 | ||
113 | #ifdef __ASSEMBLY__ | 115 | #ifdef __ASSEMBLY__ |
114 | #define __PAGE_OFFSET CONFIG_PAGE_OFFSET | 116 | #define __PAGE_OFFSET CONFIG_PAGE_OFFSET |
115 | #define __PHYSICAL_START CONFIG_PHYSICAL_START | ||
116 | #else | 117 | #else |
117 | #define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) | 118 | #define __PAGE_OFFSET ((unsigned long)CONFIG_PAGE_OFFSET) |
118 | #define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START) | ||
119 | #endif | 119 | #endif |
120 | #define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START) | ||
121 | 120 | ||
122 | 121 | ||
123 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) | 122 | #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) |
124 | #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) | 123 | #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE) |
125 | #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) | 124 | #define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE) |
126 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) | 125 | #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET) |
126 | /* __pa_symbol should be used for C visible symbols. | ||
127 | This seems to be the official gcc blessed way to do such arithmetic. */ | ||
128 | #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x),0)) | ||
127 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | 129 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) |
128 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 130 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
129 | #ifdef CONFIG_FLATMEM | 131 | #ifdef CONFIG_FLATMEM |
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h new file mode 100644 index 000000000000..9f06265065f4 --- /dev/null +++ b/include/asm-i386/paravirt.h | |||
@@ -0,0 +1,505 @@ | |||
1 | #ifndef __ASM_PARAVIRT_H | ||
2 | #define __ASM_PARAVIRT_H | ||
3 | /* Various instructions on x86 need to be replaced for | ||
4 | * para-virtualization: those hooks are defined here. */ | ||
5 | #include <linux/linkage.h> | ||
6 | #include <linux/stringify.h> | ||
7 | #include <asm/page.h> | ||
8 | |||
9 | #ifdef CONFIG_PARAVIRT | ||
10 | /* These are the most performance critical ops, so we want to be able to patch | ||
11 | * callers */ | ||
12 | #define PARAVIRT_IRQ_DISABLE 0 | ||
13 | #define PARAVIRT_IRQ_ENABLE 1 | ||
14 | #define PARAVIRT_RESTORE_FLAGS 2 | ||
15 | #define PARAVIRT_SAVE_FLAGS 3 | ||
16 | #define PARAVIRT_SAVE_FLAGS_IRQ_DISABLE 4 | ||
17 | #define PARAVIRT_INTERRUPT_RETURN 5 | ||
18 | #define PARAVIRT_STI_SYSEXIT 6 | ||
19 | |||
20 | /* Bitmask of what can be clobbered: usually at least eax. */ | ||
21 | #define CLBR_NONE 0x0 | ||
22 | #define CLBR_EAX 0x1 | ||
23 | #define CLBR_ECX 0x2 | ||
24 | #define CLBR_EDX 0x4 | ||
25 | #define CLBR_ANY 0x7 | ||
26 | |||
27 | #ifndef __ASSEMBLY__ | ||
28 | struct thread_struct; | ||
29 | struct Xgt_desc_struct; | ||
30 | struct tss_struct; | ||
31 | struct mm_struct; | ||
32 | struct paravirt_ops | ||
33 | { | ||
34 | unsigned int kernel_rpl; | ||
35 | int paravirt_enabled; | ||
36 | const char *name; | ||
37 | |||
38 | /* | ||
39 | * Patch may replace one of the defined code sequences with arbitrary | ||
40 | * code, subject to the same register constraints. This generally | ||
41 | * means the code is not free to clobber any registers other than EAX. | ||
42 | * The patch function should return the number of bytes of code | ||
43 | * generated, as we nop pad the rest in generic code. | ||
44 | */ | ||
45 | unsigned (*patch)(u8 type, u16 clobber, void *firstinsn, unsigned len); | ||
46 | |||
47 | void (*arch_setup)(void); | ||
48 | char *(*memory_setup)(void); | ||
49 | void (*init_IRQ)(void); | ||
50 | |||
51 | void (*banner)(void); | ||
52 | |||
53 | unsigned long (*get_wallclock)(void); | ||
54 | int (*set_wallclock)(unsigned long); | ||
55 | void (*time_init)(void); | ||
56 | |||
57 | /* All the function pointers here are declared as "fastcall" | ||
58 | so that we get a specific register-based calling | ||
59 | convention. This makes it easier to implement inline | ||
60 | assembler replacements. */ | ||
61 | |||
62 | void (fastcall *cpuid)(unsigned int *eax, unsigned int *ebx, | ||
63 | unsigned int *ecx, unsigned int *edx); | ||
64 | |||
65 | unsigned long (fastcall *get_debugreg)(int regno); | ||
66 | void (fastcall *set_debugreg)(int regno, unsigned long value); | ||
67 | |||
68 | void (fastcall *clts)(void); | ||
69 | |||
70 | unsigned long (fastcall *read_cr0)(void); | ||
71 | void (fastcall *write_cr0)(unsigned long); | ||
72 | |||
73 | unsigned long (fastcall *read_cr2)(void); | ||
74 | void (fastcall *write_cr2)(unsigned long); | ||
75 | |||
76 | unsigned long (fastcall *read_cr3)(void); | ||
77 | void (fastcall *write_cr3)(unsigned long); | ||
78 | |||
79 | unsigned long (fastcall *read_cr4_safe)(void); | ||
80 | unsigned long (fastcall *read_cr4)(void); | ||
81 | void (fastcall *write_cr4)(unsigned long); | ||
82 | |||
83 | unsigned long (fastcall *save_fl)(void); | ||
84 | void (fastcall *restore_fl)(unsigned long); | ||
85 | void (fastcall *irq_disable)(void); | ||
86 | void (fastcall *irq_enable)(void); | ||
87 | void (fastcall *safe_halt)(void); | ||
88 | void (fastcall *halt)(void); | ||
89 | void (fastcall *wbinvd)(void); | ||
90 | |||
91 | /* err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */ | ||
92 | u64 (fastcall *read_msr)(unsigned int msr, int *err); | ||
93 | int (fastcall *write_msr)(unsigned int msr, u64 val); | ||
94 | |||
95 | u64 (fastcall *read_tsc)(void); | ||
96 | u64 (fastcall *read_pmc)(void); | ||
97 | |||
98 | void (fastcall *load_tr_desc)(void); | ||
99 | void (fastcall *load_gdt)(const struct Xgt_desc_struct *); | ||
100 | void (fastcall *load_idt)(const struct Xgt_desc_struct *); | ||
101 | void (fastcall *store_gdt)(struct Xgt_desc_struct *); | ||
102 | void (fastcall *store_idt)(struct Xgt_desc_struct *); | ||
103 | void (fastcall *set_ldt)(const void *desc, unsigned entries); | ||
104 | unsigned long (fastcall *store_tr)(void); | ||
105 | void (fastcall *load_tls)(struct thread_struct *t, unsigned int cpu); | ||
106 | void (fastcall *write_ldt_entry)(void *dt, int entrynum, | ||
107 | u32 low, u32 high); | ||
108 | void (fastcall *write_gdt_entry)(void *dt, int entrynum, | ||
109 | u32 low, u32 high); | ||
110 | void (fastcall *write_idt_entry)(void *dt, int entrynum, | ||
111 | u32 low, u32 high); | ||
112 | void (fastcall *load_esp0)(struct tss_struct *tss, | ||
113 | struct thread_struct *thread); | ||
114 | |||
115 | void (fastcall *set_iopl_mask)(unsigned mask); | ||
116 | |||
117 | void (fastcall *io_delay)(void); | ||
118 | void (*const_udelay)(unsigned long loops); | ||
119 | |||
120 | #ifdef CONFIG_X86_LOCAL_APIC | ||
121 | void (fastcall *apic_write)(unsigned long reg, unsigned long v); | ||
122 | void (fastcall *apic_write_atomic)(unsigned long reg, unsigned long v); | ||
123 | unsigned long (fastcall *apic_read)(unsigned long reg); | ||
124 | #endif | ||
125 | |||
126 | void (fastcall *flush_tlb_user)(void); | ||
127 | void (fastcall *flush_tlb_kernel)(void); | ||
128 | void (fastcall *flush_tlb_single)(u32 addr); | ||
129 | |||
130 | void (fastcall *set_pte)(pte_t *ptep, pte_t pteval); | ||
131 | void (fastcall *set_pte_at)(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval); | ||
132 | void (fastcall *set_pmd)(pmd_t *pmdp, pmd_t pmdval); | ||
133 | void (fastcall *pte_update)(struct mm_struct *mm, u32 addr, pte_t *ptep); | ||
134 | void (fastcall *pte_update_defer)(struct mm_struct *mm, u32 addr, pte_t *ptep); | ||
135 | #ifdef CONFIG_X86_PAE | ||
136 | void (fastcall *set_pte_atomic)(pte_t *ptep, pte_t pteval); | ||
137 | void (fastcall *set_pte_present)(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); | ||
138 | void (fastcall *set_pud)(pud_t *pudp, pud_t pudval); | ||
139 | void (fastcall *pte_clear)(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | ||
140 | void (fastcall *pmd_clear)(pmd_t *pmdp); | ||
141 | #endif | ||
142 | |||
143 | /* These two are jmp to, not actually called. */ | ||
144 | void (fastcall *irq_enable_sysexit)(void); | ||
145 | void (fastcall *iret)(void); | ||
146 | }; | ||
147 | |||
148 | /* Mark a paravirt probe function. */ | ||
149 | #define paravirt_probe(fn) \ | ||
150 | static asmlinkage void (*__paravirtprobe_##fn)(void) __attribute_used__ \ | ||
151 | __attribute__((__section__(".paravirtprobe"))) = fn | ||
152 | |||
153 | extern struct paravirt_ops paravirt_ops; | ||
154 | |||
155 | #define paravirt_enabled() (paravirt_ops.paravirt_enabled) | ||
156 | |||
157 | static inline void load_esp0(struct tss_struct *tss, | ||
158 | struct thread_struct *thread) | ||
159 | { | ||
160 | paravirt_ops.load_esp0(tss, thread); | ||
161 | } | ||
162 | |||
163 | #define ARCH_SETUP paravirt_ops.arch_setup(); | ||
164 | static inline unsigned long get_wallclock(void) | ||
165 | { | ||
166 | return paravirt_ops.get_wallclock(); | ||
167 | } | ||
168 | |||
169 | static inline int set_wallclock(unsigned long nowtime) | ||
170 | { | ||
171 | return paravirt_ops.set_wallclock(nowtime); | ||
172 | } | ||
173 | |||
174 | static inline void do_time_init(void) | ||
175 | { | ||
176 | return paravirt_ops.time_init(); | ||
177 | } | ||
178 | |||
179 | /* The paravirtualized CPUID instruction. */ | ||
180 | static inline void __cpuid(unsigned int *eax, unsigned int *ebx, | ||
181 | unsigned int *ecx, unsigned int *edx) | ||
182 | { | ||
183 | paravirt_ops.cpuid(eax, ebx, ecx, edx); | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * These special macros can be used to get or set a debugging register | ||
188 | */ | ||
189 | #define get_debugreg(var, reg) var = paravirt_ops.get_debugreg(reg) | ||
190 | #define set_debugreg(val, reg) paravirt_ops.set_debugreg(reg, val) | ||
191 | |||
192 | #define clts() paravirt_ops.clts() | ||
193 | |||
194 | #define read_cr0() paravirt_ops.read_cr0() | ||
195 | #define write_cr0(x) paravirt_ops.write_cr0(x) | ||
196 | |||
197 | #define read_cr2() paravirt_ops.read_cr2() | ||
198 | #define write_cr2(x) paravirt_ops.write_cr2(x) | ||
199 | |||
200 | #define read_cr3() paravirt_ops.read_cr3() | ||
201 | #define write_cr3(x) paravirt_ops.write_cr3(x) | ||
202 | |||
203 | #define read_cr4() paravirt_ops.read_cr4() | ||
204 | #define read_cr4_safe(x) paravirt_ops.read_cr4_safe() | ||
205 | #define write_cr4(x) paravirt_ops.write_cr4(x) | ||
206 | |||
207 | static inline void raw_safe_halt(void) | ||
208 | { | ||
209 | paravirt_ops.safe_halt(); | ||
210 | } | ||
211 | |||
212 | static inline void halt(void) | ||
213 | { | ||
214 | paravirt_ops.safe_halt(); | ||
215 | } | ||
216 | #define wbinvd() paravirt_ops.wbinvd() | ||
217 | |||
218 | #define get_kernel_rpl() (paravirt_ops.kernel_rpl) | ||
219 | |||
220 | #define rdmsr(msr,val1,val2) do { \ | ||
221 | int _err; \ | ||
222 | u64 _l = paravirt_ops.read_msr(msr,&_err); \ | ||
223 | val1 = (u32)_l; \ | ||
224 | val2 = _l >> 32; \ | ||
225 | } while(0) | ||
226 | |||
227 | #define wrmsr(msr,val1,val2) do { \ | ||
228 | u64 _l = ((u64)(val2) << 32) | (val1); \ | ||
229 | paravirt_ops.write_msr((msr), _l); \ | ||
230 | } while(0) | ||
231 | |||
232 | #define rdmsrl(msr,val) do { \ | ||
233 | int _err; \ | ||
234 | val = paravirt_ops.read_msr((msr),&_err); \ | ||
235 | } while(0) | ||
236 | |||
237 | #define wrmsrl(msr,val) (paravirt_ops.write_msr((msr),(val))) | ||
238 | #define wrmsr_safe(msr,a,b) ({ \ | ||
239 | u64 _l = ((u64)(b) << 32) | (a); \ | ||
240 | paravirt_ops.write_msr((msr),_l); \ | ||
241 | }) | ||
242 | |||
243 | /* rdmsr with exception handling */ | ||
244 | #define rdmsr_safe(msr,a,b) ({ \ | ||
245 | int _err; \ | ||
246 | u64 _l = paravirt_ops.read_msr(msr,&_err); \ | ||
247 | (*a) = (u32)_l; \ | ||
248 | (*b) = _l >> 32; \ | ||
249 | _err; }) | ||
250 | |||
251 | #define rdtsc(low,high) do { \ | ||
252 | u64 _l = paravirt_ops.read_tsc(); \ | ||
253 | low = (u32)_l; \ | ||
254 | high = _l >> 32; \ | ||
255 | } while(0) | ||
256 | |||
257 | #define rdtscl(low) do { \ | ||
258 | u64 _l = paravirt_ops.read_tsc(); \ | ||
259 | low = (int)_l; \ | ||
260 | } while(0) | ||
261 | |||
262 | #define rdtscll(val) (val = paravirt_ops.read_tsc()) | ||
263 | |||
264 | #define write_tsc(val1,val2) wrmsr(0x10, val1, val2) | ||
265 | |||
266 | #define rdpmc(counter,low,high) do { \ | ||
267 | u64 _l = paravirt_ops.read_pmc(); \ | ||
268 | low = (u32)_l; \ | ||
269 | high = _l >> 32; \ | ||
270 | } while(0) | ||
271 | |||
272 | #define load_TR_desc() (paravirt_ops.load_tr_desc()) | ||
273 | #define load_gdt(dtr) (paravirt_ops.load_gdt(dtr)) | ||
274 | #define load_idt(dtr) (paravirt_ops.load_idt(dtr)) | ||
275 | #define set_ldt(addr, entries) (paravirt_ops.set_ldt((addr), (entries))) | ||
276 | #define store_gdt(dtr) (paravirt_ops.store_gdt(dtr)) | ||
277 | #define store_idt(dtr) (paravirt_ops.store_idt(dtr)) | ||
278 | #define store_tr(tr) ((tr) = paravirt_ops.store_tr()) | ||
279 | #define load_TLS(t,cpu) (paravirt_ops.load_tls((t),(cpu))) | ||
280 | #define write_ldt_entry(dt, entry, low, high) \ | ||
281 | (paravirt_ops.write_ldt_entry((dt), (entry), (low), (high))) | ||
282 | #define write_gdt_entry(dt, entry, low, high) \ | ||
283 | (paravirt_ops.write_gdt_entry((dt), (entry), (low), (high))) | ||
284 | #define write_idt_entry(dt, entry, low, high) \ | ||
285 | (paravirt_ops.write_idt_entry((dt), (entry), (low), (high))) | ||
286 | #define set_iopl_mask(mask) (paravirt_ops.set_iopl_mask(mask)) | ||
287 | |||
288 | /* The paravirtualized I/O functions */ | ||
289 | static inline void slow_down_io(void) { | ||
290 | paravirt_ops.io_delay(); | ||
291 | #ifdef REALLY_SLOW_IO | ||
292 | paravirt_ops.io_delay(); | ||
293 | paravirt_ops.io_delay(); | ||
294 | paravirt_ops.io_delay(); | ||
295 | #endif | ||
296 | } | ||
297 | |||
298 | #ifdef CONFIG_X86_LOCAL_APIC | ||
299 | /* | ||
300 | * Basic functions accessing APICs. | ||
301 | */ | ||
302 | static inline void apic_write(unsigned long reg, unsigned long v) | ||
303 | { | ||
304 | paravirt_ops.apic_write(reg,v); | ||
305 | } | ||
306 | |||
307 | static inline void apic_write_atomic(unsigned long reg, unsigned long v) | ||
308 | { | ||
309 | paravirt_ops.apic_write_atomic(reg,v); | ||
310 | } | ||
311 | |||
312 | static inline unsigned long apic_read(unsigned long reg) | ||
313 | { | ||
314 | return paravirt_ops.apic_read(reg); | ||
315 | } | ||
316 | #endif | ||
317 | |||
318 | |||
319 | #define __flush_tlb() paravirt_ops.flush_tlb_user() | ||
320 | #define __flush_tlb_global() paravirt_ops.flush_tlb_kernel() | ||
321 | #define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr) | ||
322 | |||
323 | static inline void set_pte(pte_t *ptep, pte_t pteval) | ||
324 | { | ||
325 | paravirt_ops.set_pte(ptep, pteval); | ||
326 | } | ||
327 | |||
328 | static inline void set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval) | ||
329 | { | ||
330 | paravirt_ops.set_pte_at(mm, addr, ptep, pteval); | ||
331 | } | ||
332 | |||
333 | static inline void set_pmd(pmd_t *pmdp, pmd_t pmdval) | ||
334 | { | ||
335 | paravirt_ops.set_pmd(pmdp, pmdval); | ||
336 | } | ||
337 | |||
338 | static inline void pte_update(struct mm_struct *mm, u32 addr, pte_t *ptep) | ||
339 | { | ||
340 | paravirt_ops.pte_update(mm, addr, ptep); | ||
341 | } | ||
342 | |||
343 | static inline void pte_update_defer(struct mm_struct *mm, u32 addr, pte_t *ptep) | ||
344 | { | ||
345 | paravirt_ops.pte_update_defer(mm, addr, ptep); | ||
346 | } | ||
347 | |||
348 | #ifdef CONFIG_X86_PAE | ||
349 | static inline void set_pte_atomic(pte_t *ptep, pte_t pteval) | ||
350 | { | ||
351 | paravirt_ops.set_pte_atomic(ptep, pteval); | ||
352 | } | ||
353 | |||
354 | static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) | ||
355 | { | ||
356 | paravirt_ops.set_pte_present(mm, addr, ptep, pte); | ||
357 | } | ||
358 | |||
359 | static inline void set_pud(pud_t *pudp, pud_t pudval) | ||
360 | { | ||
361 | paravirt_ops.set_pud(pudp, pudval); | ||
362 | } | ||
363 | |||
364 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
365 | { | ||
366 | paravirt_ops.pte_clear(mm, addr, ptep); | ||
367 | } | ||
368 | |||
369 | static inline void pmd_clear(pmd_t *pmdp) | ||
370 | { | ||
371 | paravirt_ops.pmd_clear(pmdp); | ||
372 | } | ||
373 | #endif | ||
374 | |||
375 | /* These all sit in the .parainstructions section to tell us what to patch. */ | ||
376 | struct paravirt_patch { | ||
377 | u8 *instr; /* original instructions */ | ||
378 | u8 instrtype; /* type of this instruction */ | ||
379 | u8 len; /* length of original instruction */ | ||
380 | u16 clobbers; /* what registers you may clobber */ | ||
381 | }; | ||
382 | |||
383 | #define paravirt_alt(insn_string, typenum, clobber) \ | ||
384 | "771:\n\t" insn_string "\n" "772:\n" \ | ||
385 | ".pushsection .parainstructions,\"a\"\n" \ | ||
386 | " .long 771b\n" \ | ||
387 | " .byte " __stringify(typenum) "\n" \ | ||
388 | " .byte 772b-771b\n" \ | ||
389 | " .short " __stringify(clobber) "\n" \ | ||
390 | ".popsection" | ||
391 | |||
392 | static inline unsigned long __raw_local_save_flags(void) | ||
393 | { | ||
394 | unsigned long f; | ||
395 | |||
396 | __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" | ||
397 | "call *%1;" | ||
398 | "popl %%edx; popl %%ecx", | ||
399 | PARAVIRT_SAVE_FLAGS, CLBR_NONE) | ||
400 | : "=a"(f): "m"(paravirt_ops.save_fl) | ||
401 | : "memory", "cc"); | ||
402 | return f; | ||
403 | } | ||
404 | |||
405 | static inline void raw_local_irq_restore(unsigned long f) | ||
406 | { | ||
407 | __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" | ||
408 | "call *%1;" | ||
409 | "popl %%edx; popl %%ecx", | ||
410 | PARAVIRT_RESTORE_FLAGS, CLBR_EAX) | ||
411 | : "=a"(f) : "m" (paravirt_ops.restore_fl), "0"(f) | ||
412 | : "memory", "cc"); | ||
413 | } | ||
414 | |||
415 | static inline void raw_local_irq_disable(void) | ||
416 | { | ||
417 | __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" | ||
418 | "call *%0;" | ||
419 | "popl %%edx; popl %%ecx", | ||
420 | PARAVIRT_IRQ_DISABLE, CLBR_EAX) | ||
421 | : : "m" (paravirt_ops.irq_disable) | ||
422 | : "memory", "eax", "cc"); | ||
423 | } | ||
424 | |||
425 | static inline void raw_local_irq_enable(void) | ||
426 | { | ||
427 | __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" | ||
428 | "call *%0;" | ||
429 | "popl %%edx; popl %%ecx", | ||
430 | PARAVIRT_IRQ_ENABLE, CLBR_EAX) | ||
431 | : : "m" (paravirt_ops.irq_enable) | ||
432 | : "memory", "eax", "cc"); | ||
433 | } | ||
434 | |||
435 | static inline unsigned long __raw_local_irq_save(void) | ||
436 | { | ||
437 | unsigned long f; | ||
438 | |||
439 | __asm__ __volatile__(paravirt_alt( "pushl %%ecx; pushl %%edx;" | ||
440 | "call *%1; pushl %%eax;" | ||
441 | "call *%2; popl %%eax;" | ||
442 | "popl %%edx; popl %%ecx", | ||
443 | PARAVIRT_SAVE_FLAGS_IRQ_DISABLE, | ||
444 | CLBR_NONE) | ||
445 | : "=a"(f) | ||
446 | : "m" (paravirt_ops.save_fl), | ||
447 | "m" (paravirt_ops.irq_disable) | ||
448 | : "memory", "cc"); | ||
449 | return f; | ||
450 | } | ||
451 | |||
452 | #define CLI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;" \ | ||
453 | "call *paravirt_ops+%c[irq_disable];" \ | ||
454 | "popl %%edx; popl %%ecx", \ | ||
455 | PARAVIRT_IRQ_DISABLE, CLBR_EAX) | ||
456 | |||
457 | #define STI_STRING paravirt_alt("pushl %%ecx; pushl %%edx;" \ | ||
458 | "call *paravirt_ops+%c[irq_enable];" \ | ||
459 | "popl %%edx; popl %%ecx", \ | ||
460 | PARAVIRT_IRQ_ENABLE, CLBR_EAX) | ||
461 | #define CLI_STI_CLOBBERS , "%eax" | ||
462 | #define CLI_STI_INPUT_ARGS \ | ||
463 | , \ | ||
464 | [irq_disable] "i" (offsetof(struct paravirt_ops, irq_disable)), \ | ||
465 | [irq_enable] "i" (offsetof(struct paravirt_ops, irq_enable)) | ||
466 | |||
467 | #else /* __ASSEMBLY__ */ | ||
468 | |||
469 | #define PARA_PATCH(ptype, clobbers, ops) \ | ||
470 | 771:; \ | ||
471 | ops; \ | ||
472 | 772:; \ | ||
473 | .pushsection .parainstructions,"a"; \ | ||
474 | .long 771b; \ | ||
475 | .byte ptype; \ | ||
476 | .byte 772b-771b; \ | ||
477 | .short clobbers; \ | ||
478 | .popsection | ||
479 | |||
480 | #define INTERRUPT_RETURN \ | ||
481 | PARA_PATCH(PARAVIRT_INTERRUPT_RETURN, CLBR_ANY, \ | ||
482 | jmp *%cs:paravirt_ops+PARAVIRT_iret) | ||
483 | |||
484 | #define DISABLE_INTERRUPTS(clobbers) \ | ||
485 | PARA_PATCH(PARAVIRT_IRQ_DISABLE, clobbers, \ | ||
486 | pushl %ecx; pushl %edx; \ | ||
487 | call *paravirt_ops+PARAVIRT_irq_disable; \ | ||
488 | popl %edx; popl %ecx) \ | ||
489 | |||
490 | #define ENABLE_INTERRUPTS(clobbers) \ | ||
491 | PARA_PATCH(PARAVIRT_IRQ_ENABLE, clobbers, \ | ||
492 | pushl %ecx; pushl %edx; \ | ||
493 | call *%cs:paravirt_ops+PARAVIRT_irq_enable; \ | ||
494 | popl %edx; popl %ecx) | ||
495 | |||
496 | #define ENABLE_INTERRUPTS_SYSEXIT \ | ||
497 | PARA_PATCH(PARAVIRT_STI_SYSEXIT, CLBR_ANY, \ | ||
498 | jmp *%cs:paravirt_ops+PARAVIRT_irq_enable_sysexit) | ||
499 | |||
500 | #define GET_CR0_INTO_EAX \ | ||
501 | call *paravirt_ops+PARAVIRT_read_cr0 | ||
502 | |||
503 | #endif /* __ASSEMBLY__ */ | ||
504 | #endif /* CONFIG_PARAVIRT */ | ||
505 | #endif /* __ASM_PARAVIRT_H */ | ||
diff --git a/include/asm-i386/pda.h b/include/asm-i386/pda.h new file mode 100644 index 000000000000..2ba2736aa109 --- /dev/null +++ b/include/asm-i386/pda.h | |||
@@ -0,0 +1,100 @@ | |||
1 | /* | ||
2 | Per-processor Data Areas | ||
3 | Jeremy Fitzhardinge <jeremy@goop.org> 2006 | ||
4 | Based on asm-x86_64/pda.h by Andi Kleen. | ||
5 | */ | ||
6 | #ifndef _I386_PDA_H | ||
7 | #define _I386_PDA_H | ||
8 | |||
9 | #include <linux/stddef.h> | ||
10 | #include <linux/types.h> | ||
11 | |||
12 | struct i386_pda | ||
13 | { | ||
14 | struct i386_pda *_pda; /* pointer to self */ | ||
15 | |||
16 | int cpu_number; | ||
17 | struct task_struct *pcurrent; /* current process */ | ||
18 | struct pt_regs *irq_regs; | ||
19 | }; | ||
20 | |||
21 | extern struct i386_pda *_cpu_pda[]; | ||
22 | |||
23 | #define cpu_pda(i) (_cpu_pda[i]) | ||
24 | |||
25 | #define pda_offset(field) offsetof(struct i386_pda, field) | ||
26 | |||
27 | extern void __bad_pda_field(void); | ||
28 | |||
29 | /* This variable is never instantiated. It is only used as a stand-in | ||
30 | for the real per-cpu PDA memory, so that gcc can understand what | ||
31 | memory operations the inline asms() below are performing. This | ||
32 | eliminates the need to make the asms volatile or have memory | ||
33 | clobbers, so gcc can readily analyse them. */ | ||
34 | extern struct i386_pda _proxy_pda; | ||
35 | |||
36 | #define pda_to_op(op,field,val) \ | ||
37 | do { \ | ||
38 | typedef typeof(_proxy_pda.field) T__; \ | ||
39 | if (0) { T__ tmp__; tmp__ = (val); } \ | ||
40 | switch (sizeof(_proxy_pda.field)) { \ | ||
41 | case 1: \ | ||
42 | asm(op "b %1,%%gs:%c2" \ | ||
43 | : "+m" (_proxy_pda.field) \ | ||
44 | :"ri" ((T__)val), \ | ||
45 | "i"(pda_offset(field))); \ | ||
46 | break; \ | ||
47 | case 2: \ | ||
48 | asm(op "w %1,%%gs:%c2" \ | ||
49 | : "+m" (_proxy_pda.field) \ | ||
50 | :"ri" ((T__)val), \ | ||
51 | "i"(pda_offset(field))); \ | ||
52 | break; \ | ||
53 | case 4: \ | ||
54 | asm(op "l %1,%%gs:%c2" \ | ||
55 | : "+m" (_proxy_pda.field) \ | ||
56 | :"ri" ((T__)val), \ | ||
57 | "i"(pda_offset(field))); \ | ||
58 | break; \ | ||
59 | default: __bad_pda_field(); \ | ||
60 | } \ | ||
61 | } while (0) | ||
62 | |||
63 | #define pda_from_op(op,field) \ | ||
64 | ({ \ | ||
65 | typeof(_proxy_pda.field) ret__; \ | ||
66 | switch (sizeof(_proxy_pda.field)) { \ | ||
67 | case 1: \ | ||
68 | asm(op "b %%gs:%c1,%0" \ | ||
69 | : "=r" (ret__) \ | ||
70 | : "i" (pda_offset(field)), \ | ||
71 | "m" (_proxy_pda.field)); \ | ||
72 | break; \ | ||
73 | case 2: \ | ||
74 | asm(op "w %%gs:%c1,%0" \ | ||
75 | : "=r" (ret__) \ | ||
76 | : "i" (pda_offset(field)), \ | ||
77 | "m" (_proxy_pda.field)); \ | ||
78 | break; \ | ||
79 | case 4: \ | ||
80 | asm(op "l %%gs:%c1,%0" \ | ||
81 | : "=r" (ret__) \ | ||
82 | : "i" (pda_offset(field)), \ | ||
83 | "m" (_proxy_pda.field)); \ | ||
84 | break; \ | ||
85 | default: __bad_pda_field(); \ | ||
86 | } \ | ||
87 | ret__; }) | ||
88 | |||
89 | /* Return a pointer to a pda field */ | ||
90 | #define pda_addr(field) \ | ||
91 | ((typeof(_proxy_pda.field) *)((unsigned char *)read_pda(_pda) + \ | ||
92 | pda_offset(field))) | ||
93 | |||
94 | #define read_pda(field) pda_from_op("mov",field) | ||
95 | #define write_pda(field,val) pda_to_op("mov",field,val) | ||
96 | #define add_pda(field,val) pda_to_op("add",field,val) | ||
97 | #define sub_pda(field,val) pda_to_op("sub",field,val) | ||
98 | #define or_pda(field,val) pda_to_op("or",field,val) | ||
99 | |||
100 | #endif /* _I386_PDA_H */ | ||
diff --git a/include/asm-i386/percpu.h b/include/asm-i386/percpu.h index 5764afa4b6a4..510ae1d3486c 100644 --- a/include/asm-i386/percpu.h +++ b/include/asm-i386/percpu.h | |||
@@ -1,6 +1,31 @@ | |||
1 | #ifndef __ARCH_I386_PERCPU__ | 1 | #ifndef __ARCH_I386_PERCPU__ |
2 | #define __ARCH_I386_PERCPU__ | 2 | #define __ARCH_I386_PERCPU__ |
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | ||
4 | #include <asm-generic/percpu.h> | 5 | #include <asm-generic/percpu.h> |
6 | #else | ||
7 | |||
8 | /* | ||
9 | * PER_CPU finds an address of a per-cpu variable. | ||
10 | * | ||
11 | * Args: | ||
12 | * var - variable name | ||
13 | * cpu - 32bit register containing the current CPU number | ||
14 | * | ||
15 | * The resulting address is stored in the "cpu" argument. | ||
16 | * | ||
17 | * Example: | ||
18 | * PER_CPU(cpu_gdt_descr, %ebx) | ||
19 | */ | ||
20 | #ifdef CONFIG_SMP | ||
21 | #define PER_CPU(var, cpu) \ | ||
22 | movl __per_cpu_offset(,cpu,4), cpu; \ | ||
23 | addl $per_cpu__/**/var, cpu; | ||
24 | #else /* ! SMP */ | ||
25 | #define PER_CPU(var, cpu) \ | ||
26 | movl $per_cpu__/**/var, cpu; | ||
27 | #endif /* SMP */ | ||
28 | |||
29 | #endif /* !__ASSEMBLY__ */ | ||
5 | 30 | ||
6 | #endif /* __ARCH_I386_PERCPU__ */ | 31 | #endif /* __ARCH_I386_PERCPU__ */ |
diff --git a/include/asm-i386/pgtable-2level.h b/include/asm-i386/pgtable-2level.h index 8d8d3b9ecdb0..38c3fcc0676d 100644 --- a/include/asm-i386/pgtable-2level.h +++ b/include/asm-i386/pgtable-2level.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _I386_PGTABLE_2LEVEL_H | 1 | #ifndef _I386_PGTABLE_2LEVEL_H |
2 | #define _I386_PGTABLE_2LEVEL_H | 2 | #define _I386_PGTABLE_2LEVEL_H |
3 | 3 | ||
4 | #include <asm-generic/pgtable-nopmd.h> | ||
5 | |||
6 | #define pte_ERROR(e) \ | 4 | #define pte_ERROR(e) \ |
7 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) | 5 | printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low) |
8 | #define pgd_ERROR(e) \ | 6 | #define pgd_ERROR(e) \ |
@@ -13,17 +11,19 @@ | |||
13 | * within a page table are directly modified. Thus, the following | 11 | * within a page table are directly modified. Thus, the following |
14 | * hook is made available. | 12 | * hook is made available. |
15 | */ | 13 | */ |
14 | #ifndef CONFIG_PARAVIRT | ||
16 | #define set_pte(pteptr, pteval) (*(pteptr) = pteval) | 15 | #define set_pte(pteptr, pteval) (*(pteptr) = pteval) |
17 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) | 16 | #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval) |
17 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) | ||
18 | #endif | ||
19 | |||
18 | #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) | 20 | #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval) |
19 | #define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval) | 21 | #define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval) |
20 | #define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval)) | ||
21 | 22 | ||
22 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) | 23 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) |
23 | #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) | 24 | #define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0) |
24 | 25 | ||
25 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 26 | #define raw_ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte_low, 0)) |
26 | #define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte_low, 0)) | ||
27 | 27 | ||
28 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 28 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
29 | #define pte_none(x) (!(x).pte_low) | 29 | #define pte_none(x) (!(x).pte_low) |
diff --git a/include/asm-i386/pgtable-3level.h b/include/asm-i386/pgtable-3level.h index c2d701ea35be..7a2318f38303 100644 --- a/include/asm-i386/pgtable-3level.h +++ b/include/asm-i386/pgtable-3level.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef _I386_PGTABLE_3LEVEL_H | 1 | #ifndef _I386_PGTABLE_3LEVEL_H |
2 | #define _I386_PGTABLE_3LEVEL_H | 2 | #define _I386_PGTABLE_3LEVEL_H |
3 | 3 | ||
4 | #include <asm-generic/pgtable-nopud.h> | ||
5 | |||
6 | /* | 4 | /* |
7 | * Intel Physical Address Extension (PAE) Mode - three-level page | 5 | * Intel Physical Address Extension (PAE) Mode - three-level page |
8 | * tables on PPro+ CPUs. | 6 | * tables on PPro+ CPUs. |
@@ -44,6 +42,7 @@ static inline int pte_exec_kernel(pte_t pte) | |||
44 | return pte_x(pte); | 42 | return pte_x(pte); |
45 | } | 43 | } |
46 | 44 | ||
45 | #ifndef CONFIG_PARAVIRT | ||
47 | /* Rules for using set_pte: the pte being assigned *must* be | 46 | /* Rules for using set_pte: the pte being assigned *must* be |
48 | * either not present or in a state where the hardware will | 47 | * either not present or in a state where the hardware will |
49 | * not attempt to update the pte. In places where this is | 48 | * not attempt to update the pte. In places where this is |
@@ -81,25 +80,6 @@ static inline void set_pte_present(struct mm_struct *mm, unsigned long addr, pte | |||
81 | (*(pudptr) = (pudval)) | 80 | (*(pudptr) = (pudval)) |
82 | 81 | ||
83 | /* | 82 | /* |
84 | * Pentium-II erratum A13: in PAE mode we explicitly have to flush | ||
85 | * the TLB via cr3 if the top-level pgd is changed... | ||
86 | * We do not let the generic code free and clear pgd entries due to | ||
87 | * this erratum. | ||
88 | */ | ||
89 | static inline void pud_clear (pud_t * pud) { } | ||
90 | |||
91 | #define pud_page(pud) \ | ||
92 | ((struct page *) __va(pud_val(pud) & PAGE_MASK)) | ||
93 | |||
94 | #define pud_page_vaddr(pud) \ | ||
95 | ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) | ||
96 | |||
97 | |||
98 | /* Find an entry in the second-level page table.. */ | ||
99 | #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ | ||
100 | pmd_index(address)) | ||
101 | |||
102 | /* | ||
103 | * For PTEs and PDEs, we must clear the P-bit first when clearing a page table | 83 | * For PTEs and PDEs, we must clear the P-bit first when clearing a page table |
104 | * entry, so clear the bottom half first and enforce ordering with a compiler | 84 | * entry, so clear the bottom half first and enforce ordering with a compiler |
105 | * barrier. | 85 | * barrier. |
@@ -118,9 +98,28 @@ static inline void pmd_clear(pmd_t *pmd) | |||
118 | smp_wmb(); | 98 | smp_wmb(); |
119 | *(tmp + 1) = 0; | 99 | *(tmp + 1) = 0; |
120 | } | 100 | } |
101 | #endif | ||
102 | |||
103 | /* | ||
104 | * Pentium-II erratum A13: in PAE mode we explicitly have to flush | ||
105 | * the TLB via cr3 if the top-level pgd is changed... | ||
106 | * We do not let the generic code free and clear pgd entries due to | ||
107 | * this erratum. | ||
108 | */ | ||
109 | static inline void pud_clear (pud_t * pud) { } | ||
110 | |||
111 | #define pud_page(pud) \ | ||
112 | ((struct page *) __va(pud_val(pud) & PAGE_MASK)) | ||
113 | |||
114 | #define pud_page_vaddr(pud) \ | ||
115 | ((unsigned long) __va(pud_val(pud) & PAGE_MASK)) | ||
116 | |||
117 | |||
118 | /* Find an entry in the second-level page table.. */ | ||
119 | #define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \ | ||
120 | pmd_index(address)) | ||
121 | 121 | ||
122 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | 122 | static inline pte_t raw_ptep_get_and_clear(pte_t *ptep) |
123 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
124 | { | 123 | { |
125 | pte_t res; | 124 | pte_t res; |
126 | 125 | ||
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index bfee7ddfff53..e6a4723f0eb1 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/processor.h> | 15 | #include <asm/processor.h> |
16 | #include <asm/fixmap.h> | 16 | #include <asm/fixmap.h> |
17 | #include <linux/threads.h> | 17 | #include <linux/threads.h> |
18 | #include <asm/paravirt.h> | ||
18 | 19 | ||
19 | #ifndef _I386_BITOPS_H | 20 | #ifndef _I386_BITOPS_H |
20 | #include <asm/bitops.h> | 21 | #include <asm/bitops.h> |
@@ -246,6 +247,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p | |||
246 | # include <asm/pgtable-2level.h> | 247 | # include <asm/pgtable-2level.h> |
247 | #endif | 248 | #endif |
248 | 249 | ||
250 | #ifndef CONFIG_PARAVIRT | ||
249 | /* | 251 | /* |
250 | * Rules for using pte_update - it must be called after any PTE update which | 252 | * Rules for using pte_update - it must be called after any PTE update which |
251 | * has not been done using the set_pte / clear_pte interfaces. It is used by | 253 | * has not been done using the set_pte / clear_pte interfaces. It is used by |
@@ -261,7 +263,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p | |||
261 | */ | 263 | */ |
262 | #define pte_update(mm, addr, ptep) do { } while (0) | 264 | #define pte_update(mm, addr, ptep) do { } while (0) |
263 | #define pte_update_defer(mm, addr, ptep) do { } while (0) | 265 | #define pte_update_defer(mm, addr, ptep) do { } while (0) |
264 | 266 | #endif | |
265 | 267 | ||
266 | /* | 268 | /* |
267 | * We only update the dirty/accessed state if we set | 269 | * We only update the dirty/accessed state if we set |
@@ -275,7 +277,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p | |||
275 | do { \ | 277 | do { \ |
276 | if (dirty) { \ | 278 | if (dirty) { \ |
277 | (ptep)->pte_low = (entry).pte_low; \ | 279 | (ptep)->pte_low = (entry).pte_low; \ |
278 | pte_update_defer((vma)->vm_mm, (addr), (ptep)); \ | 280 | pte_update_defer((vma)->vm_mm, (address), (ptep)); \ |
279 | flush_tlb_page(vma, address); \ | 281 | flush_tlb_page(vma, address); \ |
280 | } \ | 282 | } \ |
281 | } while (0) | 283 | } while (0) |
@@ -305,7 +307,7 @@ do { \ | |||
305 | __dirty = pte_dirty(*(ptep)); \ | 307 | __dirty = pte_dirty(*(ptep)); \ |
306 | if (__dirty) { \ | 308 | if (__dirty) { \ |
307 | clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \ | 309 | clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \ |
308 | pte_update_defer((vma)->vm_mm, (addr), (ptep)); \ | 310 | pte_update_defer((vma)->vm_mm, (address), (ptep)); \ |
309 | flush_tlb_page(vma, address); \ | 311 | flush_tlb_page(vma, address); \ |
310 | } \ | 312 | } \ |
311 | __dirty; \ | 313 | __dirty; \ |
@@ -318,12 +320,20 @@ do { \ | |||
318 | __young = pte_young(*(ptep)); \ | 320 | __young = pte_young(*(ptep)); \ |
319 | if (__young) { \ | 321 | if (__young) { \ |
320 | clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \ | 322 | clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \ |
321 | pte_update_defer((vma)->vm_mm, (addr), (ptep)); \ | 323 | pte_update_defer((vma)->vm_mm, (address), (ptep)); \ |
322 | flush_tlb_page(vma, address); \ | 324 | flush_tlb_page(vma, address); \ |
323 | } \ | 325 | } \ |
324 | __young; \ | 326 | __young; \ |
325 | }) | 327 | }) |
326 | 328 | ||
329 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
330 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
331 | { | ||
332 | pte_t pte = raw_ptep_get_and_clear(ptep); | ||
333 | pte_update(mm, addr, ptep); | ||
334 | return pte; | ||
335 | } | ||
336 | |||
327 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | 337 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL |
328 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) | 338 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) |
329 | { | 339 | { |
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index e0ddca94d50c..a52d65440429 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/threads.h> | 20 | #include <linux/threads.h> |
21 | #include <asm/percpu.h> | 21 | #include <asm/percpu.h> |
22 | #include <linux/cpumask.h> | 22 | #include <linux/cpumask.h> |
23 | #include <linux/init.h> | ||
23 | 24 | ||
24 | /* flag for disabling the tsc */ | 25 | /* flag for disabling the tsc */ |
25 | extern int tsc_disable; | 26 | extern int tsc_disable; |
@@ -72,6 +73,7 @@ struct cpuinfo_x86 { | |||
72 | #endif | 73 | #endif |
73 | unsigned char x86_max_cores; /* cpuid returned max cores value */ | 74 | unsigned char x86_max_cores; /* cpuid returned max cores value */ |
74 | unsigned char apicid; | 75 | unsigned char apicid; |
76 | unsigned short x86_clflush_size; | ||
75 | #ifdef CONFIG_SMP | 77 | #ifdef CONFIG_SMP |
76 | unsigned char booted_cores; /* number of cores as seen by OS */ | 78 | unsigned char booted_cores; /* number of cores as seen by OS */ |
77 | __u8 phys_proc_id; /* Physical processor id. */ | 79 | __u8 phys_proc_id; /* Physical processor id. */ |
@@ -111,6 +113,8 @@ extern struct cpuinfo_x86 cpu_data[]; | |||
111 | extern int cpu_llc_id[NR_CPUS]; | 113 | extern int cpu_llc_id[NR_CPUS]; |
112 | extern char ignore_fpu_irq; | 114 | extern char ignore_fpu_irq; |
113 | 115 | ||
116 | void __init cpu_detect(struct cpuinfo_x86 *c); | ||
117 | |||
114 | extern void identify_cpu(struct cpuinfo_x86 *); | 118 | extern void identify_cpu(struct cpuinfo_x86 *); |
115 | extern void print_cpu_info(struct cpuinfo_x86 *); | 119 | extern void print_cpu_info(struct cpuinfo_x86 *); |
116 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); | 120 | extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c); |
@@ -143,8 +147,8 @@ static inline void detect_ht(struct cpuinfo_x86 *c) {} | |||
143 | #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ | 147 | #define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */ |
144 | #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ | 148 | #define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */ |
145 | 149 | ||
146 | static inline void __cpuid(unsigned int *eax, unsigned int *ebx, | 150 | static inline fastcall void native_cpuid(unsigned int *eax, unsigned int *ebx, |
147 | unsigned int *ecx, unsigned int *edx) | 151 | unsigned int *ecx, unsigned int *edx) |
148 | { | 152 | { |
149 | /* ecx is often an input as well as an output. */ | 153 | /* ecx is often an input as well as an output. */ |
150 | __asm__("cpuid" | 154 | __asm__("cpuid" |
@@ -155,59 +159,6 @@ static inline void __cpuid(unsigned int *eax, unsigned int *ebx, | |||
155 | : "0" (*eax), "2" (*ecx)); | 159 | : "0" (*eax), "2" (*ecx)); |
156 | } | 160 | } |
157 | 161 | ||
158 | /* | ||
159 | * Generic CPUID function | ||
160 | * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx | ||
161 | * resulting in stale register contents being returned. | ||
162 | */ | ||
163 | static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) | ||
164 | { | ||
165 | *eax = op; | ||
166 | *ecx = 0; | ||
167 | __cpuid(eax, ebx, ecx, edx); | ||
168 | } | ||
169 | |||
170 | /* Some CPUID calls want 'count' to be placed in ecx */ | ||
171 | static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, | ||
172 | int *edx) | ||
173 | { | ||
174 | *eax = op; | ||
175 | *ecx = count; | ||
176 | __cpuid(eax, ebx, ecx, edx); | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * CPUID functions returning a single datum | ||
181 | */ | ||
182 | static inline unsigned int cpuid_eax(unsigned int op) | ||
183 | { | ||
184 | unsigned int eax, ebx, ecx, edx; | ||
185 | |||
186 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
187 | return eax; | ||
188 | } | ||
189 | static inline unsigned int cpuid_ebx(unsigned int op) | ||
190 | { | ||
191 | unsigned int eax, ebx, ecx, edx; | ||
192 | |||
193 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
194 | return ebx; | ||
195 | } | ||
196 | static inline unsigned int cpuid_ecx(unsigned int op) | ||
197 | { | ||
198 | unsigned int eax, ebx, ecx, edx; | ||
199 | |||
200 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
201 | return ecx; | ||
202 | } | ||
203 | static inline unsigned int cpuid_edx(unsigned int op) | ||
204 | { | ||
205 | unsigned int eax, ebx, ecx, edx; | ||
206 | |||
207 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
208 | return edx; | ||
209 | } | ||
210 | |||
211 | #define load_cr3(pgdir) write_cr3(__pa(pgdir)) | 162 | #define load_cr3(pgdir) write_cr3(__pa(pgdir)) |
212 | 163 | ||
213 | /* | 164 | /* |
@@ -473,6 +424,7 @@ struct thread_struct { | |||
473 | .vm86_info = NULL, \ | 424 | .vm86_info = NULL, \ |
474 | .sysenter_cs = __KERNEL_CS, \ | 425 | .sysenter_cs = __KERNEL_CS, \ |
475 | .io_bitmap_ptr = NULL, \ | 426 | .io_bitmap_ptr = NULL, \ |
427 | .gs = __KERNEL_PDA, \ | ||
476 | } | 428 | } |
477 | 429 | ||
478 | /* | 430 | /* |
@@ -489,18 +441,9 @@ struct thread_struct { | |||
489 | .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ | 441 | .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \ |
490 | } | 442 | } |
491 | 443 | ||
492 | static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) | ||
493 | { | ||
494 | tss->esp0 = thread->esp0; | ||
495 | /* This can only happen when SEP is enabled, no need to test "SEP"arately */ | ||
496 | if (unlikely(tss->ss1 != thread->sysenter_cs)) { | ||
497 | tss->ss1 = thread->sysenter_cs; | ||
498 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | ||
499 | } | ||
500 | } | ||
501 | |||
502 | #define start_thread(regs, new_eip, new_esp) do { \ | 444 | #define start_thread(regs, new_eip, new_esp) do { \ |
503 | __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \ | 445 | __asm__("movl %0,%%fs": :"r" (0)); \ |
446 | regs->xgs = 0; \ | ||
504 | set_fs(USER_DS); \ | 447 | set_fs(USER_DS); \ |
505 | regs->xds = __USER_DS; \ | 448 | regs->xds = __USER_DS; \ |
506 | regs->xes = __USER_DS; \ | 449 | regs->xes = __USER_DS; \ |
@@ -510,33 +453,6 @@ static inline void load_esp0(struct tss_struct *tss, struct thread_struct *threa | |||
510 | regs->esp = new_esp; \ | 453 | regs->esp = new_esp; \ |
511 | } while (0) | 454 | } while (0) |
512 | 455 | ||
513 | /* | ||
514 | * These special macros can be used to get or set a debugging register | ||
515 | */ | ||
516 | #define get_debugreg(var, register) \ | ||
517 | __asm__("movl %%db" #register ", %0" \ | ||
518 | :"=r" (var)) | ||
519 | #define set_debugreg(value, register) \ | ||
520 | __asm__("movl %0,%%db" #register \ | ||
521 | : /* no output */ \ | ||
522 | :"r" (value)) | ||
523 | |||
524 | /* | ||
525 | * Set IOPL bits in EFLAGS from given mask | ||
526 | */ | ||
527 | static inline void set_iopl_mask(unsigned mask) | ||
528 | { | ||
529 | unsigned int reg; | ||
530 | __asm__ __volatile__ ("pushfl;" | ||
531 | "popl %0;" | ||
532 | "andl %1, %0;" | ||
533 | "orl %2, %0;" | ||
534 | "pushl %0;" | ||
535 | "popfl" | ||
536 | : "=&r" (reg) | ||
537 | : "i" (~X86_EFLAGS_IOPL), "r" (mask)); | ||
538 | } | ||
539 | |||
540 | /* Forward declaration, a strange C thing */ | 456 | /* Forward declaration, a strange C thing */ |
541 | struct task_struct; | 457 | struct task_struct; |
542 | struct mm_struct; | 458 | struct mm_struct; |
@@ -628,6 +544,105 @@ static inline void rep_nop(void) | |||
628 | 544 | ||
629 | #define cpu_relax() rep_nop() | 545 | #define cpu_relax() rep_nop() |
630 | 546 | ||
547 | #ifdef CONFIG_PARAVIRT | ||
548 | #include <asm/paravirt.h> | ||
549 | #else | ||
550 | #define paravirt_enabled() 0 | ||
551 | #define __cpuid native_cpuid | ||
552 | |||
553 | static inline void load_esp0(struct tss_struct *tss, struct thread_struct *thread) | ||
554 | { | ||
555 | tss->esp0 = thread->esp0; | ||
556 | /* This can only happen when SEP is enabled, no need to test "SEP"arately */ | ||
557 | if (unlikely(tss->ss1 != thread->sysenter_cs)) { | ||
558 | tss->ss1 = thread->sysenter_cs; | ||
559 | wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0); | ||
560 | } | ||
561 | } | ||
562 | |||
563 | /* | ||
564 | * These special macros can be used to get or set a debugging register | ||
565 | */ | ||
566 | #define get_debugreg(var, register) \ | ||
567 | __asm__("movl %%db" #register ", %0" \ | ||
568 | :"=r" (var)) | ||
569 | #define set_debugreg(value, register) \ | ||
570 | __asm__("movl %0,%%db" #register \ | ||
571 | : /* no output */ \ | ||
572 | :"r" (value)) | ||
573 | |||
574 | #define set_iopl_mask native_set_iopl_mask | ||
575 | #endif /* CONFIG_PARAVIRT */ | ||
576 | |||
577 | /* | ||
578 | * Set IOPL bits in EFLAGS from given mask | ||
579 | */ | ||
580 | static fastcall inline void native_set_iopl_mask(unsigned mask) | ||
581 | { | ||
582 | unsigned int reg; | ||
583 | __asm__ __volatile__ ("pushfl;" | ||
584 | "popl %0;" | ||
585 | "andl %1, %0;" | ||
586 | "orl %2, %0;" | ||
587 | "pushl %0;" | ||
588 | "popfl" | ||
589 | : "=&r" (reg) | ||
590 | : "i" (~X86_EFLAGS_IOPL), "r" (mask)); | ||
591 | } | ||
592 | |||
593 | /* | ||
594 | * Generic CPUID function | ||
595 | * clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx | ||
596 | * resulting in stale register contents being returned. | ||
597 | */ | ||
598 | static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx) | ||
599 | { | ||
600 | *eax = op; | ||
601 | *ecx = 0; | ||
602 | __cpuid(eax, ebx, ecx, edx); | ||
603 | } | ||
604 | |||
605 | /* Some CPUID calls want 'count' to be placed in ecx */ | ||
606 | static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx, | ||
607 | int *edx) | ||
608 | { | ||
609 | *eax = op; | ||
610 | *ecx = count; | ||
611 | __cpuid(eax, ebx, ecx, edx); | ||
612 | } | ||
613 | |||
614 | /* | ||
615 | * CPUID functions returning a single datum | ||
616 | */ | ||
617 | static inline unsigned int cpuid_eax(unsigned int op) | ||
618 | { | ||
619 | unsigned int eax, ebx, ecx, edx; | ||
620 | |||
621 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
622 | return eax; | ||
623 | } | ||
624 | static inline unsigned int cpuid_ebx(unsigned int op) | ||
625 | { | ||
626 | unsigned int eax, ebx, ecx, edx; | ||
627 | |||
628 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
629 | return ebx; | ||
630 | } | ||
631 | static inline unsigned int cpuid_ecx(unsigned int op) | ||
632 | { | ||
633 | unsigned int eax, ebx, ecx, edx; | ||
634 | |||
635 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
636 | return ecx; | ||
637 | } | ||
638 | static inline unsigned int cpuid_edx(unsigned int op) | ||
639 | { | ||
640 | unsigned int eax, ebx, ecx, edx; | ||
641 | |||
642 | cpuid(op, &eax, &ebx, &ecx, &edx); | ||
643 | return edx; | ||
644 | } | ||
645 | |||
631 | /* generic versions from gas */ | 646 | /* generic versions from gas */ |
632 | #define GENERIC_NOP1 ".byte 0x90\n" | 647 | #define GENERIC_NOP1 ".byte 0x90\n" |
633 | #define GENERIC_NOP2 ".byte 0x89,0xf6\n" | 648 | #define GENERIC_NOP2 ".byte 0x89,0xf6\n" |
@@ -727,4 +742,7 @@ extern unsigned long boot_option_idle_override; | |||
727 | extern void enable_sep_cpu(void); | 742 | extern void enable_sep_cpu(void); |
728 | extern int sysenter_setup(void); | 743 | extern int sysenter_setup(void); |
729 | 744 | ||
745 | extern int init_gdt(int cpu, struct task_struct *idle); | ||
746 | extern void secondary_cpu_init(void); | ||
747 | |||
730 | #endif /* __ASM_I386_PROCESSOR_H */ | 748 | #endif /* __ASM_I386_PROCESSOR_H */ |
diff --git a/include/asm-i386/ptrace.h b/include/asm-i386/ptrace.h index d505f501077a..bdbc894339b4 100644 --- a/include/asm-i386/ptrace.h +++ b/include/asm-i386/ptrace.h | |||
@@ -16,6 +16,8 @@ struct pt_regs { | |||
16 | long eax; | 16 | long eax; |
17 | int xds; | 17 | int xds; |
18 | int xes; | 18 | int xes; |
19 | /* int xfs; */ | ||
20 | int xgs; | ||
19 | long orig_eax; | 21 | long orig_eax; |
20 | long eip; | 22 | long eip; |
21 | int xcs; | 23 | int xcs; |
diff --git a/include/asm-i386/segment.h b/include/asm-i386/segment.h index b7ab59685ba7..3c796af33776 100644 --- a/include/asm-i386/segment.h +++ b/include/asm-i386/segment.h | |||
@@ -39,7 +39,7 @@ | |||
39 | * 25 - APM BIOS support | 39 | * 25 - APM BIOS support |
40 | * | 40 | * |
41 | * 26 - ESPFIX small SS | 41 | * 26 - ESPFIX small SS |
42 | * 27 - unused | 42 | * 27 - PDA [ per-cpu private data area ] |
43 | * 28 - unused | 43 | * 28 - unused |
44 | * 29 - unused | 44 | * 29 - unused |
45 | * 30 - unused | 45 | * 30 - unused |
@@ -74,6 +74,9 @@ | |||
74 | #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) | 74 | #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) |
75 | #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) | 75 | #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) |
76 | 76 | ||
77 | #define GDT_ENTRY_PDA (GDT_ENTRY_KERNEL_BASE + 15) | ||
78 | #define __KERNEL_PDA (GDT_ENTRY_PDA * 8) | ||
79 | |||
77 | #define GDT_ENTRY_DOUBLEFAULT_TSS 31 | 80 | #define GDT_ENTRY_DOUBLEFAULT_TSS 31 |
78 | 81 | ||
79 | /* | 82 | /* |
@@ -128,5 +131,7 @@ | |||
128 | #define SEGMENT_LDT 0x4 | 131 | #define SEGMENT_LDT 0x4 |
129 | #define SEGMENT_GDT 0x0 | 132 | #define SEGMENT_GDT 0x0 |
130 | 133 | ||
134 | #ifndef CONFIG_PARAVIRT | ||
131 | #define get_kernel_rpl() 0 | 135 | #define get_kernel_rpl() 0 |
132 | #endif | 136 | #endif |
137 | #endif | ||
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h index c5b504bfbaad..67659dbaf120 100644 --- a/include/asm-i386/setup.h +++ b/include/asm-i386/setup.h | |||
@@ -70,6 +70,7 @@ extern unsigned char boot_params[PARAM_SIZE]; | |||
70 | struct e820entry; | 70 | struct e820entry; |
71 | 71 | ||
72 | char * __init machine_specific_memory_setup(void); | 72 | char * __init machine_specific_memory_setup(void); |
73 | char *memory_setup(void); | ||
73 | 74 | ||
74 | int __init copy_e820_map(struct e820entry * biosmap, int nr_map); | 75 | int __init copy_e820_map(struct e820entry * biosmap, int nr_map); |
75 | int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); | 76 | int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map); |
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h index bd59c1508e71..64fe624c02ca 100644 --- a/include/asm-i386/smp.h +++ b/include/asm-i386/smp.h | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | #include <linux/threads.h> | 9 | #include <linux/threads.h> |
10 | #include <linux/cpumask.h> | 10 | #include <linux/cpumask.h> |
11 | #include <asm/pda.h> | ||
11 | #endif | 12 | #endif |
12 | 13 | ||
13 | #ifdef CONFIG_X86_LOCAL_APIC | 14 | #ifdef CONFIG_X86_LOCAL_APIC |
@@ -56,7 +57,7 @@ extern void cpu_uninit(void); | |||
56 | * from the initial startup. We map APIC_BASE very early in page_setup(), | 57 | * from the initial startup. We map APIC_BASE very early in page_setup(), |
57 | * so this is correct in the x86 case. | 58 | * so this is correct in the x86 case. |
58 | */ | 59 | */ |
59 | #define raw_smp_processor_id() (current_thread_info()->cpu) | 60 | #define raw_smp_processor_id() (read_pda(cpu_number)) |
60 | 61 | ||
61 | extern cpumask_t cpu_callout_map; | 62 | extern cpumask_t cpu_callout_map; |
62 | extern cpumask_t cpu_callin_map; | 63 | extern cpumask_t cpu_callin_map; |
diff --git a/include/asm-i386/spinlock.h b/include/asm-i386/spinlock.h index c18b71fae6b3..d3bcebed60ca 100644 --- a/include/asm-i386/spinlock.h +++ b/include/asm-i386/spinlock.h | |||
@@ -7,8 +7,14 @@ | |||
7 | #include <asm/processor.h> | 7 | #include <asm/processor.h> |
8 | #include <linux/compiler.h> | 8 | #include <linux/compiler.h> |
9 | 9 | ||
10 | #ifdef CONFIG_PARAVIRT | ||
11 | #include <asm/paravirt.h> | ||
12 | #else | ||
10 | #define CLI_STRING "cli" | 13 | #define CLI_STRING "cli" |
11 | #define STI_STRING "sti" | 14 | #define STI_STRING "sti" |
15 | #define CLI_STI_CLOBBERS | ||
16 | #define CLI_STI_INPUT_ARGS | ||
17 | #endif /* CONFIG_PARAVIRT */ | ||
12 | 18 | ||
13 | /* | 19 | /* |
14 | * Your basic SMP spinlocks, allowing only a single CPU anywhere | 20 | * Your basic SMP spinlocks, allowing only a single CPU anywhere |
@@ -53,25 +59,28 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla | |||
53 | { | 59 | { |
54 | asm volatile( | 60 | asm volatile( |
55 | "\n1:\t" | 61 | "\n1:\t" |
56 | LOCK_PREFIX " ; decb %0\n\t" | 62 | LOCK_PREFIX " ; decb %[slock]\n\t" |
57 | "jns 5f\n" | 63 | "jns 5f\n" |
58 | "2:\t" | 64 | "2:\t" |
59 | "testl $0x200, %1\n\t" | 65 | "testl $0x200, %[flags]\n\t" |
60 | "jz 4f\n\t" | 66 | "jz 4f\n\t" |
61 | STI_STRING "\n" | 67 | STI_STRING "\n" |
62 | "3:\t" | 68 | "3:\t" |
63 | "rep;nop\n\t" | 69 | "rep;nop\n\t" |
64 | "cmpb $0, %0\n\t" | 70 | "cmpb $0, %[slock]\n\t" |
65 | "jle 3b\n\t" | 71 | "jle 3b\n\t" |
66 | CLI_STRING "\n\t" | 72 | CLI_STRING "\n\t" |
67 | "jmp 1b\n" | 73 | "jmp 1b\n" |
68 | "4:\t" | 74 | "4:\t" |
69 | "rep;nop\n\t" | 75 | "rep;nop\n\t" |
70 | "cmpb $0, %0\n\t" | 76 | "cmpb $0, %[slock]\n\t" |
71 | "jg 1b\n\t" | 77 | "jg 1b\n\t" |
72 | "jmp 4b\n" | 78 | "jmp 4b\n" |
73 | "5:\n\t" | 79 | "5:\n\t" |
74 | : "+m" (lock->slock) : "r" (flags) : "memory"); | 80 | : [slock] "+m" (lock->slock) |
81 | : [flags] "r" (flags) | ||
82 | CLI_STI_INPUT_ARGS | ||
83 | : "memory" CLI_STI_CLOBBERS); | ||
75 | } | 84 | } |
76 | #endif | 85 | #endif |
77 | 86 | ||
diff --git a/include/asm-i386/suspend.h b/include/asm-i386/suspend.h index c1da5caafaf7..8dbaafe611ff 100644 --- a/include/asm-i386/suspend.h +++ b/include/asm-i386/suspend.h | |||
@@ -12,12 +12,8 @@ static inline int arch_prepare_suspend(void) { return 0; } | |||
12 | struct saved_context { | 12 | struct saved_context { |
13 | u16 es, fs, gs, ss; | 13 | u16 es, fs, gs, ss; |
14 | unsigned long cr0, cr2, cr3, cr4; | 14 | unsigned long cr0, cr2, cr3, cr4; |
15 | u16 gdt_pad; | 15 | struct Xgt_desc_struct gdt; |
16 | u16 gdt_limit; | 16 | struct Xgt_desc_struct idt; |
17 | unsigned long gdt_base; | ||
18 | u16 idt_pad; | ||
19 | u16 idt_limit; | ||
20 | unsigned long idt_base; | ||
21 | u16 ldt; | 17 | u16 ldt; |
22 | u16 tss; | 18 | u16 tss; |
23 | unsigned long tr; | 19 | unsigned long tr; |
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index a6dabbcd6e6a..a6d20d9a1a30 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h | |||
@@ -88,6 +88,9 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | |||
88 | #define savesegment(seg, value) \ | 88 | #define savesegment(seg, value) \ |
89 | asm volatile("mov %%" #seg ",%0":"=rm" (value)) | 89 | asm volatile("mov %%" #seg ",%0":"=rm" (value)) |
90 | 90 | ||
91 | #ifdef CONFIG_PARAVIRT | ||
92 | #include <asm/paravirt.h> | ||
93 | #else | ||
91 | #define read_cr0() ({ \ | 94 | #define read_cr0() ({ \ |
92 | unsigned int __dummy; \ | 95 | unsigned int __dummy; \ |
93 | __asm__ __volatile__( \ | 96 | __asm__ __volatile__( \ |
@@ -139,17 +142,18 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \ | |||
139 | #define write_cr4(x) \ | 142 | #define write_cr4(x) \ |
140 | __asm__ __volatile__("movl %0,%%cr4": :"r" (x)) | 143 | __asm__ __volatile__("movl %0,%%cr4": :"r" (x)) |
141 | 144 | ||
142 | /* | 145 | #define wbinvd() \ |
143 | * Clear and set 'TS' bit respectively | 146 | __asm__ __volatile__ ("wbinvd": : :"memory") |
144 | */ | 147 | |
148 | /* Clear the 'TS' bit */ | ||
145 | #define clts() __asm__ __volatile__ ("clts") | 149 | #define clts() __asm__ __volatile__ ("clts") |
150 | #endif/* CONFIG_PARAVIRT */ | ||
151 | |||
152 | /* Set the 'TS' bit */ | ||
146 | #define stts() write_cr0(8 | read_cr0()) | 153 | #define stts() write_cr0(8 | read_cr0()) |
147 | 154 | ||
148 | #endif /* __KERNEL__ */ | 155 | #endif /* __KERNEL__ */ |
149 | 156 | ||
150 | #define wbinvd() \ | ||
151 | __asm__ __volatile__ ("wbinvd": : :"memory") | ||
152 | |||
153 | static inline unsigned long get_limit(unsigned long segment) | 157 | static inline unsigned long get_limit(unsigned long segment) |
154 | { | 158 | { |
155 | unsigned long __limit; | 159 | unsigned long __limit; |
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h index 54d6d7aea938..46d32ad92082 100644 --- a/include/asm-i386/thread_info.h +++ b/include/asm-i386/thread_info.h | |||
@@ -95,15 +95,7 @@ static inline struct thread_info *current_thread_info(void) | |||
95 | 95 | ||
96 | /* thread information allocation */ | 96 | /* thread information allocation */ |
97 | #ifdef CONFIG_DEBUG_STACK_USAGE | 97 | #ifdef CONFIG_DEBUG_STACK_USAGE |
98 | #define alloc_thread_info(tsk) \ | 98 | #define alloc_thread_info(tsk) kzalloc(THREAD_SIZE, GFP_KERNEL) |
99 | ({ \ | ||
100 | struct thread_info *ret; \ | ||
101 | \ | ||
102 | ret = kmalloc(THREAD_SIZE, GFP_KERNEL); \ | ||
103 | if (ret) \ | ||
104 | memset(ret, 0, THREAD_SIZE); \ | ||
105 | ret; \ | ||
106 | }) | ||
107 | #else | 99 | #else |
108 | #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) | 100 | #define alloc_thread_info(tsk) kmalloc(THREAD_SIZE, GFP_KERNEL) |
109 | #endif | 101 | #endif |
diff --git a/include/asm-i386/time.h b/include/asm-i386/time.h new file mode 100644 index 000000000000..ea8065af825a --- /dev/null +++ b/include/asm-i386/time.h | |||
@@ -0,0 +1,41 @@ | |||
1 | #ifndef _ASMi386_TIME_H | ||
2 | #define _ASMi386_TIME_H | ||
3 | |||
4 | #include <linux/efi.h> | ||
5 | #include "mach_time.h" | ||
6 | |||
7 | static inline unsigned long native_get_wallclock(void) | ||
8 | { | ||
9 | unsigned long retval; | ||
10 | |||
11 | if (efi_enabled) | ||
12 | retval = efi_get_time(); | ||
13 | else | ||
14 | retval = mach_get_cmos_time(); | ||
15 | |||
16 | return retval; | ||
17 | } | ||
18 | |||
19 | static inline int native_set_wallclock(unsigned long nowtime) | ||
20 | { | ||
21 | int retval; | ||
22 | |||
23 | if (efi_enabled) | ||
24 | retval = efi_set_rtc_mmss(nowtime); | ||
25 | else | ||
26 | retval = mach_set_rtc_mmss(nowtime); | ||
27 | |||
28 | return retval; | ||
29 | } | ||
30 | |||
31 | #ifdef CONFIG_PARAVIRT | ||
32 | #include <asm/paravirt.h> | ||
33 | #else /* !CONFIG_PARAVIRT */ | ||
34 | |||
35 | #define get_wallclock() native_get_wallclock() | ||
36 | #define set_wallclock(x) native_set_wallclock(x) | ||
37 | #define do_time_init() time_init_hook() | ||
38 | |||
39 | #endif /* CONFIG_PARAVIRT */ | ||
40 | |||
41 | #endif | ||
diff --git a/include/asm-i386/tlbflush.h b/include/asm-i386/tlbflush.h index 360648b0f2b3..4dd82840d53b 100644 --- a/include/asm-i386/tlbflush.h +++ b/include/asm-i386/tlbflush.h | |||
@@ -4,7 +4,15 @@ | |||
4 | #include <linux/mm.h> | 4 | #include <linux/mm.h> |
5 | #include <asm/processor.h> | 5 | #include <asm/processor.h> |
6 | 6 | ||
7 | #define __flush_tlb() \ | 7 | #ifdef CONFIG_PARAVIRT |
8 | #include <asm/paravirt.h> | ||
9 | #else | ||
10 | #define __flush_tlb() __native_flush_tlb() | ||
11 | #define __flush_tlb_global() __native_flush_tlb_global() | ||
12 | #define __flush_tlb_single(addr) __native_flush_tlb_single(addr) | ||
13 | #endif | ||
14 | |||
15 | #define __native_flush_tlb() \ | ||
8 | do { \ | 16 | do { \ |
9 | unsigned int tmpreg; \ | 17 | unsigned int tmpreg; \ |
10 | \ | 18 | \ |
@@ -19,7 +27,7 @@ | |||
19 | * Global pages have to be flushed a bit differently. Not a real | 27 | * Global pages have to be flushed a bit differently. Not a real |
20 | * performance problem because this does not happen often. | 28 | * performance problem because this does not happen often. |
21 | */ | 29 | */ |
22 | #define __flush_tlb_global() \ | 30 | #define __native_flush_tlb_global() \ |
23 | do { \ | 31 | do { \ |
24 | unsigned int tmpreg, cr4, cr4_orig; \ | 32 | unsigned int tmpreg, cr4, cr4_orig; \ |
25 | \ | 33 | \ |
@@ -36,6 +44,9 @@ | |||
36 | : "memory"); \ | 44 | : "memory"); \ |
37 | } while (0) | 45 | } while (0) |
38 | 46 | ||
47 | #define __native_flush_tlb_single(addr) \ | ||
48 | __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory") | ||
49 | |||
39 | # define __flush_tlb_all() \ | 50 | # define __flush_tlb_all() \ |
40 | do { \ | 51 | do { \ |
41 | if (cpu_has_pge) \ | 52 | if (cpu_has_pge) \ |
@@ -46,9 +57,6 @@ | |||
46 | 57 | ||
47 | #define cpu_has_invlpg (boot_cpu_data.x86 > 3) | 58 | #define cpu_has_invlpg (boot_cpu_data.x86 > 3) |
48 | 59 | ||
49 | #define __flush_tlb_single(addr) \ | ||
50 | __asm__ __volatile__("invlpg (%0)" ::"r" (addr) : "memory") | ||
51 | |||
52 | #ifdef CONFIG_X86_INVLPG | 60 | #ifdef CONFIG_X86_INVLPG |
53 | # define __flush_tlb_one(addr) __flush_tlb_single(addr) | 61 | # define __flush_tlb_one(addr) __flush_tlb_single(addr) |
54 | #else | 62 | #else |
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h index 5031d693b89d..aa2c931e30db 100644 --- a/include/asm-i386/unwind.h +++ b/include/asm-i386/unwind.h | |||
@@ -71,6 +71,7 @@ static inline void arch_unw_init_blocked(struct unwind_frame_info *info) | |||
71 | info->regs.xss = __KERNEL_DS; | 71 | info->regs.xss = __KERNEL_DS; |
72 | info->regs.xds = __USER_DS; | 72 | info->regs.xds = __USER_DS; |
73 | info->regs.xes = __USER_DS; | 73 | info->regs.xes = __USER_DS; |
74 | info->regs.xgs = __KERNEL_PDA; | ||
74 | } | 75 | } |
75 | 76 | ||
76 | extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *, | 77 | extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *, |
@@ -78,17 +79,13 @@ extern asmlinkage int arch_unwind_init_running(struct unwind_frame_info *, | |||
78 | void *arg), | 79 | void *arg), |
79 | void *arg); | 80 | void *arg); |
80 | 81 | ||
81 | static inline int arch_unw_user_mode(const struct unwind_frame_info *info) | 82 | static inline int arch_unw_user_mode(/*const*/ struct unwind_frame_info *info) |
82 | { | 83 | { |
83 | #if 0 /* This can only work when selector register and EFLAGS saves/restores | 84 | return user_mode_vm(&info->regs) |
84 | are properly annotated (and tracked in UNW_REGISTER_INFO). */ | 85 | || info->regs.eip < PAGE_OFFSET |
85 | return user_mode_vm(&info->regs); | ||
86 | #else | ||
87 | return info->regs.eip < PAGE_OFFSET | ||
88 | || (info->regs.eip >= __fix_to_virt(FIX_VDSO) | 86 | || (info->regs.eip >= __fix_to_virt(FIX_VDSO) |
89 | && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE) | 87 | && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE) |
90 | || info->regs.esp < PAGE_OFFSET; | 88 | || info->regs.esp < PAGE_OFFSET; |
91 | #endif | ||
92 | } | 89 | } |
93 | 90 | ||
94 | #else | 91 | #else |
diff --git a/include/asm-i386/vm86.h b/include/asm-i386/vm86.h index 952fd6957380..a5edf517b992 100644 --- a/include/asm-i386/vm86.h +++ b/include/asm-i386/vm86.h | |||
@@ -145,26 +145,13 @@ struct vm86plus_struct { | |||
145 | * at the end of the structure. Look at ptrace.h to see the "normal" | 145 | * at the end of the structure. Look at ptrace.h to see the "normal" |
146 | * setup. For user space layout see 'struct vm86_regs' above. | 146 | * setup. For user space layout see 'struct vm86_regs' above. |
147 | */ | 147 | */ |
148 | #include <asm/ptrace.h> | ||
148 | 149 | ||
149 | struct kernel_vm86_regs { | 150 | struct kernel_vm86_regs { |
150 | /* | 151 | /* |
151 | * normal regs, with special meaning for the segment descriptors.. | 152 | * normal regs, with special meaning for the segment descriptors.. |
152 | */ | 153 | */ |
153 | long ebx; | 154 | struct pt_regs pt; |
154 | long ecx; | ||
155 | long edx; | ||
156 | long esi; | ||
157 | long edi; | ||
158 | long ebp; | ||
159 | long eax; | ||
160 | long __null_ds; | ||
161 | long __null_es; | ||
162 | long orig_eax; | ||
163 | long eip; | ||
164 | unsigned short cs, __csh; | ||
165 | long eflags; | ||
166 | long esp; | ||
167 | unsigned short ss, __ssh; | ||
168 | /* | 155 | /* |
169 | * these are specific to v86 mode: | 156 | * these are specific to v86 mode: |
170 | */ | 157 | */ |