author		Rusty Russell <rusty@rustcorp.com.au>	2006-12-06 20:14:07 -0500
committer	Andi Kleen <andi@basil.nowhere.org>	2006-12-06 20:14:07 -0500
commit		d3561b7fa0fb0fc583bab0eeda32bec9e4c4056d (patch)
tree		39d835965878622d052ef3b3c7b759d83b6bc327 /arch/i386
parent		db91b882aabd0b3b55a87cbfb344f2798bb740b4 (diff)
[PATCH] paravirt: header and stubs for paravirtualisation
Create a paravirt.h header for all the critical operations which need to be
replaced with hypervisor calls, and include it instead of defining the native
operations directly when CONFIG_PARAVIRT is enabled.
This patch does the dumbest possible replacement of paravirtualized
instructions: calls through a "paravirt_ops" structure.  Currently these are
function implementations of native hardware; hypervisors will override the
ops structure with their own variants.
All the pv-ops functions are declared "fastcall" so that a specific
register-based ABI is used, to make inlining assembler easier.
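As a rough illustration of the override mechanism (not part of this patch:
the example_* names and the hypercall stub below are made up, only the
paravirt_ops fields are real), a hypervisor backend would detect its host
early in boot and patch the ops structure in place:

    /* Hypothetical backend, for illustration only. */

    /* Stand-in for a real hypercall; a hypervisor port would trap to the host here. */
    static void example_hypercall(int op) { (void)op; }
    #define EXAMPLE_DISABLE_INTERRUPTS	0
    #define EXAMPLE_ENABLE_INTERRUPTS	1

    static fastcall void example_irq_disable(void)
    {
    	example_hypercall(EXAMPLE_DISABLE_INTERRUPTS);
    }

    static fastcall void example_irq_enable(void)
    {
    	example_hypercall(EXAMPLE_ENABLE_INTERRUPTS);
    }

    void __init example_paravirt_init(void)
    {
    	if (!detect_example_hypervisor())	/* assumed detection routine */
    		return;				/* keep the native ops */

    	paravirt_ops.name = "example hypervisor";
    	paravirt_ops.paravirt_enabled = 1;
    	paravirt_ops.irq_disable = example_irq_disable;
    	paravirt_ops.irq_enable = example_irq_enable;
    }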
And:
+From: Andy Whitcroft <apw@shadowen.org>
The paravirt ops introduce a 'weak' attribute onto memory_setup().
Code ordering leads to the following warning on x86:
arch/i386/kernel/setup.c:651: warning: weak declaration of
`memory_setup' after first use results in unspecified behavior
Move memory_setup() to avoid this.
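For readers unfamiliar with the rule being tripped here: gcc only accepts a
weak declaration if it appears before the symbol's first use in the same
translation unit, which is why the definition is moved above its callers.
A minimal userspace illustration (not kernel code):

    #include <stdio.h>

    /* Weak default: a strong definition in another object file
     * replaces it at link time; otherwise this body is used. */
    char * __attribute__((weak)) memory_setup(void)
    {
    	return "default memory map";
    }

    int main(void)
    {
    	/* The call site comes after the weak definition, so gcc has
    	 * no "weak declaration after first use" complaint. */
    	printf("%s\n", memory_setup());
    	return 0;
    }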
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Diffstat (limited to 'arch/i386')
-rw-r--r--	arch/i386/Kconfig                |  11
-rw-r--r--	arch/i386/boot/compressed/misc.c |   1
-rw-r--r--	arch/i386/kernel/Makefile        |   1
-rw-r--r--	arch/i386/kernel/asm-offsets.c   |  10
-rw-r--r--	arch/i386/kernel/entry.S         |  34
-rw-r--r--	arch/i386/kernel/i8259.c         |   5
-rw-r--r--	arch/i386/kernel/paravirt.c      | 404
-rw-r--r--	arch/i386/kernel/setup.c         |   8
-rw-r--r--	arch/i386/kernel/smpboot.c       |   5
-rw-r--r--	arch/i386/kernel/time.c          |  15
-rw-r--r--	arch/i386/power/cpu.c            |   8
11 files changed, 479 insertions(+), 23 deletions(-)
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 1f0f7b60995e..bb1fa061c6cf 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -182,6 +182,17 @@ config X86_ES7000
 
 endchoice
 
+config PARAVIRT
+	bool "Paravirtualization support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL
+	help
+	  Paravirtualization is a way of running multiple instances of
+	  Linux on the same machine, under a hypervisor. This option
+	  changes the kernel so it can modify itself when it is run
+	  under a hypervisor, improving performance significantly.
+	  However, when run without a hypervisor the kernel is
+	  theoretically slower. If in doubt, say N.
+
 config ACPI_SRAT
 	bool
 	default y
diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c
index dc1538931555..c6798c75c67d 100644
--- a/arch/i386/boot/compressed/misc.c
+++ b/arch/i386/boot/compressed/misc.c
@@ -9,6 +9,7 @@
  * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
  */
 
+#undef CONFIG_PARAVIRT
 #include <linux/linkage.h>
 #include <linux/vmalloc.h>
 #include <linux/screen_info.h>
diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile
index f614854bd71f..406612136049 100644
--- a/arch/i386/kernel/Makefile
+++ b/arch/i386/kernel/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_VM86)		+= vm86.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 obj-$(CONFIG_HPET_TIMER)	+= hpet.o
 obj-$(CONFIG_K8_NB)		+= k8.o
+obj-$(CONFIG_PARAVIRT)		+= paravirt.o
 
 EXTRA_AFLAGS   := -traditional
 
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index 0666eb0ed7bc..1b2f3cd33270 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -101,4 +101,14 @@ void foo(void)
 	BLANK();
 	OFFSET(PDA_cpu, i386_pda, cpu_number);
 	OFFSET(PDA_pcurrent, i386_pda, pcurrent);
+
+#ifdef CONFIG_PARAVIRT
+	BLANK();
+	OFFSET(PARAVIRT_enabled, paravirt_ops, paravirt_enabled);
+	OFFSET(PARAVIRT_irq_disable, paravirt_ops, irq_disable);
+	OFFSET(PARAVIRT_irq_enable, paravirt_ops, irq_enable);
+	OFFSET(PARAVIRT_irq_enable_sysexit, paravirt_ops, irq_enable_sysexit);
+	OFFSET(PARAVIRT_iret, paravirt_ops, iret);
+	OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
+#endif
 }
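The OFFSET() entries above are what let assembly code index into the C
structure: asm-offsets.c is compiled and its output post-processed into
asm-offsets.h, so entry.S can write expressions such as
paravirt_ops+PARAVIRT_enabled. A sketch of the generated header follows; the
numeric values are placeholders only and depend on the real structure layout:

    /* asm-offsets.h (generated; offsets below are illustrative) */
    #define PARAVIRT_enabled 0		/* offsetof(struct paravirt_ops, paravirt_enabled) */
    #define PARAVIRT_irq_disable 88	/* offsetof(struct paravirt_ops, irq_disable) */
    #define PARAVIRT_iret 120		/* offsetof(struct paravirt_ops, iret) */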
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index 0220bc8cbb43..d274612e05cd 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -62,13 +62,6 @@ DF_MASK		= 0x00000400
 NT_MASK		= 0x00004000
 VM_MASK		= 0x00020000
 
-/* These are replaces for paravirtualization */
-#define DISABLE_INTERRUPTS		cli
-#define ENABLE_INTERRUPTS		sti
-#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
-#define INTERRUPT_RETURN		iret
-#define GET_CR0_INTO_EAX		movl %cr0, %eax
-
 #ifdef CONFIG_PREEMPT
 #define preempt_stop		DISABLE_INTERRUPTS; TRACE_IRQS_OFF
 #else
@@ -416,6 +409,20 @@ ldt_ss:
 	jnz restore_nocheck
 	testl $0x00400000, %eax		# returning to 32bit stack?
 	jnz restore_nocheck		# allright, normal return
+
+#ifdef CONFIG_PARAVIRT
+	/*
+	 * The kernel can't run on a non-flat stack if paravirt mode
+	 * is active.  Rather than try to fixup the high bits of
+	 * ESP, bypass this code entirely.  This may break DOSemu
+	 * and/or Wine support in a paravirt VM, although the option
+	 * is still available to implement the setting of the high
+	 * 16-bits in the INTERRUPT_RETURN paravirt-op.
+	 */
+	cmpl $0, paravirt_ops+PARAVIRT_enabled
+	jne restore_nocheck
+#endif
+
 	/* If returning to userspace with 16bit stack,
 	 * try to fix the higher word of ESP, as the CPU
 	 * won't restore it.
@@ -833,6 +840,19 @@ nmi_espfix_stack:
 	.previous
 KPROBE_END(nmi)
 
+#ifdef CONFIG_PARAVIRT
+ENTRY(native_iret)
+1:	iret
+.section __ex_table,"a"
+	.align 4
+	.long 1b,iret_exc
+.previous
+
+ENTRY(native_irq_enable_sysexit)
+	sti
+	sysexit
+#endif
+
 KPROBE_ENTRY(int3)
 	RING0_INT_FRAME
 	pushl $-1			# mark this as an int
diff --git a/arch/i386/kernel/i8259.c b/arch/i386/kernel/i8259.c
index 62996cd17084..c8d45821c788 100644
--- a/arch/i386/kernel/i8259.c
+++ b/arch/i386/kernel/i8259.c
@@ -381,7 +381,10 @@ void __init init_ISA_irqs (void)
 	}
 }
 
-void __init init_IRQ(void)
+/* Overridden in paravirt.c */
+void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));
+
+void __init native_init_IRQ(void)
 {
 	int i;
 
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
new file mode 100644
index 000000000000..478192cd4b90
--- /dev/null
+++ b/arch/i386/kernel/paravirt.c
@@ -0,0 +1,404 @@
+/* Paravirtualization interfaces
+    Copyright (C) 2006 Rusty Russell IBM Corporation
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/efi.h>
+#include <linux/bcd.h>
+
+#include <asm/bug.h>
+#include <asm/paravirt.h>
+#include <asm/desc.h>
+#include <asm/setup.h>
+#include <asm/arch_hooks.h>
+#include <asm/time.h>
+#include <asm/irq.h>
+#include <asm/delay.h>
+
+/* nop stub */
+static void native_nop(void)
+{
+}
+
+static void __init default_banner(void)
+{
+	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
+	       paravirt_ops.name);
+}
+
+char *memory_setup(void)
+{
+	return paravirt_ops.memory_setup();
+}
+
+static fastcall unsigned long native_get_debugreg(int regno)
+{
+	unsigned long val = 0; 	/* Damn you, gcc! */
+
+	switch (regno) {
+	case 0:
+		asm("movl %%db0, %0" :"=r" (val)); break;
+	case 1:
+		asm("movl %%db1, %0" :"=r" (val)); break;
+	case 2:
+		asm("movl %%db2, %0" :"=r" (val)); break;
+	case 3:
+		asm("movl %%db3, %0" :"=r" (val)); break;
+	case 6:
+		asm("movl %%db6, %0" :"=r" (val)); break;
+	case 7:
+		asm("movl %%db7, %0" :"=r" (val)); break;
+	default:
+		BUG();
+	}
+	return val;
+}
+
+static fastcall void native_set_debugreg(int regno, unsigned long value)
+{
+	switch (regno) {
+	case 0:
+		asm("movl %0,%%db0"	: /* no output */ :"r" (value));
+		break;
+	case 1:
+		asm("movl %0,%%db1"	: /* no output */ :"r" (value));
+		break;
+	case 2:
+		asm("movl %0,%%db2"	: /* no output */ :"r" (value));
+		break;
+	case 3:
+		asm("movl %0,%%db3"	: /* no output */ :"r" (value));
+		break;
+	case 6:
+		asm("movl %0,%%db6"	: /* no output */ :"r" (value));
+		break;
+	case 7:
+		asm("movl %0,%%db7"	: /* no output */ :"r" (value));
+		break;
+	default:
+		BUG();
+	}
+}
+
+void init_IRQ(void)
+{
+	paravirt_ops.init_IRQ();
+}
+
+static fastcall void native_clts(void)
+{
+	asm volatile ("clts");
+}
+
+static fastcall unsigned long native_read_cr0(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static fastcall void native_write_cr0(unsigned long val)
+{
+	asm volatile("movl %0,%%cr0": :"r" (val));
+}
+
+static fastcall unsigned long native_read_cr2(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static fastcall void native_write_cr2(unsigned long val)
+{
+	asm volatile("movl %0,%%cr2": :"r" (val));
+}
+
+static fastcall unsigned long native_read_cr3(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static fastcall void native_write_cr3(unsigned long val)
+{
+	asm volatile("movl %0,%%cr3": :"r" (val));
+}
+
+static fastcall unsigned long native_read_cr4(void)
+{
+	unsigned long val;
+	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
+	return val;
+}
+
+static fastcall unsigned long native_read_cr4_safe(void)
+{
+	unsigned long val;
+	/* This could fault if %cr4 does not exist */
+	asm("1: movl %%cr4, %0		\n"
+		"2:				\n"
+		".section __ex_table,\"a\"	\n"
+		".long 1b,2b			\n"
+		".previous			\n"
+		: "=r" (val): "0" (0));
+	return val;
+}
+
+static fastcall void native_write_cr4(unsigned long val)
+{
+	asm volatile("movl %0,%%cr4": :"r" (val));
+}
+
+static fastcall unsigned long native_save_fl(void)
+{
+	unsigned long f;
+	asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
+	return f;
+}
+
+static fastcall void native_restore_fl(unsigned long f)
+{
+	asm volatile("pushl %0 ; popfl": /* no output */
+			     :"g" (f)
+			     :"memory", "cc");
+}
+
+static fastcall void native_irq_disable(void)
+{
+	asm volatile("cli": : :"memory");
+}
+
+static fastcall void native_irq_enable(void)
+{
+	asm volatile("sti": : :"memory");
+}
+
+static fastcall void native_safe_halt(void)
+{
+	asm volatile("sti; hlt": : :"memory");
+}
+
+static fastcall void native_halt(void)
+{
+	asm volatile("hlt": : :"memory");
+}
+
+static fastcall void native_wbinvd(void)
+{
+	asm volatile("wbinvd": : :"memory");
+}
+
+static fastcall unsigned long long native_read_msr(unsigned int msr, int *err)
+{
+	unsigned long long val;
+
+	asm volatile("2: rdmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3:  movl %3,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     ".section __ex_table,\"a\"\n"
+		     "   .align 4\n\t"
+		     "   .long 2b,3b\n\t"
+		     ".previous"
+		     : "=r" (*err), "=A" (val)
+		     : "c" (msr), "i" (-EFAULT));
+
+	return val;
+}
+
+static fastcall int native_write_msr(unsigned int msr, unsigned long long val)
+{
+	int err;
+	asm volatile("2: wrmsr ; xorl %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3:  movl %4,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     ".section __ex_table,\"a\"\n"
+		     "   .align 4\n\t"
+		     "   .long 2b,3b\n\t"
+		     ".previous"
+		     : "=a" (err)
+		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
+		       "i" (-EFAULT));
+	return err;
+}
+
+static fastcall unsigned long long native_read_tsc(void)
+{
+	unsigned long long val;
+	asm volatile("rdtsc" : "=A" (val));
+	return val;
+}
+
+static fastcall unsigned long long native_read_pmc(void)
+{
+	unsigned long long val;
+	asm volatile("rdpmc" : "=A" (val));
+	return val;
+}
+
+static fastcall void native_load_tr_desc(void)
+{
+	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+
+static fastcall void native_load_gdt(const struct Xgt_desc_struct *dtr)
+{
+	asm volatile("lgdt %0"::"m" (*dtr));
+}
+
+static fastcall void native_load_idt(const struct Xgt_desc_struct *dtr)
+{
+	asm volatile("lidt %0"::"m" (*dtr));
+}
+
+static fastcall void native_store_gdt(struct Xgt_desc_struct *dtr)
+{
+	asm ("sgdt %0":"=m" (*dtr));
+}
+
+static fastcall void native_store_idt(struct Xgt_desc_struct *dtr)
+{
+	asm ("sidt %0":"=m" (*dtr));
+}
+
+static fastcall unsigned long native_store_tr(void)
+{
+	unsigned long tr;
+	asm ("str %0":"=r" (tr));
+	return tr;
+}
+
+static fastcall void native_load_tls(struct thread_struct *t, unsigned int cpu)
+{
+#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
+	C(0); C(1); C(2);
+#undef C
+}
+
+static inline void native_write_dt_entry(void *dt, int entry, u32 entry_low, u32 entry_high)
+{
+	u32 *lp = (u32 *)((char *)dt + entry*8);
+	lp[0] = entry_low;
+	lp[1] = entry_high;
+}
+
+static fastcall void native_write_ldt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+	native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static fastcall void native_write_gdt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+	native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static fastcall void native_write_idt_entry(void *dt, int entrynum, u32 low, u32 high)
+{
+	native_write_dt_entry(dt, entrynum, low, high);
+}
+
+static fastcall void native_load_esp0(struct tss_struct *tss,
+				      struct thread_struct *thread)
+{
+	tss->esp0 = thread->esp0;
+
+	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
+	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
+		tss->ss1 = thread->sysenter_cs;
+		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
+	}
+}
+
+static fastcall void native_io_delay(void)
+{
+	asm volatile("outb %al,$0x80");
+}
+
+/* These are in entry.S */
+extern fastcall void native_iret(void);
+extern fastcall void native_irq_enable_sysexit(void);
+
+static int __init print_banner(void)
+{
+	paravirt_ops.banner();
+	return 0;
+}
+core_initcall(print_banner);
+
+struct paravirt_ops paravirt_ops = {
+	.name = "bare hardware",
+	.paravirt_enabled = 0,
+	.kernel_rpl = 0,
+
+	.banner = default_banner,
+	.arch_setup = native_nop,
+	.memory_setup = machine_specific_memory_setup,
+	.get_wallclock = native_get_wallclock,
+	.set_wallclock = native_set_wallclock,
+	.time_init = time_init_hook,
+	.init_IRQ = native_init_IRQ,
+
+	.cpuid = native_cpuid,
+	.get_debugreg = native_get_debugreg,
+	.set_debugreg = native_set_debugreg,
+	.clts = native_clts,
+	.read_cr0 = native_read_cr0,
+	.write_cr0 = native_write_cr0,
+	.read_cr2 = native_read_cr2,
+	.write_cr2 = native_write_cr2,
+	.read_cr3 = native_read_cr3,
+	.write_cr3 = native_write_cr3,
+	.read_cr4 = native_read_cr4,
+	.read_cr4_safe = native_read_cr4_safe,
+	.write_cr4 = native_write_cr4,
+	.save_fl = native_save_fl,
+	.restore_fl = native_restore_fl,
+	.irq_disable = native_irq_disable,
+	.irq_enable = native_irq_enable,
+	.safe_halt = native_safe_halt,
+	.halt = native_halt,
+	.wbinvd = native_wbinvd,
+	.read_msr = native_read_msr,
+	.write_msr = native_write_msr,
+	.read_tsc = native_read_tsc,
+	.read_pmc = native_read_pmc,
+	.load_tr_desc = native_load_tr_desc,
+	.set_ldt = native_set_ldt,
+	.load_gdt = native_load_gdt,
+	.load_idt = native_load_idt,
+	.store_gdt = native_store_gdt,
+	.store_idt = native_store_idt,
+	.store_tr = native_store_tr,
+	.load_tls = native_load_tls,
+	.write_ldt_entry = native_write_ldt_entry,
+	.write_gdt_entry = native_write_gdt_entry,
+	.write_idt_entry = native_write_idt_entry,
+	.load_esp0 = native_load_esp0,
+
+	.set_iopl_mask = native_set_iopl_mask,
+	.io_delay = native_io_delay,
+	.const_udelay = __const_udelay,
+
+	.irq_enable_sysexit = native_irq_enable_sysexit,
+	.iret = native_iret,
+};
+EXPORT_SYMBOL(paravirt_ops);
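The file above only supplies the structure and its native backing functions;
the actual call sites are thin wrappers like memory_setup() and init_IRQ()
shown here, plus the macros/inlines that paravirt.h (outside this arch/i386
diffstat) is expected to provide for the hot paths. A sketch of that
indirection, with the macro names assumed rather than taken from this patch:

    #ifdef CONFIG_PARAVIRT
    /* Indirect through the ops structure; a hypervisor can replace these. */
    #define raw_local_irq_disable()	paravirt_ops.irq_disable()
    #define raw_local_irq_enable()	paravirt_ops.irq_enable()
    #else
    /* Native fallback when paravirt support is compiled out. */
    #define raw_local_irq_disable()	asm volatile("cli" : : : "memory")
    #define raw_local_irq_enable()	asm volatile("sti" : : : "memory")
    #endif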
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index e5bb87aa5a45..695d53fd14de 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -495,6 +495,12 @@ static void set_mca_bus(int x)
 static void set_mca_bus(int x) { }
 #endif
 
+/* Overridden in paravirt.c if CONFIG_PARAVIRT */
+char * __attribute__((weak)) memory_setup(void)
+{
+	return machine_specific_memory_setup();
+}
+
 /*
  * Determine if we were loaded by an EFI loader. If so, then we have also been
  * passed the efi memmap, systab, etc., so we should use these data structures
@@ -547,7 +553,7 @@ void __init setup_arch(char **cmdline_p)
 		efi_init();
 	else {
 		printk(KERN_INFO "BIOS-provided physical RAM map:\n");
-		print_memory_map(machine_specific_memory_setup());
+		print_memory_map(memory_setup());
 	}
 
 	copy_edd();
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 095636620fa2..cd7de9c9654b 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -33,6 +33,11 @@
  *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
  *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process. */
 
+
+/* SMP boot always wants to use real time delay to allow sufficient time for
+ * the APs to come online */
+#define USE_REAL_TIME_DELAY
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c
index 78af572fd17c..c505b16c0990 100644
--- a/arch/i386/kernel/time.c
+++ b/arch/i386/kernel/time.c
@@ -56,6 +56,7 @@
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <asm/timer.h>
+#include <asm/time.h>
 
 #include "mach_time.h"
 
@@ -116,10 +117,7 @@ static int set_rtc_mmss(unsigned long nowtime)
 	/* gets recalled with irq locally disabled */
 	/* XXX - does irqsave resolve this? -johnstul */
 	spin_lock_irqsave(&rtc_lock, flags);
-	if (efi_enabled)
-		retval = efi_set_rtc_mmss(nowtime);
-	else
-		retval = mach_set_rtc_mmss(nowtime);
+	retval = set_wallclock(nowtime);
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
 	return retval;
@@ -223,10 +221,7 @@ unsigned long get_cmos_time(void)
 
 	spin_lock_irqsave(&rtc_lock, flags);
 
-	if (efi_enabled)
-		retval = efi_get_time();
-	else
-		retval = mach_get_cmos_time();
+	retval = get_wallclock();
 
 	spin_unlock_irqrestore(&rtc_lock, flags);
 
@@ -370,7 +365,7 @@ static void __init hpet_time_init(void)
 		printk("Using HPET for base-timer\n");
 	}
 
-	time_init_hook();
+	do_time_init();
 }
 #endif
 
@@ -392,5 +387,5 @@ void __init time_init(void)
 
 	do_settimeofday(&ts);
 
-	time_init_hook();
+	do_time_init();
 }
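With this change set_rtc_mmss() and get_cmos_time() no longer open-code the
EFI-versus-CMOS decision; they call the set_wallclock()/get_wallclock() hooks,
which the ops table above routes to native_set_wallclock()/native_get_wallclock().
Those native helpers live outside this arch/i386 diffstat (asm/time.h), so the
body below is only a sketch of what they presumably do, reusing the logic
deleted from time.c:

    /* Sketch only: assumed body of the native wallclock read hook. */
    static inline unsigned long native_get_wallclock(void)
    {
    	unsigned long retval;

    	if (efi_enabled)
    		retval = efi_get_time();
    	else
    		retval = mach_get_cmos_time();

    	return retval;
    }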
diff --git a/arch/i386/power/cpu.c b/arch/i386/power/cpu.c
index 5a1abeff033b..2c15500f8713 100644
--- a/arch/i386/power/cpu.c
+++ b/arch/i386/power/cpu.c
@@ -26,8 +26,8 @@ void __save_processor_state(struct saved_context *ctxt)
 	/*
 	 * descriptor tables
 	 */
-	store_gdt(&ctxt->gdt_limit);
-	store_idt(&ctxt->idt_limit);
+	store_gdt(&ctxt->gdt);
+	store_idt(&ctxt->idt);
 	store_tr(ctxt->tr);
 
 	/*
@@ -99,8 +99,8 @@ void __restore_processor_state(struct saved_context *ctxt)
 	 * now restore the descriptor tables to their proper values
 	 * ltr is done i fix_processor_context().
 	 */
-	load_gdt(&ctxt->gdt_limit);
-	load_idt(&ctxt->idt_limit);
+	load_gdt(&ctxt->gdt);
+	load_idt(&ctxt->idt);
 
 	/*
 	 * segment registers