author    | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-10 19:16:27 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-06-10 19:16:27 -0400
commit    | be15f9d63b97da0065187696962331de6cd9de9e (patch)
tree      | cc85c72e92afccfdcdfa851c4694a93f4ea22b84 /arch/x86/kernel
parent    | 595dc54a1da91408a52c4b962f3deeb1109aaca0 (diff)
parent    | a789ed5fb6d0256c4177c2cc27e06520ddbe4d4c (diff)
Merge branch 'x86-xen-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-xen-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (42 commits)
xen: cache cr0 value to avoid trap'n'emulate for read_cr0 (sketched after this list)
xen/x86-64: clean up warnings about IST-using traps
xen/x86-64: fix breakpoints and hardware watchpoints
xen: reserve Xen start_info rather than e820 reserving
xen: add FIX_TEXT_POKE to fixmap
lguest: update lazy mmu changes to match lguest's use of kvm hypercalls
xen: honour VCPU availability on boot
xen: add "capabilities" file
xen: drop kexec bits from /sys/hypervisor since kexec isn't implemented yet
xen/sys/hypervisor: change writable_pt to features
xen: add /sys/hypervisor support
xen/xenbus: export xenbus_dev_changed
xen: use device model for suspending xenbus devices
xen: remove suspend_cancel hook
xen/dev-evtchn: clean up locking in evtchn
xen: export ioctl headers to userspace
xen: add /dev/xen/evtchn driver
xen: add irq_from_evtchn
xen: clean up gate trap/interrupt constants
xen: set _PAGE_NX in __supported_pte_mask before pagetable construction
...
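For context on the first item above: a Xen guest kernel runs outside ring 0, so a native `read_cr0()` traps into the hypervisor and gets emulated on every call, even though the kernel mostly only cares about the TS bit. The commit avoids that round trip by shadowing cr0 in a per-CPU variable. A minimal sketch of the idea, paraphrased from arch/x86/xen/enlighten.c (which is outside this diffstat, so treat details as approximate):

```c
static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

static unsigned long xen_read_cr0(void)
{
        unsigned long cr0 = percpu_read(xen_cr0_value);

        /* Cache not primed yet: do one trapping native read, then
         * serve all later reads from the per-CPU shadow. */
        if (unlikely(cr0 == 0)) {
                cr0 = native_read_cr0();
                percpu_write(xen_cr0_value, cr0);
        }

        return cr0;
}

static void xen_write_cr0(unsigned long cr0)
{
        /* Writes keep the shadow coherent; only cr0.TS matters to
         * Xen, and it is forwarded via a (batchable) hypercall. */
        percpu_write(xen_cr0_value, cr0);
        /* ... fpu_taskswitch hypercall for the TS bit elided ... */
}
```

This works because cr0 is per-CPU state that only the kernel itself modifies, so the shadow can never go stale behind the kernel's back.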
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/entry_64.S   |  5
-rw-r--r-- | arch/x86/kernel/kvm.c        |  2
-rw-r--r-- | arch/x86/kernel/paravirt.c   | 56
-rw-r--r-- | arch/x86/kernel/process_32.c |  2
-rw-r--r-- | arch/x86/kernel/process_64.c |  2
-rw-r--r-- | arch/x86/kernel/vmi_32.c     | 20
6 files changed, 47 insertions(+), 40 deletions(-)
```diff
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 38946c6e8433..bb01ce080b80 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1379,6 +1379,11 @@ END(xen_failsafe_callback)
 paranoidzeroentry_ist debug do_debug DEBUG_STACK
 paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
 paranoiderrorentry stack_segment do_stack_segment
+#ifdef CONFIG_XEN
+zeroentry xen_debug do_debug
+zeroentry xen_int3 do_int3
+errorentry xen_stack_segment do_stack_segment
+#endif
 errorentry general_protection do_general_protection
 errorentry page_fault do_page_fault
 #ifdef CONFIG_X86_MCE
```
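These stubs exist because Xen delivers exceptions on the normal kernel stack: there is no IST mechanism for a Xen guest, so the paranoid IST-using `debug`/`int3`/`stack_segment` entry points must not be handed to the hypervisor. When the guest IDT is registered, the IST entries are swapped for the plain variants added above. A sketch of that redirection (modeled on `cvt_gate_to_trap()` in arch/x86/xen/enlighten.c, outside this diffstat; the helper name here is made up):

```c
/* Hypothetical helper: map an IST-using native entry point to its
 * non-IST Xen stub before registering the gate with the hypervisor. */
static unsigned long xen_fixup_trap_addr(unsigned long addr)
{
        if (addr == (unsigned long)debug)
                return (unsigned long)xen_debug;
        if (addr == (unsigned long)int3)
                return (unsigned long)xen_int3;
        if (addr == (unsigned long)stack_segment)
                return (unsigned long)xen_stack_segment;
        return addr;
}
```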
```diff
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 33019ddb56b4..6551dedee20c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -195,7 +195,7 @@ static void kvm_leave_lazy_mmu(void)
 	struct kvm_para_state *state = kvm_para_state();
 
 	mmu_queue_flush(state);
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
+	paravirt_leave_lazy_mmu();
 	state->mode = paravirt_get_lazy_mode();
 }
 
```
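The KVM hook above is the `leave` half of the generic lazy-MMU protocol: page-table updates made inside a lazy section may be queued by the backend (here, `mmu_queue_flush()` pushes the queue to the host) and are only guaranteed visible once the section is left. The caller-side pattern in generic mm code looks roughly like this (a sketch; `remap_range` and its parameters are hypothetical, the enter/leave hooks are real):

```c
static void remap_range(struct mm_struct *mm, pte_t *pte,
                        unsigned long start, unsigned long end,
                        unsigned long pfn, pgprot_t prot)
{
        unsigned long addr;

        /* Batch the PTE updates into as few hypercalls as the backend
         * can manage; nothing is guaranteed visible until the matching
         * leave call flushes the queue. */
        arch_enter_lazy_mmu_mode();
        for (addr = start; addr < end; addr += PAGE_SIZE, pte++)
                set_pte_at(mm, addr, pte, pfn_pte(pfn++, prot));
        arch_leave_lazy_mmu_mode();
}
```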
```diff
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 9faf43bea336..70ec9b951d76 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -248,18 +248,16 @@ static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
 
 static inline void enter_lazy(enum paravirt_lazy_mode mode)
 {
-	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
-	BUG_ON(preemptible());
+	BUG_ON(percpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);
 
-	__get_cpu_var(paravirt_lazy_mode) = mode;
+	percpu_write(paravirt_lazy_mode, mode);
 }
 
-void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
+static void leave_lazy(enum paravirt_lazy_mode mode)
 {
-	BUG_ON(__get_cpu_var(paravirt_lazy_mode) != mode);
-	BUG_ON(preemptible());
+	BUG_ON(percpu_read(paravirt_lazy_mode) != mode);
 
-	__get_cpu_var(paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;
+	percpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
 }
 
 void paravirt_enter_lazy_mmu(void)
@@ -269,22 +267,36 @@ void paravirt_enter_lazy_mmu(void)
 
 void paravirt_leave_lazy_mmu(void)
 {
-	paravirt_leave_lazy(PARAVIRT_LAZY_MMU);
+	leave_lazy(PARAVIRT_LAZY_MMU);
 }
 
-void paravirt_enter_lazy_cpu(void)
+void paravirt_start_context_switch(struct task_struct *prev)
 {
+	BUG_ON(preemptible());
+
+	if (percpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
+		arch_leave_lazy_mmu_mode();
+		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
+	}
 	enter_lazy(PARAVIRT_LAZY_CPU);
 }
 
-void paravirt_leave_lazy_cpu(void)
+void paravirt_end_context_switch(struct task_struct *next)
 {
-	paravirt_leave_lazy(PARAVIRT_LAZY_CPU);
+	BUG_ON(preemptible());
+
+	leave_lazy(PARAVIRT_LAZY_CPU);
+
+	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
+		arch_enter_lazy_mmu_mode();
 }
 
 enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
 {
-	return __get_cpu_var(paravirt_lazy_mode);
+	if (in_interrupt())
+		return PARAVIRT_LAZY_NONE;
+
+	return percpu_read(paravirt_lazy_mode);
 }
 
 void arch_flush_lazy_mmu_mode(void)
@@ -292,7 +304,6 @@ void arch_flush_lazy_mmu_mode(void)
 	preempt_disable();
 
 	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
-		WARN_ON(preempt_count() == 1);
 		arch_leave_lazy_mmu_mode();
 		arch_enter_lazy_mmu_mode();
 	}
@@ -300,19 +311,6 @@ void arch_flush_lazy_mmu_mode(void)
 	preempt_enable();
 }
 
-void arch_flush_lazy_cpu_mode(void)
-{
-	preempt_disable();
-
-	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
-		WARN_ON(preempt_count() == 1);
-		arch_leave_lazy_cpu_mode();
-		arch_enter_lazy_cpu_mode();
-	}
-
-	preempt_enable();
-}
-
 struct pv_info pv_info = {
 	.name = "bare hardware",
 	.paravirt_enabled = 0,
@@ -404,10 +402,8 @@ struct pv_cpu_ops pv_cpu_ops = {
 	.set_iopl_mask = native_set_iopl_mask,
 	.io_delay = native_io_delay,
 
-	.lazy_mode = {
-		.enter = paravirt_nop,
-		.leave = paravirt_nop,
-	},
+	.start_context_switch = paravirt_nop,
+	.end_context_switch = paravirt_nop,
 };
 
 struct pv_apic_ops pv_apic_ops = {
```
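Taken together, the paravirt.c changes replace the symmetric `PARAVIRT_LAZY_CPU` enter/leave pair with explicit context-switch hooks that also carry pending lazy-MMU state across the switch: the start hook flushes any open lazy-MMU batch and flags the outgoing task with `TIF_LAZY_MMU_UPDATES`, and the end hook re-enters lazy MMU mode for the incoming task if it was flagged. A backend wires the two hooks up along these lines (a sketch with a hypothetical `myhv_*` backend, modeled on the VMI conversion later in this diff):

```c
static void myhv_start_context_switch(struct task_struct *prev)
{
        paravirt_start_context_switch(prev);    /* generic bookkeeping first */
        myhv_begin_batch();     /* hypothetical: start queueing CPU state updates */
}

static void myhv_end_context_switch(struct task_struct *next)
{
        myhv_flush_batch();     /* hypothetical: push the queued updates */
        paravirt_end_context_switch(next);      /* may re-enter lazy MMU for next */
}

static void __init myhv_setup_pv_ops(void)
{
        pv_cpu_ops.start_context_switch = myhv_start_context_switch;
        pv_cpu_ops.end_context_switch = myhv_end_context_switch;
}
```

Note the ordering asymmetry: on entry the generic hook runs before the hypervisor starts batching, and on exit the batch is flushed before the generic hook runs, so generic code never executes inside a still-open batch.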
```diff
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 56d50b7d71df..c60924b5d123 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -404,7 +404,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * done before math_state_restore, so the TS bit is up
 	 * to date.
 	 */
-	arch_leave_lazy_cpu_mode();
+	arch_end_context_switch(next_p);
 
 	/* If the task has used fpu the last 5 timeslices, just do a full
 	 * restore of the math state immediately to avoid the trap; the
```
```diff
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 9d6b20e6cd80..45f010fb2e20 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -425,7 +425,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * done before math_state_restore, so the TS bit is up
 	 * to date.
 	 */
-	arch_leave_lazy_cpu_mode();
+	arch_end_context_switch(next_p);
 
 	/*
 	 * Switch FS and GS.
```
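Both `__switch_to()` hunks show only the `end` half of the new pairing; the matching `arch_start_context_switch(prev)` call sits on the caller side of the switch, in `context_switch()` in kernel/sched.c, which is outside this diffstat. The pairing across the switch path is roughly (paraphrased sketch, not verbatim kernel code):

```c
/* The start hook runs before switch_to(); the end hook runs inside
 * the incoming task's __switch_to(). */
arch_start_context_switch(prev);        /* begin batching CPU state updates */
switch_to(prev, next, prev);            /* __switch_to() eventually calls
                                         * arch_end_context_switch(next) */
```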
```diff
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 95deb9f2211e..b263423fbe2a 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -462,22 +462,28 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 }
 #endif
 
-static void vmi_enter_lazy_cpu(void)
+static void vmi_start_context_switch(struct task_struct *prev)
 {
-	paravirt_enter_lazy_cpu();
+	paravirt_start_context_switch(prev);
 	vmi_ops.set_lazy_mode(2);
 }
 
+static void vmi_end_context_switch(struct task_struct *next)
+{
+	vmi_ops.set_lazy_mode(0);
+	paravirt_end_context_switch(next);
+}
+
 static void vmi_enter_lazy_mmu(void)
 {
 	paravirt_enter_lazy_mmu();
 	vmi_ops.set_lazy_mode(1);
 }
 
-static void vmi_leave_lazy(void)
+static void vmi_leave_lazy_mmu(void)
 {
-	paravirt_leave_lazy(paravirt_get_lazy_mode());
 	vmi_ops.set_lazy_mode(0);
+	paravirt_leave_lazy_mmu();
 }
 
 static inline int __init check_vmi_rom(struct vrom_header *rom)
@@ -711,14 +717,14 @@ static inline int __init activate_vmi(void)
 	para_fill(pv_cpu_ops.set_iopl_mask, SetIOPLMask);
 	para_fill(pv_cpu_ops.io_delay, IODelay);
 
-	para_wrap(pv_cpu_ops.lazy_mode.enter, vmi_enter_lazy_cpu,
+	para_wrap(pv_cpu_ops.start_context_switch, vmi_start_context_switch,
 		  set_lazy_mode, SetLazyMode);
-	para_wrap(pv_cpu_ops.lazy_mode.leave, vmi_leave_lazy,
+	para_wrap(pv_cpu_ops.end_context_switch, vmi_end_context_switch,
 		  set_lazy_mode, SetLazyMode);
 
 	para_wrap(pv_mmu_ops.lazy_mode.enter, vmi_enter_lazy_mmu,
 		  set_lazy_mode, SetLazyMode);
-	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy,
+	para_wrap(pv_mmu_ops.lazy_mode.leave, vmi_leave_lazy_mmu,
 		  set_lazy_mode, SetLazyMode);
 
 	/* user and kernel flush are just handled with different flags to FlushTLB */
```
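The literals passed to `vmi_ops.set_lazy_mode()` encode the requested mode; from the call sites above, 0 leaves lazy mode, 1 enters lazy MMU, and 2 enters lazy CPU mode. Named values for readability (hypothetical names inferred from the call sites; the driver itself passes the literals directly):

```c
/* Inferred SetLazyMode argument encoding (names are made up). */
enum vmi_lazy_arg {
        VMI_LAZY_OFF = 0,       /* flush and leave lazy mode */
        VMI_LAZY_MMU = 1,       /* batch page-table updates */
        VMI_LAZY_CPU = 2,       /* batch CPU state across a context switch */
};
```

Also worth noting in the first vmi_32.c hunk: `vmi_end_context_switch()` tells the ROM to flush (`set_lazy_mode(0)`) *before* calling `paravirt_end_context_switch()`, mirroring the reordering in `vmi_leave_lazy_mmu()`, so the generic code always runs after the hypervisor's batch is closed.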