author    Zachary Amsden <zach@vmware.com>    2007-03-05 03:30:34 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-03-05 10:57:52 -0500
commit    7507ba34e827ca3c6bbcd34d20a8df8ba365fca6
tree      afa186f38ab55667074a06ade7ad6a1d1ec84d9e    /arch/i386/kernel/vmi.c
parent    0dc952dc3e6d96d554a19fa7bee3f3b1d55e3cff
[PATCH] vmi: timer fixes round two
Critical bugfixes for the VMI-Timer code.

1) Do not set up a one-shot alarm if we are keeping the periodic alarm armed. Additionally, since the periodic alarm can run at a lower rate than HZ, fix up the guard for no-idle-hz mode appropriately. This fixes the bug where no-idle-hz mode could end up with a higher interrupt rate than the non-idle case.

2) The interrupt handler can no longer adjust xtime, due to nested lock acquisition, so drop that adjustment. We don't need to check wallclock time at every tick; it can be done in userspace instead.

3) Add a bypass to disable noidle operation. This is useful as a last-minute workaround or as a testing measure.

4) The code that skips the IO_APIC timer test (no_timer_check) should be conditional on IO_APIC, not SMP, since UP kernels can have an IO-APIC configured in as well.

Signed-off-by: Dan Hecht <dhecht@vmware.com>
Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/i386/kernel/vmi.c')
-rw-r--r--    arch/i386/kernel/vmi.c    23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index bb5a7abf949c..8417f741fac8 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -54,6 +54,7 @@ static int disable_pse;
 static int disable_sep;
 static int disable_tsc;
 static int disable_mtrr;
+static int disable_noidle;
 
 /* Cached VMI operations */
 struct {
@@ -255,7 +256,6 @@ static void vmi_nop(void)
 }
 
 /* For NO_IDLE_HZ, we stop the clock when halting the kernel */
-#ifdef CONFIG_NO_IDLE_HZ
 static fastcall void vmi_safe_halt(void)
 {
 	int idle = vmi_stop_hz_timer();
@@ -266,7 +266,6 @@ static fastcall void vmi_safe_halt(void)
 		local_irq_enable();
 	}
 }
-#endif
 
 #ifdef CONFIG_DEBUG_PAGE_TYPE
 
@@ -742,12 +741,7 @@ static inline int __init activate_vmi(void)
 		(char *)paravirt_ops.save_fl);
 	patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
 		(char *)paravirt_ops.irq_disable);
-#ifndef CONFIG_NO_IDLE_HZ
-	para_fill(safe_halt, Halt);
-#else
-	vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
-	paravirt_ops.safe_halt = vmi_safe_halt;
-#endif
+
 	para_fill(wbinvd, WBINVD);
 	/* paravirt_ops.read_msr = vmi_rdmsr */
 	/* paravirt_ops.write_msr = vmi_wrmsr */
@@ -881,6 +875,12 @@ static inline int __init activate_vmi(void)
 #endif
 		custom_sched_clock = vmi_sched_clock;
 	}
+	if (!disable_noidle)
+		para_fill(safe_halt, Halt);
+	else {
+		vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
+		paravirt_ops.safe_halt = vmi_safe_halt;
+	}
 
 	/*
 	 * Alternative instruction rewriting doesn't happen soon enough
@@ -914,9 +914,11 @@ void __init vmi_init(void)
 
 	local_irq_save(flags);
 	activate_vmi();
-#ifdef CONFIG_SMP
+
+#ifdef CONFIG_X86_IO_APIC
 	no_timer_check = 1;
 #endif
+
 	local_irq_restore(flags & X86_EFLAGS_IF);
 }
 
@@ -942,7 +944,8 @@ static int __init parse_vmi(char *arg)
 	} else if (!strcmp(arg, "disable_mtrr")) {
 		clear_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability);
 		disable_mtrr = 1;
-	}
+	} else if (!strcmp(arg, "disable_noidle"))
+		disable_noidle = 1;
 	return 0;
 }
 
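As an aside for readers following point 3 of the commit message: below is a minimal, stand-alone C sketch (user space, buildable with any C compiler) of how the new disable_noidle flag threads from the option parsing in parse_vmi() to the halt selection in activate_vmi(). The stub halt functions, the safe_halt function pointer, and main() are illustrative stand-ins and not part of the patch; only the flag and the two branches mirror the hunks above.

/*
 * Stand-alone sketch of the disable_noidle plumbing added by this patch.
 * The halt stubs and main() are illustrative stand-ins; only the flag,
 * the parse_vmi() branch and the activate_vmi() branch mirror the diff.
 */
#include <stdio.h>
#include <string.h>

static int disable_noidle;

/* Stand-in for the relocation installed by para_fill(safe_halt, Halt). */
static void direct_vmi_halt(void)
{
	puts("safe_halt -> direct VMI Halt call");
}

/* Stand-in for vmi_safe_halt(), which in the real code brackets the halt
 * with vmi_stop_hz_timer() and the restart path. */
static void vmi_safe_halt(void)
{
	puts("safe_halt -> vmi_safe_halt wrapper (stop hz timer, halt, restart)");
}

/* Models paravirt_ops.safe_halt. */
static void (*safe_halt)(void);

/* Mirrors the new "disable_noidle" case added to parse_vmi(). */
static void parse_vmi(const char *arg)
{
	if (!strcmp(arg, "disable_noidle"))
		disable_noidle = 1;
}

/* Mirrors the new runtime branch added to activate_vmi(). */
static void activate_vmi(void)
{
	if (!disable_noidle)
		safe_halt = direct_vmi_halt;	/* para_fill(safe_halt, Halt) */
	else
		safe_halt = vmi_safe_halt;	/* paravirt_ops.safe_halt = vmi_safe_halt */
}

int main(int argc, char **argv)
{
	if (argc > 1)
		parse_vmi(argv[1]);	/* e.g. pass "disable_noidle" */
	activate_vmi();
	safe_halt();
	return 0;
}

Assuming vmi.c registers parse_vmi() as the handler for a "vmi=" boot option (the registration is not shown in this diff), the bypass would be requested at boot with something like vmi=disable_noidle.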