path: root/arch/i386/kernel/vmi.c
author	Zachary Amsden <zach@vmware.com>	2007-02-13 07:26:21 -0500
committer	Andi Kleen <andi@basil.nowhere.org>	2007-02-13 07:26:21 -0500
commit	bbab4f3bb7f528d2b8ccb5de9ae5f6ff3fb29684 (patch)
tree	141d035b9d79711e6679fadc31c9583f908dfedb	/arch/i386/kernel/vmi.c
parent	7ce0bcfd1667736f1293cff845139bbee53186de (diff)
[PATCH] i386: vMI timer patches
VMI timer code.  It works by taking over the local APIC clock when APIC is
configured, which requires a couple hooks into the APIC code.  The backend
timer code could be commonized into the timer infrastructure, but there are
some pieces missing (stolen time, in particular), and the exact semantics of
when to do accounting for NO_IDLE need to be shared between different
hypervisors as well.  So for now, VMI timer is a separate module.

[Adrian Bunk: cleanups]

Subject: VMI timer patches

Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
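The NO_IDLE_HZ part of the change follows one simple pattern: stop the
periodic tick before handing the halt over to the hypervisor, then
re-account the elapsed (possibly stolen) time with interrupts masked once
the CPU wakes.  Below is a standalone sketch of that flow; stop_tick(),
hypervisor_halt(), restart_tick_and_account() and the irq helpers are
illustrative stand-ins for the vmi_stop_hz_timer(), vmi_ops.halt(),
vmi_account_time_restart_hz_timer() and local_irq_*() calls seen in the
diff, not real interfaces.

/*
 * Standalone sketch (not kernel code) of the NO_IDLE_HZ halt flow that
 * vmi_safe_halt() implements in the diff below.  All helpers here are
 * illustrative stubs so the example compiles and runs on its own.
 */
#include <stdio.h>

static int  stop_tick(void)                { puts("periodic tick stopped"); return 1; }
static void hypervisor_halt(void)          { puts("halted in the hypervisor"); }
static void restart_tick_and_account(void) { puts("tick restarted, idle/stolen time accounted"); }
static void irq_disable(void)              { puts("interrupts masked"); }
static void irq_enable(void)               { puts("interrupts unmasked"); }

static void sketch_safe_halt(void)
{
	int idle = stop_tick();		/* try to stop the tick before halting */

	hypervisor_halt();		/* block until the hypervisor delivers an event */

	if (idle) {
		irq_disable();		/* re-account with interrupts off */
		restart_tick_and_account();
		irq_enable();
	}
}

int main(void)
{
	sketch_safe_halt();
	return 0;
}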
Diffstat (limited to 'arch/i386/kernel/vmi.c')
-rw-r--r--	arch/i386/kernel/vmi.c	45
1 files changed, 45 insertions, 0 deletions
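The timer wiring in the last hunk of the diff below uses the same
probe-and-fill pattern as the rest of vmi.c: ask the ROM whether a call is
implemented, and only then point the ops table (and the generic paravirt
hooks) at the ROM entry.  The following standalone sketch shows that
pattern; rom_has(), rom_lookup() and the small ops struct are made-up
stand-ins for the call_vrom_long_func()/vmi_get_function() probing done in
the real code.

/*
 * Standalone sketch (not kernel code) of the probe-and-fill pattern used
 * by the last hunk below.  rom_has(), rom_lookup() and timer_ops are
 * illustrative stand-ins only.
 */
#include <stdio.h>

typedef unsigned long long cycles_t;
typedef cycles_t (*cycle_fn)(void);

static cycles_t native_cycles(void) { return 0; }	/* default implementation */
static cycles_t rom_cycles(void)    { return 42; }	/* pretend ROM implementation */

static struct {
	cycle_fn get_cycle_counter;
} timer_ops = { .get_cycle_counter = native_cycles };

/* Pretend the ROM advertises the call; the real code checks rel->type. */
static int rom_has(const char *call)         { (void)call; return 1; }
static cycle_fn rom_lookup(const char *call) { (void)call; return rom_cycles; }

int main(void)
{
	if (rom_has("GetCycleCounter"))
		timer_ops.get_cycle_counter = rom_lookup("GetCycleCounter");

	printf("cycles = %llu\n", timer_ops.get_cycle_counter());
	return 0;
}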
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index a94d64b10f75..bb5a7abf949c 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -34,6 +34,7 @@
 #include <asm/apic.h>
 #include <asm/processor.h>
 #include <asm/timer.h>
+#include <asm/vmi_time.h>
 
 /* Convenient for calling VMI functions indirectly in the ROM */
 typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
@@ -67,6 +68,7 @@ struct {
 	void (*set_linear_mapping)(int, u32, u32, u32);
 	void (*flush_tlb)(int);
 	void (*set_initial_ap_state)(int, int);
+	void (*halt)(void);
 } vmi_ops;
 
 /* XXX move this to alternative.h */
@@ -252,6 +254,19 @@ static void vmi_nop(void)
 {
 }
 
+/* For NO_IDLE_HZ, we stop the clock when halting the kernel */
+#ifdef CONFIG_NO_IDLE_HZ
+static fastcall void vmi_safe_halt(void)
+{
+	int idle = vmi_stop_hz_timer();
+	vmi_ops.halt();
+	if (idle) {
+		local_irq_disable();
+		vmi_account_time_restart_hz_timer();
+		local_irq_enable();
+	}
+}
+#endif
 
 #ifdef CONFIG_DEBUG_PAGE_TYPE
 
@@ -727,7 +742,12 @@ static inline int __init activate_vmi(void)
 		     (char *)paravirt_ops.save_fl);
 	patch_offset(&irq_save_disable_callout[IRQ_PATCH_DISABLE],
 		     (char *)paravirt_ops.irq_disable);
+#ifndef CONFIG_NO_IDLE_HZ
 	para_fill(safe_halt, Halt);
+#else
+	vmi_ops.halt = vmi_get_function(VMI_CALL_Halt);
+	paravirt_ops.safe_halt = vmi_safe_halt;
+#endif
 	para_fill(wbinvd, WBINVD);
 	/* paravirt_ops.read_msr = vmi_rdmsr */
 	/* paravirt_ops.write_msr = vmi_wrmsr */
@@ -838,6 +858,31 @@ static inline int __init activate_vmi(void)
 #endif
 
 	/*
+	 * Check for VMI timer functionality by probing for a cycle frequency method
+	 */
+	reloc = call_vrom_long_func(vmi_rom, get_reloc, VMI_CALL_GetCycleFrequency);
+	if (rel->type != VMI_RELOCATION_NONE) {
+		vmi_timer_ops.get_cycle_frequency = (void *)rel->eip;
+		vmi_timer_ops.get_cycle_counter =
+			vmi_get_function(VMI_CALL_GetCycleCounter);
+		vmi_timer_ops.get_wallclock =
+			vmi_get_function(VMI_CALL_GetWallclockTime);
+		vmi_timer_ops.wallclock_updated =
+			vmi_get_function(VMI_CALL_WallclockUpdated);
+		vmi_timer_ops.set_alarm = vmi_get_function(VMI_CALL_SetAlarm);
+		vmi_timer_ops.cancel_alarm =
+			vmi_get_function(VMI_CALL_CancelAlarm);
+		paravirt_ops.time_init = vmi_time_init;
+		paravirt_ops.get_wallclock = vmi_get_wallclock;
+		paravirt_ops.set_wallclock = vmi_set_wallclock;
+#ifdef CONFIG_X86_LOCAL_APIC
+		paravirt_ops.setup_boot_clock = vmi_timer_setup_boot_alarm;
+		paravirt_ops.setup_secondary_clock = vmi_timer_setup_secondary_alarm;
+#endif
+		custom_sched_clock = vmi_sched_clock;
+	}
+
+	/*
 	 * Alternative instruction rewriting doesn't happen soon enough
 	 * to convert VMI_IRET to a call instead of a jump; so we have
 	 * to do this before IRQs get reenabled.  Fortunately, it is