author     Jeremy Fitzhardinge <jeremy@xensource.com>  2007-07-17 21:37:04 -0400
committer  Jeremy Fitzhardinge <jeremy@goop.org>       2007-07-18 11:47:42 -0400
commit     688340ea34c61ad12473ccd837325b59aada9a93
tree       2862f4dca8d47fc4e6ecfaba2243d813344e3cd2 /arch/i386/kernel
parent     d572929cdd12a60732c3522f7cf011bfa29165cf
Add a sched_clock paravirt_op
The tsc-based get_scheduled_cycles interface is not a good match for
Xen's runstate accounting, which reports everything in nanoseconds.
This patch replaces it with a sched_clock interface, which matches
both Xen's and VMI's requirements.
In order to do this, we:
1. replace get_scheduled_cycles with sched_clock
2. hoist cycles_2_ns into a common header (sketched just after this list)
3. update vmi accordingly
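For reference, the hoisted helper is just a fixed-point conversion built
on cyc2ns_scale, which loses its static qualifier in the tsc.c hunk below
so that a header inline can reach it. A minimal sketch of the shared-header
version follows; the exact header location is an assumption, since this
diffstat is limited to arch/i386/kernel and does not show the header change:

	/* Sketch of the hoisted helper; header location assumed, since
	 * the diff below is limited to arch/i386/kernel. */
	extern unsigned long cyc2ns_scale;	/* set by set_cyc2ns_scale() in tsc.c */

	#define CYC2NS_SCALE_FACTOR 10		/* 2^10, carefully chosen */

	/* Convert TSC cycles to nanoseconds with the precomputed
	 * fixed-point scale (body taken verbatim from the version
	 * removed from tsc.c below). */
	static inline unsigned long long cycles_2_ns(unsigned long long cyc)
	{
		return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
	}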
One thing to note: because sched_clock is implemented as a weak
function in kernel/sched.c, we must define a real function in order to
override this weak binding. This means the usual paravirt_ops
technique of using an inline function won't work in this case.
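To make the weak-binding point concrete: the generic kernel supplies a
fallback along these lines (a sketch of the kernel/sched.c default of this
era, not an exact quote), and a strong out-of-line definition of the same
symbol in another object file replaces it at link time. An inline function
emits no symbol at all, so it cannot take part in that override:

	/* Weak default in kernel/sched.c (sketch): any strong
	 * sched_clock() definition elsewhere wins at link time. */
	unsigned long long __attribute__((weak)) sched_clock(void)
	{
		return (unsigned long long)jiffies * (1000000000 / HZ);
	}

Hence the #ifdef CONFIG_PARAVIRT block added to tsc.c below: a real
out-of-line sched_clock() that forwards to paravirt_sched_clock(), with a
non-paravirt alias to native_sched_clock() otherwise.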
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Zachary Amsden <zach@vmware.com>
Cc: Dan Hecht <dhecht@vmware.com>
Cc: john stultz <johnstul@us.ibm.com>
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/paravirt.c |  2
-rw-r--r--  arch/i386/kernel/tsc.c      | 23
-rw-r--r--  arch/i386/kernel/vmi.c      |  2
-rw-r--r--  arch/i386/kernel/vmiclock.c |  6
4 files changed, 20 insertions(+), 13 deletions(-)
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 60e08b9b50a4..53f07a8275e3 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -302,7 +302,7 @@ struct paravirt_ops paravirt_ops = {
 	.write_msr = native_write_msr_safe,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
-	.get_scheduled_cycles = native_read_tsc,
+	.sched_clock = native_sched_clock,
 	.get_cpu_khz = native_calculate_cpu_khz,
 	.load_tr_desc = native_load_tr_desc,
 	.set_ldt = native_set_ldt,
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index ea63a30ca3e8..252f9010f283 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -84,7 +84,7 @@ static inline int check_tsc_unstable(void)
  *
  * -johnstul@us.ibm.com "math is hard, lets go shopping!"
  */
-static unsigned long cyc2ns_scale __read_mostly;
+unsigned long cyc2ns_scale __read_mostly;
 
 #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
 
@@ -93,15 +93,10 @@ static inline void set_cyc2ns_scale(unsigned long cpu_khz)
 	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
 }
 
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
-	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
-}
-
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
-unsigned long long sched_clock(void)
+unsigned long long native_sched_clock(void)
 {
 	unsigned long long this_offset;
 
@@ -118,12 +113,24 @@ unsigned long long sched_clock(void)
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
 	/* read the Time Stamp Counter: */
-	get_scheduled_cycles(this_offset);
+	rdtscll(this_offset);
 
 	/* return the value in ns */
 	return cycles_2_ns(this_offset);
 }
 
+/* We need to define a real function for sched_clock, to override the
+   weak default version */
+#ifdef CONFIG_PARAVIRT
+unsigned long long sched_clock(void)
+{
+	return paravirt_sched_clock();
+}
+#else
+unsigned long long sched_clock(void)
+	__attribute__((alias("native_sched_clock")));
+#endif
+
 unsigned long native_calculate_cpu_khz(void)
 {
 	unsigned long long start, end;
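As a sanity check on the fixed-point math preserved above: for an assumed
2 GHz CPU (cpu_khz = 2000000), set_cyc2ns_scale() computes

	unsigned long cyc2ns_scale = (1000000 << 10) / 2000000;	/* = 512 */
	/* cycles_2_ns(4000000000ULL) = (4000000000 * 512) >> 10
	 *                            = 2000000000 ns = 2 s,
	 * i.e. exactly 4e9 cycles at 2 GHz. */

so sched_clock() keeps returning nanoseconds whether the cycles come from
rdtscll() natively or from a hypervisor's cycle counter.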
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index 234bd6ff518d..72042bb7ec94 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -891,7 +891,7 @@ static inline int __init activate_vmi(void)
 	paravirt_ops.setup_boot_clock = vmi_time_bsp_init;
 	paravirt_ops.setup_secondary_clock = vmi_time_ap_init;
 #endif
-	paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
+	paravirt_ops.sched_clock = vmi_sched_clock;
 	paravirt_ops.get_cpu_khz = vmi_cpu_khz;
 
 	/* We have true wallclock functions; disable CMOS clock sync */
diff --git a/arch/i386/kernel/vmiclock.c b/arch/i386/kernel/vmiclock.c
index 26a37f8a8762..f9b845f4e692 100644
--- a/arch/i386/kernel/vmiclock.c
+++ b/arch/i386/kernel/vmiclock.c
@@ -64,10 +64,10 @@ int vmi_set_wallclock(unsigned long now)
 	return 0;
 }
 
-/* paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles */
-unsigned long long vmi_get_sched_cycles(void)
+/* paravirt_ops.sched_clock = vmi_sched_clock */
+unsigned long long vmi_sched_clock(void)
 {
-	return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE);
+	return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
 }
 
 /* paravirt_ops.get_cpu_khz = vmi_cpu_khz */
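For contrast with the VMI case above, which still counts cycles and so must
convert via cycles_2_ns(), a backend whose hypervisor already accounts time
in nanoseconds (the Xen runstate case this patch prepares for) can return
its value directly. A hypothetical sketch; every name below is illustrative
and not part of this patch:

	/* Hypothetical nanosecond-native backend; names are illustrative. */
	extern unsigned long long example_hv_read_runtime_ns(void);

	static unsigned long long example_hv_sched_clock(void)
	{
		/* The contract after this patch is "return scheduler time
		 * in ns", so no cycles_2_ns() conversion is needed here. */
		return example_hv_read_runtime_ns();
	}

	static void __init example_hv_time_init(void)
	{
		paravirt_ops.sched_clock = example_hv_sched_clock;
	}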