Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/paravirt.c    2
-rw-r--r--  arch/i386/kernel/tsc.c        23
-rw-r--r--  arch/i386/kernel/vmi.c         2
-rw-r--r--  arch/i386/kernel/vmiclock.c    6
4 files changed, 20 insertions, 13 deletions
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 60e08b9b50a4..53f07a8275e3 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -302,7 +302,7 @@ struct paravirt_ops paravirt_ops = {
 	.write_msr = native_write_msr_safe,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
-	.get_scheduled_cycles = native_read_tsc,
+	.sched_clock = native_sched_clock,
 	.get_cpu_khz = native_calculate_cpu_khz,
 	.load_tr_desc = native_load_tr_desc,
 	.set_ldt = native_set_ldt,
diff --git a/arch/i386/kernel/tsc.c b/arch/i386/kernel/tsc.c
index ea63a30ca3e8..252f9010f283 100644
--- a/arch/i386/kernel/tsc.c
+++ b/arch/i386/kernel/tsc.c
@@ -84,7 +84,7 @@ static inline int check_tsc_unstable(void)
  *
  *			-johnstul@us.ibm.com "math is hard, lets go shopping!"
  */
-static unsigned long cyc2ns_scale __read_mostly;
+unsigned long cyc2ns_scale __read_mostly;
 
 #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
 
@@ -93,15 +93,10 @@ static inline void set_cyc2ns_scale(unsigned long cpu_khz)
 	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
 }
 
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
-	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
-}
-
 /*
  * Scheduler clock - returns current time in nanosec units.
  */
-unsigned long long sched_clock(void)
+unsigned long long native_sched_clock(void)
 {
 	unsigned long long this_offset;
 
@@ -118,12 +113,24 @@ unsigned long long sched_clock(void)
 		return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
 
 	/* read the Time Stamp Counter: */
-	get_scheduled_cycles(this_offset);
+	rdtscll(this_offset);
 
 	/* return the value in ns */
 	return cycles_2_ns(this_offset);
 }
 
+/* We need to define a real function for sched_clock, to override the
+   weak default version */
+#ifdef CONFIG_PARAVIRT
+unsigned long long sched_clock(void)
+{
+	return paravirt_sched_clock();
+}
+#else
+unsigned long long sched_clock(void)
+	__attribute__((alias("native_sched_clock")));
+#endif
+
 unsigned long native_calculate_cpu_khz(void)
 {
 	unsigned long long start, end;
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index 234bd6ff518d..72042bb7ec94 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -891,7 +891,7 @@ static inline int __init activate_vmi(void)
 	paravirt_ops.setup_boot_clock = vmi_time_bsp_init;
 	paravirt_ops.setup_secondary_clock = vmi_time_ap_init;
 #endif
-	paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
+	paravirt_ops.sched_clock = vmi_sched_clock;
 	paravirt_ops.get_cpu_khz = vmi_cpu_khz;
 
 	/* We have true wallclock functions; disable CMOS clock sync */
diff --git a/arch/i386/kernel/vmiclock.c b/arch/i386/kernel/vmiclock.c
index 26a37f8a8762..f9b845f4e692 100644
--- a/arch/i386/kernel/vmiclock.c
+++ b/arch/i386/kernel/vmiclock.c
@@ -64,10 +64,10 @@ int vmi_set_wallclock(unsigned long now)
 	return 0;
 }
 
-/* paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles */
-unsigned long long vmi_get_sched_cycles(void)
+/* paravirt_ops.sched_clock = vmi_sched_clock */
+unsigned long long vmi_sched_clock(void)
 {
-	return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE);
+	return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
 }
 
 /* paravirt_ops.get_cpu_khz = vmi_cpu_khz */
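
Note: the cycles_2_ns() conversion this patch leans on (removed from tsc.c above yet still called by native_sched_clock() and the new vmi_sched_clock(), so it presumably moves to a shared header outside the arch/ subtree shown in this diffstat) multiplies a cycle count by a precomputed ns-per-cycle factor scaled by 2^CYC2NS_SCALE_FACTOR. A minimal standalone sketch of that fixed-point conversion, using a hypothetical 2 GHz (2,000,000 kHz) TSC purely for illustration:

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10	/* 2^10, carefully chosen */

static unsigned long cyc2ns_scale;

/* precompute ns-per-cycle as a 2^10 fixed-point value: (10^6 << 10) / cpu_khz */
static void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

/* cycles -> ns: multiply by the scaled factor, then shift the scale back out */
static unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

int main(void)
{
	set_cyc2ns_scale(2000000);			/* hypothetical 2 GHz TSC, i.e. 2,000,000 kHz */
	printf("%llu ns\n", cycles_2_ns(2000000));	/* prints 1000000 ns (1 ms) */
	return 0;
}

With cpu_khz = 2000000 the scale becomes (1000000 << 10) / 2000000 = 512, so 2,000,000 cycles convert to (2000000 * 512) >> 10 = 1,000,000 ns, i.e. exactly 1 ms for a 2 GHz counter.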