diff options
| author | Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 2010-07-12 14:49:59 -0400 |
|---|---|---|
| committer | Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 2010-08-04 17:47:29 -0400 |
| commit | 8a22b9996b001c88f2bfb54c6de6a05fc39e177a (patch) | |
| tree | faf40add399c0652c7d2b2cdf0f01d805a91e207 | |
| parent | 31de189f7d02da163f77d46a86d9e655a2d83124 (diff) | |
xen: drop xen_sched_clock in favour of using plain wallclock time
xen_sched_clock only counts unstolen time. In principle this should
be useful to the Linux scheduler, so that it knows how much time a process
actually consumed. But in practice this doesn't work very well, because the
scheduler expects the sched_clock time to be synchronized between
cpus. The scheduler also uses sched_clock to measure the time a task spends
sleeping, in which case "unstolen time" isn't meaningful.
So just use plain xen_clocksource_read to return wallclock nanoseconds
for sched_clock.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
| -rw-r--r-- | arch/x86/xen/enlighten.c | 2 | ||||
| -rw-r--r-- | arch/x86/xen/time.c | 39 |
2 files changed, 1 insertion, 40 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 399bed2de881..fef034a04c24 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
| @@ -926,7 +926,7 @@ static const struct pv_init_ops xen_init_ops __initdata = { | |||
| 926 | }; | 926 | }; |
| 927 | 927 | ||
| 928 | static const struct pv_time_ops xen_time_ops __initdata = { | 928 | static const struct pv_time_ops xen_time_ops __initdata = { |
| 929 | .sched_clock = xen_sched_clock, | 929 | .sched_clock = xen_clocksource_read, |
| 930 | }; | 930 | }; |
| 931 | 931 | ||
| 932 | static const struct pv_cpu_ops xen_cpu_ops __initdata = { | 932 | static const struct pv_cpu_ops xen_cpu_ops __initdata = { |
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 32764b8880b5..e90360ff4a08 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
| @@ -155,45 +155,6 @@ static void do_stolen_accounting(void) | |||
| 155 | account_idle_ticks(ticks); | 155 | account_idle_ticks(ticks); |
| 156 | } | 156 | } |
| 157 | 157 | ||
| 158 | /* | ||
| 159 | * Xen sched_clock implementation. Returns the number of unstolen | ||
| 160 | * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED | ||
| 161 | * states. | ||
| 162 | */ | ||
| 163 | unsigned long long xen_sched_clock(void) | ||
| 164 | { | ||
| 165 | struct vcpu_runstate_info state; | ||
| 166 | cycle_t now; | ||
| 167 | u64 ret; | ||
| 168 | s64 offset; | ||
| 169 | |||
| 170 | /* | ||
| 171 | * Ideally sched_clock should be called on a per-cpu basis | ||
| 172 | * anyway, so preempt should already be disabled, but that's | ||
| 173 | * not current practice at the moment. | ||
| 174 | */ | ||
| 175 | preempt_disable(); | ||
| 176 | |||
| 177 | now = xen_clocksource_read(); | ||
| 178 | |||
| 179 | get_runstate_snapshot(&state); | ||
| 180 | |||
| 181 | WARN_ON(state.state != RUNSTATE_running); | ||
| 182 | |||
| 183 | offset = now - state.state_entry_time; | ||
| 184 | if (offset < 0) | ||
| 185 | offset = 0; | ||
| 186 | |||
| 187 | ret = state.time[RUNSTATE_blocked] + | ||
| 188 | state.time[RUNSTATE_running] + | ||
| 189 | offset; | ||
| 190 | |||
| 191 | preempt_enable(); | ||
| 192 | |||
| 193 | return ret; | ||
| 194 | } | ||
| 195 | |||
| 196 | |||
| 197 | /* Get the TSC speed from Xen */ | 158 | /* Get the TSC speed from Xen */ |
| 198 | unsigned long xen_tsc_khz(void) | 159 | unsigned long xen_tsc_khz(void) |
| 199 | { | 160 | { |
