author     Jeremy Fitzhardinge <jeremy@xensource.com>   2007-07-17 21:37:06 -0400
committer  Jeremy Fitzhardinge <jeremy@goop.org>        2007-07-18 11:47:44 -0400
commit     f120f13ea0dbb0b0d6675683d5f6faea71277e65
tree       6b525ab73bedfa78e43dee303ac991099377e9c5
parent     f87e4cac4f4e940b328d3deb5b53e642e3881f43

xen: Add support for preemption

Add Xen support for preemption.  This is mostly a cleanup of existing
preempt_enable/disable calls, or just comments to explain the current
usage.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Diffstat (limited to 'arch/i386/xen/time.c')
-rw-r--r--  arch/i386/xen/time.c  | 22 +++++++++++++++++-----
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/arch/i386/xen/time.c b/arch/i386/xen/time.c
index aeb04cf5dbf1..51fdabf1fd4d 100644
--- a/arch/i386/xen/time.c
+++ b/arch/i386/xen/time.c
@@ -88,7 +88,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 	u64 state_time;
 	struct vcpu_runstate_info *state;
 
-	preempt_disable();
+	BUG_ON(preemptible());
 
 	state = &__get_cpu_var(runstate);
 
@@ -103,8 +103,6 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 		*res = *state;
 		barrier();
 	} while (get64(&state->state_entry_time) != state_time);
-
-	preempt_enable();
 }
 
 static void setup_runstate_info(int cpu)
@@ -179,9 +177,19 @@ static void do_stolen_accounting(void)
 unsigned long long xen_sched_clock(void)
 {
 	struct vcpu_runstate_info state;
-	cycle_t now = xen_clocksource_read();
+	cycle_t now;
+	u64 ret;
 	s64 offset;
 
+	/*
+	 * Ideally sched_clock should be called on a per-cpu basis
+	 * anyway, so preempt should already be disabled, but that's
+	 * not current practice at the moment.
+	 */
+	preempt_disable();
+
+	now = xen_clocksource_read();
+
 	get_runstate_snapshot(&state);
 
 	WARN_ON(state.state != RUNSTATE_running);
@@ -190,9 +198,13 @@ unsigned long long xen_sched_clock(void)
 	if (offset < 0)
 		offset = 0;
 
-	return state.time[RUNSTATE_blocked] +
-		state.time[RUNSTATE_running] +
-		offset;
+	ret = state.time[RUNSTATE_blocked] +
+		state.time[RUNSTATE_running] +
+		offset;
+
+	preempt_enable();
+
+	return ret;
 }
 
 
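Usage note (illustrative only, not from the patch): after this change any caller of get_runstate_snapshot() is responsible for keeping preemption disabled across the call, exactly as xen_sched_clock() now does. A hypothetical new caller, assuming the RUNSTATE_* values from the Xen VCPU interface, would follow the same pattern:

/* Hypothetical caller, for illustration only. */
static u64 demo_stolen_time(void)
{
	struct vcpu_runstate_info state;

	preempt_disable();		/* required by get_runstate_snapshot() */
	get_runstate_snapshot(&state);
	preempt_enable();

	/* Time spent runnable-but-not-running or offline, i.e. "stolen". */
	return state.time[RUNSTATE_runnable] + state.time[RUNSTATE_offline];
}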