path: root/arch/s390/kernel/vtime.c
author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2012-07-20 05:15:08 -0400
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2012-07-20 05:15:08 -0400
commit    27f6b416626a240e1b46f646d2e0c5266f4eac95 (patch)
tree      4549855d7996ce9d18e1586e2f0bfb5fa5835718 /arch/s390/kernel/vtime.c
parent    921486b92bcb1b82ab6668dcbb36d05604966351 (diff)
s390/vtimer: rework virtual timer interface
The current virtual timer interface is inherently per-cpu and hard to use.
The sole user of the interface is appldata, which uses it to execute a
function after a specific amount of cputime has been used over all cpus.

Rework the virtual timer interface to hook into the cputime accounting.
This makes the interface independent from the CPU timer interrupts, and
makes the virtual timers global as opposed to per-cpu.
Overall the code is greatly simplified. The downside is that the accuracy
is not as good as the original implementation, but it is still good enough
for appldata.

Reviewed-by: Jan Glauber <jang@linux.vnet.ibm.com>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
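For orientation, here is a minimal, hypothetical usage sketch of the reworked interface described above: a periodic virtual timer in the style of appldata, whose callback runs once the requested amount of cputime has been consumed over all cpus. The field names follow the vtimer_list layout used by this file; the callback signature void (*function)(unsigned long) and the unit of the expires value (cputime in CPU-timer units) are assumptions, not part of this patch.

/*
 * Hypothetical usage sketch (not part of this patch): a periodic
 * virtual timer driven by the reworked, global vtimer interface.
 */
#include <asm/vtimer.h>

static struct vtimer_list sample_vtimer;	/* example timer, hypothetical */

static void sample_vtimer_fn(unsigned long data)
{
	/*
	 * Called from the cputime accounting path once enough cputime has
	 * been consumed; with add_virt_timer_periodic() the timer is
	 * recharged automatically after each expiry.
	 */
}

static void sample_vtimer_start(u64 cputime_interval)
{
	init_virt_timer(&sample_vtimer);
	sample_vtimer.function = sample_vtimer_fn;
	sample_vtimer.data = 0;
	/* expires is assumed to be cputime in CPU-timer units */
	sample_vtimer.expires = cputime_interval;
	add_virt_timer_periodic(&sample_vtimer);
}

static void sample_vtimer_stop(void)
{
	/* del_virt_timer() returns 1 if the timer was still pending. */
	del_virt_timer(&sample_vtimer);
}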
Diffstat (limited to 'arch/s390/kernel/vtime.c')
-rw-r--r--  arch/s390/kernel/vtime.c  370
1 file changed, 131 insertions(+), 239 deletions(-)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 39ebff506946..4fc97b40a6e1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -1,71 +1,82 @@
 /*
- * arch/s390/kernel/vtime.c
  * Virtual cpu timer based timer functions.
  *
- * S390 version
- * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2004, 2012
  * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
  */

-#include <linux/module.h>
+#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/types.h>
 #include <linux/timex.h>
-#include <linux/notifier.h>
-#include <linux/kernel_stat.h>
-#include <linux/rcupdate.h>
-#include <linux/posix-timers.h>
+#include <linux/types.h>
+#include <linux/time.h>
 #include <linux/cpu.h>
-#include <linux/kprobes.h>
+#include <linux/smp.h>

-#include <asm/timer.h>
 #include <asm/irq_regs.h>
 #include <asm/cputime.h>
+#include <asm/vtimer.h>
 #include <asm/irq.h>
 #include "entry.h"

-static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
+static void virt_timer_expire(void);

 DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

-static inline __u64 get_vtimer(void)
+static LIST_HEAD(virt_timer_list);
+static DEFINE_SPINLOCK(virt_timer_lock);
+static atomic64_t virt_timer_current;
+static atomic64_t virt_timer_elapsed;
+
+static inline u64 get_vtimer(void)
 {
-	__u64 timer;
+	u64 timer;

-	asm volatile("STPT %0" : "=m" (timer));
+	asm volatile("stpt %0" : "=m" (timer));
 	return timer;
 }

-static inline void set_vtimer(__u64 expires)
+static inline void set_vtimer(u64 expires)
 {
-	__u64 timer;
+	u64 timer;

-	asm volatile (" STPT %0\n" /* Store current cpu timer value */
-		      " SPT %1" /* Set new value immediately afterwards */
-		      : "=m" (timer) : "m" (expires) );
+	asm volatile(
+		" stpt %0\n" /* Store current cpu timer value */
+		" spt %1" /* Set new value imm. afterwards */
+		: "=m" (timer) : "m" (expires));
 	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
 	S390_lowcore.last_update_timer = expires;
 }

+static inline int virt_timer_forward(u64 elapsed)
+{
+	BUG_ON(!irqs_disabled());
+
+	if (list_empty(&virt_timer_list))
+		return 0;
+	elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
+	return elapsed >= atomic64_read(&virt_timer_current);
+}
+
 /*
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  */
-static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 {
 	struct thread_info *ti = task_thread_info(tsk);
-	__u64 timer, clock, user, system, steal;
+	u64 timer, clock, user, system, steal;

 	timer = S390_lowcore.last_update_timer;
 	clock = S390_lowcore.last_update_clock;
-	asm volatile (" STPT %0\n" /* Store current cpu timer value */
-		      " STCK %1" /* Store current tod clock value */
-		      : "=m" (S390_lowcore.last_update_timer),
-		      "=m" (S390_lowcore.last_update_clock) );
+	asm volatile(
+		" stpt %0\n" /* Store current cpu timer value */
+		" stck %1" /* Store current tod clock value */
+		: "=m" (S390_lowcore.last_update_timer),
+		  "=m" (S390_lowcore.last_update_clock));
 	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;

@@ -84,6 +95,8 @@ static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 		S390_lowcore.steal_timer = 0;
 		account_steal_time(steal);
 	}
+
+	return virt_timer_forward(user + system);
 }

 void account_vtime(struct task_struct *prev, struct task_struct *next)
@@ -101,7 +114,8 @@ void account_vtime(struct task_struct *prev, struct task_struct *next)

 void account_process_tick(struct task_struct *tsk, int user_tick)
 {
-	do_account_vtime(tsk, HARDIRQ_OFFSET);
+	if (do_account_vtime(tsk, HARDIRQ_OFFSET))
+		virt_timer_expire();
 }

 /*
@@ -111,7 +125,7 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
 void account_system_vtime(struct task_struct *tsk)
 {
 	struct thread_info *ti = task_thread_info(tsk);
-	__u64 timer, system;
+	u64 timer, system;

 	timer = S390_lowcore.last_update_timer;
 	S390_lowcore.last_update_timer = get_vtimer();
@@ -121,13 +135,14 @@ void account_system_vtime(struct task_struct *tsk)
 	S390_lowcore.steal_timer -= system;
 	ti->system_timer = S390_lowcore.system_timer;
 	account_system_time(tsk, 0, system, system);
+
+	virt_timer_forward(system);
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);

 void __kprobes vtime_stop_cpu(void)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
-	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
 	unsigned long long idle_time;
 	unsigned long psw_mask;

@@ -141,7 +156,7 @@ void __kprobes vtime_stop_cpu(void)
 	idle->nohz_delay = 0;

 	/* Call the assembler magic in entry.S */
-	psw_idle(idle, vq, psw_mask, !list_empty(&vq->list));
+	psw_idle(idle, psw_mask);

 	/* Reenable preemption tracer. */
 	start_critical_timings();
@@ -149,9 +164,9 @@ void __kprobes vtime_stop_cpu(void)
 	/* Account time spent with enabled wait psw loaded as idle time. */
 	idle->sequence++;
 	smp_wmb();
-	idle_time = idle->idle_exit - idle->idle_enter;
+	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
+	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
 	idle->idle_time += idle_time;
-	idle->idle_enter = idle->idle_exit = 0ULL;
 	idle->idle_count++;
 	account_idle_time(idle_time);
 	smp_wmb();
@@ -167,10 +182,10 @@ cputime64_t s390_get_idle_time(int cpu)
 	do {
 		now = get_clock();
 		sequence = ACCESS_ONCE(idle->sequence);
-		idle_enter = ACCESS_ONCE(idle->idle_enter);
-		idle_exit = ACCESS_ONCE(idle->idle_exit);
+		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
 	} while ((sequence & 1) || (idle->sequence != sequence));
-	return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+	return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
 }

 /*
@@ -179,11 +194,11 @@ cputime64_t s390_get_idle_time(int cpu)
  */
 static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
 {
-	struct vtimer_list *event;
+	struct vtimer_list *tmp;

-	list_for_each_entry(event, head, entry) {
-		if (event->expires > timer->expires) {
-			list_add_tail(&timer->entry, &event->entry);
+	list_for_each_entry(tmp, head, entry) {
+		if (tmp->expires > timer->expires) {
+			list_add_tail(&timer->entry, &tmp->entry);
 			return;
 		}
 	}
@@ -191,82 +206,45 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
 }

 /*
- * Do the callback functions of expired vtimer events.
- * Called from within the interrupt handler.
- */
-static void do_callbacks(struct list_head *cb_list)
-{
-	struct vtimer_queue *vq;
-	struct vtimer_list *event, *tmp;
-
-	if (list_empty(cb_list))
-		return;
-
-	vq = &__get_cpu_var(virt_cpu_timer);
-
-	list_for_each_entry_safe(event, tmp, cb_list, entry) {
-		list_del_init(&event->entry);
-		(event->function)(event->data);
-		if (event->interval) {
-			/* Recharge interval timer */
-			event->expires = event->interval + vq->elapsed;
-			spin_lock(&vq->lock);
-			list_add_sorted(event, &vq->list);
-			spin_unlock(&vq->lock);
-		}
-	}
-}
-
-/*
- * Handler for the virtual CPU timer.
+ * Handler for expired virtual CPU timer.
  */
-static void do_cpu_timer_interrupt(struct ext_code ext_code,
-				   unsigned int param32, unsigned long param64)
+static void virt_timer_expire(void)
 {
-	struct vtimer_queue *vq;
-	struct vtimer_list *event, *tmp;
-	struct list_head cb_list;	/* the callback queue */
-	__u64 elapsed, next;
-
-	kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
-	INIT_LIST_HEAD(&cb_list);
-	vq = &__get_cpu_var(virt_cpu_timer);
-
-	/* walk timer list, fire all expired events */
-	spin_lock(&vq->lock);
-
-	elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
-	BUG_ON((s64) elapsed < 0);
-	vq->elapsed = 0;
-	list_for_each_entry_safe(event, tmp, &vq->list, entry) {
-		if (event->expires < elapsed)
+	struct vtimer_list *timer, *tmp;
+	unsigned long elapsed;
+	LIST_HEAD(cb_list);
+
+	/* walk timer list, fire all expired timers */
+	spin_lock(&virt_timer_lock);
+	elapsed = atomic64_read(&virt_timer_elapsed);
+	list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
+		if (timer->expires < elapsed)
 			/* move expired timer to the callback queue */
-			list_move_tail(&event->entry, &cb_list);
+			list_move_tail(&timer->entry, &cb_list);
 		else
-			event->expires -= elapsed;
+			timer->expires -= elapsed;
 	}
-	spin_unlock(&vq->lock);
-
-	do_callbacks(&cb_list);
-
-	/* next event is first in list */
-	next = VTIMER_MAX_SLICE;
-	spin_lock(&vq->lock);
-	if (!list_empty(&vq->list)) {
-		event = list_first_entry(&vq->list, struct vtimer_list, entry);
-		next = event->expires;
+	if (!list_empty(&virt_timer_list)) {
+		timer = list_first_entry(&virt_timer_list,
+					 struct vtimer_list, entry);
+		atomic64_set(&virt_timer_current, timer->expires);
+	}
+	atomic64_sub(elapsed, &virt_timer_elapsed);
+	spin_unlock(&virt_timer_lock);
+
+	/* Do callbacks and recharge periodic timers */
+	list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
+		list_del_init(&timer->entry);
+		timer->function(timer->data);
+		if (timer->interval) {
+			/* Recharge interval timer */
+			timer->expires = timer->interval +
+				atomic64_read(&virt_timer_elapsed);
+			spin_lock(&virt_timer_lock);
+			list_add_sorted(timer, &virt_timer_list);
+			spin_unlock(&virt_timer_lock);
+		}
 	}
-	spin_unlock(&vq->lock);
-	/*
-	 * To improve precision add the time spent by the
-	 * interrupt handler to the elapsed time.
-	 * Note: CPU timer counts down and we got an interrupt,
-	 * the current content is negative
-	 */
-	elapsed = S390_lowcore.async_enter_timer - get_vtimer();
-	set_vtimer(next - elapsed);
-	vq->timer = next - elapsed;
-	vq->elapsed = elapsed;
 }

 void init_virt_timer(struct vtimer_list *timer)
@@ -278,179 +256,108 @@ EXPORT_SYMBOL(init_virt_timer);

 static inline int vtimer_pending(struct vtimer_list *timer)
 {
-	return (!list_empty(&timer->entry));
+	return !list_empty(&timer->entry);
 }

-/*
- * this function should only run on the specified CPU
- */
 static void internal_add_vtimer(struct vtimer_list *timer)
 {
-	struct vtimer_queue *vq;
-	unsigned long flags;
-	__u64 left, expires;
-
-	vq = &per_cpu(virt_cpu_timer, timer->cpu);
-	spin_lock_irqsave(&vq->lock, flags);
-
-	BUG_ON(timer->cpu != smp_processor_id());
-
-	if (list_empty(&vq->list)) {
-		/* First timer on this cpu, just program it. */
-		list_add(&timer->entry, &vq->list);
-		set_vtimer(timer->expires);
-		vq->timer = timer->expires;
-		vq->elapsed = 0;
+	if (list_empty(&virt_timer_list)) {
+		/* First timer, just program it. */
+		atomic64_set(&virt_timer_current, timer->expires);
+		atomic64_set(&virt_timer_elapsed, 0);
+		list_add(&timer->entry, &virt_timer_list);
 	} else {
-		/* Check progress of old timers. */
-		expires = timer->expires;
-		left = get_vtimer();
-		if (likely((s64) expires < (s64) left)) {
+		/* Update timer against current base. */
+		timer->expires += atomic64_read(&virt_timer_elapsed);
+		if (likely((s64) timer->expires <
+			   (s64) atomic64_read(&virt_timer_current)))
 			/* The new timer expires before the current timer. */
-			set_vtimer(expires);
-			vq->elapsed += vq->timer - left;
-			vq->timer = expires;
-		} else {
-			vq->elapsed += vq->timer - left;
-			vq->timer = left;
-		}
-		/* Insert new timer into per cpu list. */
-		timer->expires += vq->elapsed;
-		list_add_sorted(timer, &vq->list);
+			atomic64_set(&virt_timer_current, timer->expires);
+		/* Insert new timer into the list. */
+		list_add_sorted(timer, &virt_timer_list);
 	}
-
-	spin_unlock_irqrestore(&vq->lock, flags);
-	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
-	put_cpu();
 }

-static inline void prepare_vtimer(struct vtimer_list *timer)
+static void __add_vtimer(struct vtimer_list *timer, int periodic)
 {
-	BUG_ON(!timer->function);
-	BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
-	BUG_ON(vtimer_pending(timer));
-	timer->cpu = get_cpu();
+	unsigned long flags;
+
+	timer->interval = periodic ? timer->expires : 0;
+	spin_lock_irqsave(&virt_timer_lock, flags);
+	internal_add_vtimer(timer);
+	spin_unlock_irqrestore(&virt_timer_lock, flags);
 }

 /*
  * add_virt_timer - add an oneshot virtual CPU timer
  */
-void add_virt_timer(void *new)
+void add_virt_timer(struct vtimer_list *timer)
 {
-	struct vtimer_list *timer;
-
-	timer = (struct vtimer_list *)new;
-	prepare_vtimer(timer);
-	timer->interval = 0;
-	internal_add_vtimer(timer);
+	__add_vtimer(timer, 0);
 }
 EXPORT_SYMBOL(add_virt_timer);

 /*
  * add_virt_timer_int - add an interval virtual CPU timer
  */
-void add_virt_timer_periodic(void *new)
+void add_virt_timer_periodic(struct vtimer_list *timer)
 {
-	struct vtimer_list *timer;
-
-	timer = (struct vtimer_list *)new;
-	prepare_vtimer(timer);
-	timer->interval = timer->expires;
-	internal_add_vtimer(timer);
+	__add_vtimer(timer, 1);
 }
 EXPORT_SYMBOL(add_virt_timer_periodic);

-static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
+static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
 {
-	struct vtimer_queue *vq;
 	unsigned long flags;
-	int cpu;
+	int rc;

 	BUG_ON(!timer->function);
-	BUG_ON(!expires || expires > VTIMER_MAX_SLICE);

 	if (timer->expires == expires && vtimer_pending(timer))
 		return 1;
-
-	cpu = get_cpu();
-	vq = &per_cpu(virt_cpu_timer, cpu);
-
-	/* disable interrupts before test if timer is pending */
-	spin_lock_irqsave(&vq->lock, flags);
-
-	/* if timer isn't pending add it on the current CPU */
-	if (!vtimer_pending(timer)) {
-		spin_unlock_irqrestore(&vq->lock, flags);
-
-		if (periodic)
-			timer->interval = expires;
-		else
-			timer->interval = 0;
-		timer->expires = expires;
-		timer->cpu = cpu;
-		internal_add_vtimer(timer);
-		return 0;
-	}
-
-	/* check if we run on the right CPU */
-	BUG_ON(timer->cpu != cpu);
-
-	list_del_init(&timer->entry);
+	spin_lock_irqsave(&virt_timer_lock, flags);
+	rc = vtimer_pending(timer);
+	if (rc)
+		list_del_init(&timer->entry);
+	timer->interval = periodic ? expires : 0;
 	timer->expires = expires;
-	if (periodic)
-		timer->interval = expires;
-
-	/* the timer can't expire anymore so we can release the lock */
-	spin_unlock_irqrestore(&vq->lock, flags);
 	internal_add_vtimer(timer);
-	return 1;
+	spin_unlock_irqrestore(&virt_timer_lock, flags);
+	return rc;
 }

 /*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on.
- *
  * returns whether it has modified a pending timer (1) or not (0)
  */
-int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
+int mod_virt_timer(struct vtimer_list *timer, u64 expires)
 {
 	return __mod_vtimer(timer, expires, 0);
 }
 EXPORT_SYMBOL(mod_virt_timer);

 /*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on.
- *
  * returns whether it has modified a pending timer (1) or not (0)
  */
-int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
+int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
 {
 	return __mod_vtimer(timer, expires, 1);
 }
 EXPORT_SYMBOL(mod_virt_timer_periodic);

 /*
- * delete a virtual timer
+ * Delete a virtual timer.
  *
  * returns whether the deleted timer was pending (1) or not (0)
  */
 int del_virt_timer(struct vtimer_list *timer)
 {
 	unsigned long flags;
-	struct vtimer_queue *vq;

-	/* check if timer is pending */
 	if (!vtimer_pending(timer))
 		return 0;
-
-	vq = &per_cpu(virt_cpu_timer, timer->cpu);
-	spin_lock_irqsave(&vq->lock, flags);
-
-	/* we don't interrupt a running timer, just let it expire! */
+	spin_lock_irqsave(&virt_timer_lock, flags);
 	list_del_init(&timer->entry);
-
-	spin_unlock_irqrestore(&vq->lock, flags);
+	spin_unlock_irqrestore(&virt_timer_lock, flags);
 	return 1;
 }
 EXPORT_SYMBOL(del_virt_timer);
@@ -458,20 +365,10 @@ EXPORT_SYMBOL(del_virt_timer);
 /*
  * Start the virtual CPU timer on the current CPU.
  */
-void init_cpu_vtimer(void)
+void __cpuinit init_cpu_vtimer(void)
 {
-	struct vtimer_queue *vq;
-
-	/* initialize per cpu vtimer structure */
-	vq = &__get_cpu_var(virt_cpu_timer);
-	INIT_LIST_HEAD(&vq->list);
-	spin_lock_init(&vq->lock);
-
-	/* enable cpu timer interrupts */
-	__ctl_set_bit(0,10);
-
 	/* set initial cpu timer */
-	set_vtimer(0x7fffffffffffffffULL);
+	set_vtimer(VTIMER_MAX_SLICE);
 }

 static int __cpuinit s390_nohz_notify(struct notifier_block *self,
@@ -493,12 +390,7 @@ static int __cpuinit s390_nohz_notify(struct notifier_block *self,

 void __init vtime_init(void)
 {
-	/* request the cpu timer external interrupt */
-	if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
-		panic("Couldn't request external interrupt 0x1005");
-
 	/* Enable cpu timer interrupts on the boot cpu. */
 	init_cpu_vtimer();
 	cpu_notifier(s390_nohz_notify, 0);
 }
-