author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/s390/kernel/vtime.c

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/s390/kernel/vtime.c')
-rw-r--r--	arch/s390/kernel/vtime.c	565
1 file changed, 565 insertions, 0 deletions
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
new file mode 100644
index 000000000000..bb6cf02418a2
--- /dev/null
+++ b/arch/s390/kernel/vtime.c
@@ -0,0 +1,565 @@
/*
 * arch/s390/kernel/vtime.c
 * Virtual cpu timer based timer functions.
 *
 * S390 version
 * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <linux/kernel_stat.h>
#include <linux/rcupdate.h>
#include <linux/posix-timers.h>

#include <asm/s390_ext.h>
#include <asm/timer.h>

#define VTIMER_MAGIC (TIMER_MAGIC + 1)
static ext_int_info_t ext_int_info_timer;
DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
void account_user_vtime(struct task_struct *tsk)
{
        cputime_t cputime;
        __u64 timer, clock;
        int rcu_user_flag;

        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
        asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
                      "  STCK %1"      /* Store current tod clock value */
                      : "=m" (S390_lowcore.last_update_timer),
                        "=m" (S390_lowcore.last_update_clock) );
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
        S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;

        cputime = S390_lowcore.user_timer >> 12;
        rcu_user_flag = cputime != 0;
        S390_lowcore.user_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_user_time(tsk, cputime);

        cputime = S390_lowcore.system_timer >> 12;
        S390_lowcore.system_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_system_time(tsk, HARDIRQ_OFFSET, cputime);

        cputime = S390_lowcore.steal_clock;
        if ((__s64) cputime > 0) {
                cputime >>= 12;
                S390_lowcore.steal_clock -= cputime << 12;
                account_steal_time(tsk, cputime);
        }

        run_local_timers();
        if (rcu_pending(smp_processor_id()))
                rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
        scheduler_tick();
        run_posix_cpu_timers(tsk);
}
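
/*
 * Note on units: the CPU timer and TOD clock values used above are in
 * a format where bit 51 corresponds to one microsecond, so the low
 * 12 bits hold sub-microsecond fractions.  Shifting right by 12 thus
 * converts an accumulated delta to whole microseconds for the cputime
 * accounting, while subtracting "cputime << 12" leaves the remainder
 * in the lowcore field for the next update.
 */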

/*
 * Update the system time and the steal clock based on the virtual
 * cpu time accumulated since the last update, stored by entry.S in
 * the lowcore fields system_timer & steal_clock.
 */
void account_system_vtime(struct task_struct *tsk)
{
        cputime_t cputime;
        __u64 timer;

        timer = S390_lowcore.last_update_timer;
        asm volatile ("  STPT %0"    /* Store current cpu timer value */
                      : "=m" (S390_lowcore.last_update_timer) );
        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

        cputime = S390_lowcore.system_timer >> 12;
        S390_lowcore.system_timer -= cputime << 12;
        S390_lowcore.steal_clock -= cputime << 12;
        account_system_time(tsk, 0, cputime);
}

static inline void set_vtimer(__u64 expires)
{
        __u64 timer;

        asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
                      "  SPT %1"       /* Set new value immediately afterwards */
                      : "=m" (timer) : "m" (expires) );
        S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
        S390_lowcore.last_update_timer = expires;

        /* store expire time for this CPU timer */
        per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
#else
static inline void set_vtimer(__u64 expires)
{
        S390_lowcore.last_update_timer = expires;
        asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));

        /* store expire time for this CPU timer */
        per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
#endif
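
/*
 * Background for the two variants above: SPT loads the CPU timer with
 * a new value and STPT stores the current one; the timer counts down
 * and raises external interrupt 0x1005 once it passes zero.  With
 * CONFIG_VIRT_CPU_ACCOUNTING the time already consumed on the old
 * slice is folded into system_timer before the timer is re-armed.
 */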

static void start_cpu_timer(void)
{
        struct vtimer_queue *vt_list;

        vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
        set_vtimer(vt_list->idle);
}

static void stop_cpu_timer(void)
{
        __u64 done;
        struct vtimer_queue *vt_list;

        vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

        /* nothing to do */
        if (list_empty(&vt_list->list)) {
                vt_list->idle = VTIMER_MAX_SLICE;
                goto fire;
        }

        /* store progress */
        asm volatile ("STPT %0" : "=m" (done));

        /*
         * If done is negative we do not stop the CPU timer
         * because we will instantly get an interrupt that
         * will start the CPU timer again.
         */
        if (done & 1LL<<63)
                return;
        else
                vt_list->offset += vt_list->to_expire - done;

        /* save the actual expire value */
        vt_list->idle = done;

        /*
         * We cannot halt the CPU timer, so we just write a value that
         * nearly never expires (only after 71 years) and re-write
         * the stored expire value if we continue the timer.
         */
 fire:
        set_vtimer(VTIMER_MAX_SLICE);
}
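
/*
 * The "71 years" above: VTIMER_MAX_SLICE is on the order of 2^63
 * CPU-timer units, and with bit 51 equal to one microsecond that is
 * about 2^51 microseconds, i.e. roughly 2.25 * 10^15 us or 71 years.
 */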

/*
 * Sorted add to a list. The list is searched linearly until the
 * first element with a larger expiry is found; the new timer is
 * inserted in front of it.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
        struct vtimer_list *event;

        list_for_each_entry(event, head, entry) {
                if (event->expires > timer->expires) {
                        list_add_tail(&timer->entry, &event->entry);
                        return;
                }
        }
        list_add_tail(&timer->entry, head);
}

/*
 * Run the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 */
static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
{
        struct vtimer_queue *vt_list;
        struct vtimer_list *event, *tmp;
        void (*fn)(unsigned long, struct pt_regs*);
        unsigned long data;

        if (list_empty(cb_list))
                return;

        vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

        list_for_each_entry_safe(event, tmp, cb_list, entry) {
                fn = event->function;
                data = event->data;
                fn(data, regs);

                if (!event->interval)
                        /* delete one shot timer */
                        list_del_init(&event->entry);
                else {
                        /* move interval timer back to list */
                        spin_lock(&vt_list->lock);
                        list_del_init(&event->entry);
                        list_add_sorted(event, &vt_list->list);
                        spin_unlock(&vt_list->lock);
                }
        }
}

/*
 * Handler for the virtual CPU timer.
 */
static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
{
        int cpu;
        __u64 next, delta;
        struct vtimer_queue *vt_list;
        struct vtimer_list *event, *tmp;
        struct list_head *ptr;
        /* the callback queue */
        struct list_head cb_list;

        INIT_LIST_HEAD(&cb_list);
        cpu = smp_processor_id();
        vt_list = &per_cpu(virt_cpu_timer, cpu);

        /* walk timer list, fire all expired events */
        spin_lock(&vt_list->lock);

        if (vt_list->to_expire < VTIMER_MAX_SLICE)
                vt_list->offset += vt_list->to_expire;

        list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
                if (event->expires > vt_list->offset)
                        /* found first unexpired event, leave */
                        break;

                /* re-charge interval timer, we have to add the offset */
                if (event->interval)
                        event->expires = event->interval + vt_list->offset;

                /* move expired timer to the callback queue */
                list_move_tail(&event->entry, &cb_list);
        }
        spin_unlock(&vt_list->lock);
        do_callbacks(&cb_list, regs);

        /* next event is first in list */
        spin_lock(&vt_list->lock);
        if (!list_empty(&vt_list->list)) {
                ptr = vt_list->list.next;
                event = list_entry(ptr, struct vtimer_list, entry);
                next = event->expires - vt_list->offset;

                /*
                 * Account the time spent in this interrupt handler
                 * and in the callback functions.
                 */
                asm volatile ("STPT %0" : "=m" (delta));
                delta = 0xffffffffffffffffLL - delta + 1;
                vt_list->offset += delta;
                next -= delta;
        } else {
                vt_list->offset = 0;
                next = VTIMER_MAX_SLICE;
        }
        spin_unlock(&vt_list->lock);
        set_vtimer(next);
}
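
/*
 * In the handler above the CPU timer has already passed zero and has
 * kept counting down, so STPT returns a negative value.  Taking the
 * two's complement (0xffffffffffffffff - delta + 1 == -delta) turns
 * the negative remainder into the positive time elapsed since expiry,
 * which is added to the offset and subtracted from the next expiry
 * before the timer is re-armed.
 */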

void init_virt_timer(struct vtimer_list *timer)
{
        timer->magic = VTIMER_MAGIC;
        timer->function = NULL;
        INIT_LIST_HEAD(&timer->entry);
        spin_lock_init(&timer->lock);
}
EXPORT_SYMBOL(init_virt_timer);

static inline int check_vtimer(struct vtimer_list *timer)
{
        if (timer->magic != VTIMER_MAGIC)
                return -EINVAL;
        return 0;
}

static inline int vtimer_pending(struct vtimer_list *timer)
{
        return (!list_empty(&timer->entry));
}

/*
 * This function must only run on the CPU the timer belongs to.
 */
static void internal_add_vtimer(struct vtimer_list *timer)
{
        unsigned long flags;
        __u64 done;
        struct vtimer_list *event;
        struct vtimer_queue *vt_list;

        vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
        spin_lock_irqsave(&vt_list->lock, flags);

        if (timer->cpu != smp_processor_id())
                printk("internal_add_vtimer: BUG, running on wrong CPU\n");

        /* if list is empty we only have to set the timer */
        if (list_empty(&vt_list->list)) {
                /*
                 * Reset the offset; this may happen if the last timer
                 * was just deleted by mod_virt_timer and the interrupt
                 * didn't happen until here.
                 */
                vt_list->offset = 0;
                goto fire;
        }

        /* save progress */
        asm volatile ("STPT %0" : "=m" (done));

        /* calculate completed work */
        done = vt_list->to_expire - done + vt_list->offset;
        vt_list->offset = 0;

        list_for_each_entry(event, &vt_list->list, entry)
                event->expires -= done;

 fire:
        list_add_sorted(timer, &vt_list->list);

        /* get first element, which is the next vtimer slice */
        event = list_entry(vt_list->list.next, struct vtimer_list, entry);

        set_vtimer(event->expires);
        spin_unlock_irqrestore(&vt_list->lock, flags);
        /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
        put_cpu();
}
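
/*
 * The rebasing above: "done" is the time consumed on the current
 * slice (to_expire minus the remaining STPT value) plus the offset
 * accumulated by earlier slices.  Subtracting it from every queued
 * expiry rebases the whole list to "now", so the new timer can be
 * inserted with its expires value as-is and the offset reset to zero.
 */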

static inline int prepare_vtimer(struct vtimer_list *timer)
{
        if (check_vtimer(timer) || !timer->function) {
                printk("add_virt_timer: uninitialized timer\n");
                return -EINVAL;
        }

        if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
                printk("add_virt_timer: invalid timer expire value!\n");
                return -EINVAL;
        }

        if (vtimer_pending(timer)) {
                printk("add_virt_timer: timer pending\n");
                return -EBUSY;
        }

        timer->cpu = get_cpu();
        return 0;
}

/*
 * add_virt_timer - add a one-shot virtual CPU timer
 */
void add_virt_timer(void *new)
{
        struct vtimer_list *timer;

        timer = (struct vtimer_list *)new;

        if (prepare_vtimer(timer) < 0)
                return;

        timer->interval = 0;
        internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);

/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
 */
void add_virt_timer_periodic(void *new)
{
        struct vtimer_list *timer;

        timer = (struct vtimer_list *)new;

        if (prepare_vtimer(timer) < 0)
                return;

        timer->interval = timer->expires;
        internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
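
/*
 * Usage sketch for the interface above (hypothetical caller; the
 * function names and fields are those used in this file, while the
 * callback and the values are illustrative only):
 *
 *	static void my_vtimer_fn(unsigned long data, struct pt_regs *regs)
 *	{
 *		(handle expiry, runs in interrupt context)
 *	}
 *
 *	static struct vtimer_list my_timer;
 *
 *	init_virt_timer(&my_timer);
 *	my_timer.function = my_vtimer_fn;
 *	my_timer.data = 0;
 *	my_timer.expires = 1ULL << 32;	(about one second, bit 51 = 1us)
 *	add_virt_timer(&my_timer);	(fires once)
 *
 * For a timer that is re-armed every my_timer.expires units, call
 * add_virt_timer_periodic(&my_timer) instead.
 */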

/*
 * If we change a pending timer the function must be called on the CPU
 * where the timer is running, e.g. by smp_call_function_on().
 *
 * Like the original mod_timer, this adds the timer if it is not yet
 * pending; in that case it is added on the current CPU as a one-shot
 * timer.
 *
 * Returns whether it has modified a pending timer (1) or not (0).
 */
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
        struct vtimer_queue *vt_list;
        unsigned long flags;
        int cpu;

        if (check_vtimer(timer) || !timer->function) {
                printk("mod_virt_timer: uninitialized timer\n");
                return -EINVAL;
        }

        if (!expires || expires > VTIMER_MAX_SLICE) {
                printk("mod_virt_timer: invalid expire range\n");
                return -EINVAL;
        }

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && vtimer_pending(timer))
                return 1;

        cpu = get_cpu();
        vt_list = &per_cpu(virt_cpu_timer, cpu);

        /* disable interrupts before testing if the timer is pending */
        spin_lock_irqsave(&vt_list->lock, flags);

        /* if the timer isn't pending, add it on the current CPU */
        if (!vtimer_pending(timer)) {
                spin_unlock_irqrestore(&vt_list->lock, flags);
                /* we do not activate an interval timer with mod_virt_timer */
                timer->interval = 0;
                timer->expires = expires;
                timer->cpu = cpu;
                internal_add_vtimer(timer);
                return 0;
        }

        /* check if we run on the right CPU */
        if (timer->cpu != cpu) {
                printk("mod_virt_timer: running on wrong CPU, check your code\n");
                spin_unlock_irqrestore(&vt_list->lock, flags);
                put_cpu();
                return -EINVAL;
        }

        list_del_init(&timer->entry);
        timer->expires = expires;

        /* also change the interval if we have an interval timer */
        if (timer->interval)
                timer->interval = expires;

        /* the timer can't expire anymore so we can release the lock */
        spin_unlock_irqrestore(&vt_list->lock, flags);
        internal_add_vtimer(timer);
        return 1;
}
EXPORT_SYMBOL(mod_virt_timer);

/*
 * Delete a virtual timer.
 *
 * Returns whether the deleted timer was pending (1) or not (0).
 */
int del_virt_timer(struct vtimer_list *timer)
{
        unsigned long flags;
        struct vtimer_queue *vt_list;

        if (check_vtimer(timer)) {
                printk("del_virt_timer: timer not initialized\n");
                return -EINVAL;
        }

        /* check if timer is pending */
        if (!vtimer_pending(timer))
                return 0;

        vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
        spin_lock_irqsave(&vt_list->lock, flags);

        /* we don't interrupt a running timer, just let it expire! */
        list_del_init(&timer->entry);

        /* last timer removed */
        if (list_empty(&vt_list->list)) {
                vt_list->to_expire = 0;
                vt_list->offset = 0;
        }

        spin_unlock_irqrestore(&vt_list->lock, flags);
        return 1;
}
EXPORT_SYMBOL(del_virt_timer);

/*
 * Start the virtual CPU timer on the current CPU.
 */
void init_cpu_vtimer(void)
{
        struct vtimer_queue *vt_list;
        unsigned long cr0;

        /* kick the virtual timer */
        S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
        S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
        asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
        asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
        __ctl_store(cr0, 0, 0);
        cr0 |= 0x400;
        __ctl_load(cr0, 0, 0);

        vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
        INIT_LIST_HEAD(&vt_list->list);
        spin_lock_init(&vt_list->lock);
        vt_list->to_expire = 0;
        vt_list->offset = 0;
        vt_list->idle = 0;
}
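
/*
 * The CR0 bit set above (0x400, i.e. bit 53 counting from the MSB)
 * unmasks the CPU-timer subclass of external interrupts, so that the
 * interruption code 0x1005 requested in vtime_init() can actually be
 * delivered on this CPU.
 */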

static int vtimer_idle_notify(struct notifier_block *self,
                              unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_IDLE:
                stop_cpu_timer();
                break;
        case CPU_NOT_IDLE:
                start_cpu_timer();
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block vtimer_idle_nb = {
        .notifier_call = vtimer_idle_notify,
};

void __init vtime_init(void)
{
        /* request the cpu timer external interrupt */
        if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
                                              &ext_int_info_timer) != 0)
                panic("Couldn't request external interrupt 0x1005");

        if (register_idle_notifier(&vtimer_idle_nb))
                panic("Couldn't register idle notifier");

        init_cpu_vtimer();
}