author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2008-12-31 09:11:41 -0500
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2008-12-31 09:11:48 -0500
commit     9cfb9b3c3a7361c793c031e9c3583b177ac5debd (patch)
tree       b735c71e6fee3fd1464c21ce53f93d98ceddf90d /arch/s390/include
parent     6f43092441bda528dd38f2dc6c1e2522c5079fb7 (diff)
[PATCH] improve idle cputime accounting
Distinguish the cputime of the idle process where idle is actually using
cpu cycles from the cputime where idle is sleeping on an enabled wait psw.
The former is accounted as system time, the latter as idle time.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
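The split can be pictured with a small standalone sketch. This is not kernel code: the names account_idle_exit, system_time and idle_time are made up for illustration, and the timestamps stand in for the points where the idle task starts running, enters the enabled wait, and is woken by an interrupt.

/*
 * Minimal userspace sketch of the accounting split described in the
 * commit message; names and interfaces are illustrative only.
 */
#include <stdio.h>

typedef unsigned long long u64;

static u64 system_time;	/* cycles the idle task spent executing */
static u64 idle_time;	/* time spent in the enabled wait */

/*
 * entry: when the idle task started running
 * wait:  when it loaded the enabled wait psw
 * wake:  when an interrupt ended the wait
 */
static void account_idle_exit(u64 entry, u64 wait, u64 wake)
{
	system_time += wait - entry;	/* idle was using cpu cycles */
	idle_time   += wake - wait;	/* idle was sleeping */
}

int main(void)
{
	account_idle_exit(100, 130, 900);
	printf("system: %llu, idle: %llu\n", system_time, idle_time);
	return 0;
}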
Diffstat (limited to 'arch/s390/include')
-rw-r--r--  arch/s390/include/asm/cpu.h     4
-rw-r--r--  arch/s390/include/asm/timer.h  16
2 files changed, 9 insertions, 11 deletions
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
index 89456df43c4a..d60a2eefb17b 100644
--- a/arch/s390/include/asm/cpu.h
+++ b/arch/s390/include/asm/cpu.h
@@ -21,12 +21,12 @@ struct s390_idle_data {
 
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
 
-void s390_idle_leave(void);
+void vtime_start_cpu(void);
 
 static inline void s390_idle_check(void)
 {
 	if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
-		s390_idle_leave();
+		vtime_start_cpu();
 }
 
 #endif /* _ASM_S390_CPU_H_ */
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
index 61705d60f995..e4bcab739c19 100644
--- a/arch/s390/include/asm/timer.h
+++ b/arch/s390/include/asm/timer.h
@@ -23,20 +23,18 @@ struct vtimer_list {
 	__u64 expires;
 	__u64 interval;
 
-	spinlock_t lock;
-	unsigned long magic;
-
 	void (*function)(unsigned long);
 	unsigned long data;
 };
 
-/* the offset value will wrap after ca. 71 years */
+/* the vtimer value will wrap after ca. 71 years */
 struct vtimer_queue {
 	struct list_head list;
 	spinlock_t lock;
-	__u64 to_expire;	/* current event expire time */
-	__u64 offset;		/* list offset to zero */
+	__u64 timer;		/* last programmed timer */
+	__u64 elapsed;		/* elapsed time of timer expire values */
 	__u64 idle;		/* temp var for idle */
+	int do_spt;		/* =1: reprogram cpu timer in idle */
 };
 
 extern void init_virt_timer(struct vtimer_list *timer);
@@ -48,8 +46,8 @@ extern int del_virt_timer(struct vtimer_list *timer);
 extern void init_cpu_vtimer(void);
 extern void vtime_init(void);
 
-extern void vtime_start_cpu_timer(void);
-extern void vtime_stop_cpu_timer(void);
+extern void vtime_stop_cpu(void);
+extern void vtime_start_leave(void);
 
 #endif /* __KERNEL__ */
 
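Reading the two headers together, the renamed entry points appear to pair up around the enabled wait: the idle loop would call vtime_stop_cpu() before going to sleep, and the first interrupt after wakeup would reach vtime_start_cpu() through s390_idle_check(). That pairing is only an inference from the header changes; the sketch below stubs out both functions and is not the s390 idle or interrupt code.

/*
 * Standalone sketch of the call pairing implied by the header changes;
 * the function bodies are stubs, not the real s390 implementation.
 */
#include <stdio.h>

static unsigned long long idle_enter;	/* stands in for s390_idle.idle_enter */

static void vtime_stop_cpu(void)	/* idle loop: before the enabled wait */
{
	idle_enter = 1;			/* mark that the cpu went idle */
	puts("accounting switched to idle time");
}

static void vtime_start_cpu(void)	/* first interrupt after the wait */
{
	idle_enter = 0;
	puts("accounting switched back to system time");
}

static void s390_idle_check(void)	/* mirrors the inline from cpu.h */
{
	if (idle_enter != 0ULL)
		vtime_start_cpu();
}

int main(void)
{
	vtime_stop_cpu();	/* cpu enters the enabled wait */
	s390_idle_check();	/* interrupt handler runs on wakeup */
	return 0;
}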