author     Martin Schwidefsky <schwidefsky@de.ibm.com>   2008-12-31 09:11:39 -0500
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>   2008-12-31 09:11:47 -0500
commit     aa5e97ce4bbc9d5daeec16b1d15bb3f6b7b4f4d4 (patch)
tree       6bf29daaaca165108bcafa68105d18e64ef01ea2 /arch/s390
parent     79741dd35713ff4f6fd0eafd59fa94e8a4ba922d (diff)
[PATCH] improve precision of process accounting.
The unit of the cputime accounting values that are stored per process is
currently a microsecond. The CPU timer, however, has a maximum granularity
of 2**-12 microseconds. There is no benefit in storing the per-process
values at the lesser precision, and it has the disadvantage that the
backend has to do the rounding to microseconds. The better solution is to
use the maximum granularity of the CPU timer as the cputime unit.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
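
For orientation, the arithmetic behind the new numbers: one cputime_t tick is
2**-12 microseconds, so there are 4096 ticks per microsecond, 4096000 per
millisecond and 4096000000 per second, which are exactly the divisors and
factors that appear in the cputime.h hunks below. A minimal stand-alone C
sketch of that arithmetic (editor's illustration, not part of the patch; the
macro names are invented):

    #include <stdio.h>

    typedef unsigned long long cputime_t;

    /* One cputime_t tick is 2**-12 microseconds, hence: */
    #define CPUTIME_PER_USEC 4096ULL         /* ticks per microsecond */
    #define CPUTIME_PER_MSEC 4096000ULL      /* divisor used by cputime_to_msecs() */
    #define CPUTIME_PER_SEC  4096000000ULL   /* factor used by secs_to_cputime() */

    int main(void)
    {
            cputime_t ct = 3 * CPUTIME_PER_SEC + 250 * CPUTIME_PER_MSEC;

            printf("%llu ms\n", ct / CPUTIME_PER_MSEC);   /* prints "3250 ms" */
            printf("%llu s\n",  ct / CPUTIME_PER_SEC);    /* prints "3 s" */
            return 0;
    }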
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/cputime.h     | 42
-rw-r--r--  arch/s390/include/asm/lowcore.h     | 40
-rw-r--r--  arch/s390/include/asm/system.h      |  4
-rw-r--r--  arch/s390/include/asm/thread_info.h |  2
-rw-r--r--  arch/s390/kernel/vtime.c            | 93
5 files changed, 85 insertions(+), 96 deletions(-)
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 133ce054fc89..521726430afa 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -11,7 +11,7 @@
 
 #include <asm/div64.h>
 
-/* We want to use micro-second resolution. */
+/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
 
 typedef unsigned long long cputime_t;
 typedef unsigned long long cputime64_t;
@@ -53,9 +53,9 @@ __div(unsigned long long n, unsigned int base)
 #define cputime_ge(__a, __b) ((__a) >= (__b))
 #define cputime_lt(__a, __b) ((__a) < (__b))
 #define cputime_le(__a, __b) ((__a) <= (__b))
-#define cputime_to_jiffies(__ct) (__div((__ct), 1000000 / HZ))
+#define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ))
 #define cputime_to_scaled(__ct) (__ct)
-#define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (1000000 / HZ))
+#define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ))
 
 #define cputime64_zero (0ULL)
 #define cputime64_add(__a, __b) ((__a) + (__b))
@@ -64,7 +64,7 @@ __div(unsigned long long n, unsigned int base)
 static inline u64
 cputime64_to_jiffies64(cputime64_t cputime)
 {
-        do_div(cputime, 1000000 / HZ);
+        do_div(cputime, 4096000000ULL / HZ);
         return cputime;
 }
 
@@ -74,13 +74,13 @@ cputime64_to_jiffies64(cputime64_t cputime)
 static inline unsigned int
 cputime_to_msecs(const cputime_t cputime)
 {
-        return __div(cputime, 1000);
+        return __div(cputime, 4096000);
 }
 
 static inline cputime_t
 msecs_to_cputime(const unsigned int m)
 {
-        return (cputime_t) m * 1000;
+        return (cputime_t) m * 4096000;
 }
 
 /*
@@ -89,13 +89,13 @@ msecs_to_cputime(const unsigned int m)
 static inline unsigned int
 cputime_to_secs(const cputime_t cputime)
 {
-        return __div(cputime, 1000000);
+        return __div(cputime, 2048000000) >> 1;
 }
 
 static inline cputime_t
 secs_to_cputime(const unsigned int s)
 {
-        return (cputime_t) s * 1000000;
+        return (cputime_t) s * 4096000000ULL;
 }
 
 /*
@@ -104,7 +104,7 @@ secs_to_cputime(const unsigned int s)
 static inline cputime_t
 timespec_to_cputime(const struct timespec *value)
 {
-        return value->tv_nsec / 1000 + (u64) value->tv_sec * 1000000;
+        return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL;
 }
 
 static inline void
@@ -114,12 +114,12 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
         register_pair rp;
 
         rp.pair = cputime >> 1;
-        asm ("dr %0,%1" : "+d" (rp) : "d" (1000000 >> 1));
-        value->tv_nsec = rp.subreg.even * 1000;
+        asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
+        value->tv_nsec = rp.subreg.even * 1000 / 4096;
         value->tv_sec = rp.subreg.odd;
 #else
-        value->tv_nsec = (cputime % 1000000) * 1000;
-        value->tv_sec = cputime / 1000000;
+        value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096;
+        value->tv_sec = cputime / 4096000000ULL;
 #endif
 }
 
@@ -131,7 +131,7 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
 static inline cputime_t
 timeval_to_cputime(const struct timeval *value)
 {
-        return value->tv_usec + (u64) value->tv_sec * 1000000;
+        return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL;
 }
 
 static inline void
@@ -141,12 +141,12 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
         register_pair rp;
 
         rp.pair = cputime >> 1;
-        asm ("dr %0,%1" : "+d" (rp) : "d" (1000000 >> 1));
-        value->tv_usec = rp.subreg.even;
+        asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
+        value->tv_usec = rp.subreg.even / 4096;
         value->tv_sec = rp.subreg.odd;
 #else
-        value->tv_usec = cputime % 1000000;
-        value->tv_sec = cputime / 1000000;
+        value->tv_usec = cputime % 4096000000ULL;
+        value->tv_sec = cputime / 4096000000ULL;
 #endif
 }
 
@@ -156,13 +156,13 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
 static inline clock_t
 cputime_to_clock_t(cputime_t cputime)
 {
-        return __div(cputime, 1000000 / USER_HZ);
+        return __div(cputime, 4096000000ULL / USER_HZ);
 }
 
 static inline cputime_t
 clock_t_to_cputime(unsigned long x)
 {
-        return (cputime_t) x * (1000000 / USER_HZ);
+        return (cputime_t) x * (4096000000ULL / USER_HZ);
 }
 
 /*
@@ -171,7 +171,7 @@ clock_t_to_cputime(unsigned long x)
 static inline clock_t
 cputime64_to_clock_t(cputime64_t cputime)
 {
-        return __div(cputime, 1000000 / USER_HZ);
+        return __div(cputime, 4096000000ULL / USER_HZ);
 }
 
 #endif /* _S390_CPUTIME_H */
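
A side note on cputime_to_secs() above (editor's note, not part of the patch):
__div() takes an unsigned int divisor and, as cputime_to_timespec() shows, the
31-bit variant is implemented with the "dr" divide instruction, so the new
helper presumably keeps the divisor below 2**31 by dividing by 2048000000 and
then halving the quotient. For unsigned 64-bit values the halved form is
equivalent to dividing by 4096000000 directly, as this stand-alone check
illustrates:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
            /* a few arbitrary cputime values (2**-12 microsecond ticks) */
            unsigned long long samples[] = {
                    0ULL, 4095999999ULL, 4096000000ULL, 123456789012345ULL
            };
            unsigned int i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    unsigned long long ct = samples[i];
                    unsigned long long halved = (ct / 2048000000ULL) >> 1; /* cputime_to_secs() form */
                    unsigned long long direct = ct / 4096000000ULL;        /* straight division */

                    assert(halved == direct);
                    printf("%llu ticks = %llu s\n", ct, direct);
            }
            return 0;
    }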
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 0bc51d52a899..a547817cf1ab 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -67,11 +67,11 @@
 #define __LC_SYNC_ENTER_TIMER 0x248
 #define __LC_ASYNC_ENTER_TIMER 0x250
 #define __LC_EXIT_TIMER 0x258
-#define __LC_LAST_UPDATE_TIMER 0x260
-#define __LC_USER_TIMER 0x268
-#define __LC_SYSTEM_TIMER 0x270
-#define __LC_LAST_UPDATE_CLOCK 0x278
-#define __LC_STEAL_CLOCK 0x280
+#define __LC_USER_TIMER 0x260
+#define __LC_SYSTEM_TIMER 0x268
+#define __LC_STEAL_TIMER 0x270
+#define __LC_LAST_UPDATE_TIMER 0x278
+#define __LC_LAST_UPDATE_CLOCK 0x280
 #define __LC_RETURN_MCCK_PSW 0x288
 #define __LC_KERNEL_STACK 0xC40
 #define __LC_THREAD_INFO 0xC44
@@ -89,11 +89,11 @@
 #define __LC_SYNC_ENTER_TIMER 0x250
 #define __LC_ASYNC_ENTER_TIMER 0x258
 #define __LC_EXIT_TIMER 0x260
-#define __LC_LAST_UPDATE_TIMER 0x268
-#define __LC_USER_TIMER 0x270
-#define __LC_SYSTEM_TIMER 0x278
-#define __LC_LAST_UPDATE_CLOCK 0x280
-#define __LC_STEAL_CLOCK 0x288
+#define __LC_USER_TIMER 0x268
+#define __LC_SYSTEM_TIMER 0x270
+#define __LC_STEAL_TIMER 0x278
+#define __LC_LAST_UPDATE_TIMER 0x280
+#define __LC_LAST_UPDATE_CLOCK 0x288
 #define __LC_RETURN_MCCK_PSW 0x290
 #define __LC_KERNEL_STACK 0xD40
 #define __LC_THREAD_INFO 0xD48
@@ -252,11 +252,11 @@ struct _lowcore
         __u64 sync_enter_timer;         /* 0x248 */
         __u64 async_enter_timer;        /* 0x250 */
         __u64 exit_timer;               /* 0x258 */
-        __u64 last_update_timer;        /* 0x260 */
-        __u64 user_timer;               /* 0x268 */
-        __u64 system_timer;             /* 0x270 */
-        __u64 last_update_clock;        /* 0x278 */
-        __u64 steal_clock;              /* 0x280 */
+        __u64 user_timer;               /* 0x260 */
+        __u64 system_timer;             /* 0x268 */
+        __u64 steal_timer;              /* 0x270 */
+        __u64 last_update_timer;        /* 0x278 */
+        __u64 last_update_clock;        /* 0x280 */
         psw_t return_mcck_psw;          /* 0x288 */
         __u8 pad8[0xc00-0x290];         /* 0x290 */
 
@@ -343,11 +343,11 @@ struct _lowcore
         __u64 sync_enter_timer;         /* 0x250 */
         __u64 async_enter_timer;        /* 0x258 */
         __u64 exit_timer;               /* 0x260 */
-        __u64 last_update_timer;        /* 0x268 */
-        __u64 user_timer;               /* 0x270 */
-        __u64 system_timer;             /* 0x278 */
-        __u64 last_update_clock;        /* 0x280 */
-        __u64 steal_clock;              /* 0x288 */
+        __u64 user_timer;               /* 0x268 */
+        __u64 system_timer;             /* 0x270 */
+        __u64 steal_timer;              /* 0x278 */
+        __u64 last_update_timer;        /* 0x280 */
+        __u64 last_update_clock;        /* 0x288 */
         psw_t return_mcck_psw;          /* 0x290 */
         __u8 pad8[0xc00-0x2a0];         /* 0x2a0 */
         /* System info area */
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 024ef42ed6d7..3a8b26eb1f2e 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -99,7 +99,7 @@ static inline void restore_access_regs(unsigned int *acrs)
         prev = __switch_to(prev,next); \
 } while (0)
 
-extern void account_vtime(struct task_struct *);
+extern void account_vtime(struct task_struct *, struct task_struct *);
 extern void account_tick_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
 
@@ -121,7 +121,7 @@ static inline void cmma_init(void) { }
 
 #define finish_arch_switch(prev) do { \
         set_fs(current->thread.mm_segment); \
-        account_vtime(prev); \
+        account_vtime(prev, current); \
 } while (0)
 
 #define nop() asm volatile("nop")
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index c1eaf9604da7..c544aa524535 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -47,6 +47,8 @@ struct thread_info {
         unsigned int cpu;               /* current CPU */
         int preempt_count;              /* 0 => preemptable, <0 => BUG */
         struct restart_block restart_block;
+        __u64 user_timer;
+        __u64 system_timer;
 };
 
 /*
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 4a4a34caec55..1254a4d0d762 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -31,11 +31,10 @@ static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  */
-void account_process_tick(struct task_struct *tsk, int user_tick)
+static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 {
-        cputime_t cputime;
-        __u64 timer, clock;
-        int rcu_user_flag;
+        struct thread_info *ti = task_thread_info(tsk);
+        __u64 timer, clock, user, system, steal;
 
         timer = S390_lowcore.last_update_timer;
         clock = S390_lowcore.last_update_clock;
@@ -44,59 +43,47 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
                       : "=m" (S390_lowcore.last_update_timer),
                         "=m" (S390_lowcore.last_update_clock) );
         S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
-        S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;
+        S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
 
-        cputime = S390_lowcore.user_timer >> 12;
-        rcu_user_flag = cputime != 0;
-        S390_lowcore.user_timer -= cputime << 12;
-        S390_lowcore.steal_clock -= cputime << 12;
-        account_user_time(tsk, cputime, cputime);
+        user = S390_lowcore.user_timer - ti->user_timer;
+        S390_lowcore.steal_timer -= user;
+        ti->user_timer = S390_lowcore.user_timer;
+        account_user_time(tsk, user, user);
 
-        cputime = S390_lowcore.system_timer >> 12;
-        S390_lowcore.system_timer -= cputime << 12;
-        S390_lowcore.steal_clock -= cputime << 12;
+        system = S390_lowcore.system_timer - ti->system_timer;
+        S390_lowcore.steal_timer -= system;
+        ti->system_timer = S390_lowcore.system_timer;
         if (idle_task(smp_processor_id()) != current)
-                account_system_time(tsk, HARDIRQ_OFFSET, cputime, cputime);
+                account_system_time(tsk, hardirq_offset, system, system);
         else
-                account_idle_time(cputime);
+                account_idle_time(system);
 
-        cputime = S390_lowcore.steal_clock;
-        if ((__s64) cputime > 0) {
-                cputime >>= 12;
-                S390_lowcore.steal_clock -= cputime << 12;
+        steal = S390_lowcore.steal_timer;
+        if ((s64) steal > 0) {
+                S390_lowcore.steal_timer = 0;
                 if (idle_task(smp_processor_id()) != current)
-                        account_steal_time(cputime);
+                        account_steal_time(steal);
                 else
-                        account_idle_time(cputime);
+                        account_idle_time(steal);
         }
 }
 
-/*
- * Update process times based on virtual cpu times stored by entry.S
- * to the lowcore fields user_timer, system_timer & steal_clock.
- */
-void account_vtime(struct task_struct *tsk)
+void account_vtime(struct task_struct *prev, struct task_struct *next)
 {
-        cputime_t cputime;
-        __u64 timer;
-
-        timer = S390_lowcore.last_update_timer;
-        asm volatile (" STPT %0" /* Store current cpu timer value */
-                      : "=m" (S390_lowcore.last_update_timer) );
-        S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
-
-        cputime = S390_lowcore.user_timer >> 12;
-        S390_lowcore.user_timer -= cputime << 12;
-        S390_lowcore.steal_clock -= cputime << 12;
-        account_user_time(tsk, cputime, cputime);
+        struct thread_info *ti;
+
+        do_account_vtime(prev, 0);
+        ti = task_thread_info(prev);
+        ti->user_timer = S390_lowcore.user_timer;
+        ti->system_timer = S390_lowcore.system_timer;
+        ti = task_thread_info(next);
+        S390_lowcore.user_timer = ti->user_timer;
+        S390_lowcore.system_timer = ti->system_timer;
+}
 
-        cputime = S390_lowcore.system_timer >> 12;
-        S390_lowcore.system_timer -= cputime << 12;
-        S390_lowcore.steal_clock -= cputime << 12;
-        if (idle_task(smp_processor_id()) != current)
-                account_system_time(tsk, 0, cputime, cputime);
-        else
-                account_idle_time(cputime);
+void account_process_tick(struct task_struct *tsk, int user_tick)
+{
+        do_account_vtime(tsk, HARDIRQ_OFFSET);
 }
 
 /*
@@ -105,21 +92,21 @@ void account_vtime(struct task_struct *tsk)
  */
 void account_system_vtime(struct task_struct *tsk)
 {
-        cputime_t cputime;
-        __u64 timer;
+        struct thread_info *ti = task_thread_info(tsk);
+        __u64 timer, system;
 
         timer = S390_lowcore.last_update_timer;
         asm volatile (" STPT %0" /* Store current cpu timer value */
                       : "=m" (S390_lowcore.last_update_timer) );
         S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 
-        cputime = S390_lowcore.system_timer >> 12;
-        S390_lowcore.system_timer -= cputime << 12;
-        S390_lowcore.steal_clock -= cputime << 12;
+        system = S390_lowcore.system_timer - ti->system_timer;
+        S390_lowcore.steal_timer -= system;
+        ti->system_timer = S390_lowcore.system_timer;
         if (in_irq() || idle_task(smp_processor_id()) != current)
-                account_system_time(tsk, 0, cputime, cputime);
+                account_system_time(tsk, 0, system, system);
         else
-                account_idle_time(cputime);
+                account_idle_time(system);
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
 
@@ -490,8 +477,8 @@ void init_cpu_vtimer(void)
         /* kick the virtual timer */
         S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
         S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
-        asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
         asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
+        asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
 
         /* enable cpu timer interrupts */
         __ctl_set_bit(0,10);
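
Taken together, the vtime.c changes replace the old shift-by-12 rounding with
delta accounting against per-thread snapshots: the lowcore fields keep running
totals in CPU-timer units, each task's thread_info remembers the totals from
the last time it was accounted, and only the difference is handed to the
generic accounting code. Below is a simplified stand-alone model of that
scheme (editor's illustration; the names task_times, cpu_times, account and
switch_tasks are invented, and the real code uses the lowcore fields and the
thread_info members added above):

    #include <stdio.h>

    struct task_times { unsigned long long user, system; }; /* thread_info snapshot */
    struct cpu_times  { unsigned long long user, system; }; /* stands in for the lowcore fields */

    static struct cpu_times percpu;

    /* flush the time accumulated for 'tsk' since its last snapshot (do_account_vtime) */
    static void account(struct task_times *tsk)
    {
            unsigned long long user = percpu.user - tsk->user;
            unsigned long long system = percpu.system - tsk->system;

            tsk->user = percpu.user;
            tsk->system = percpu.system;
            printf("account: user=%llu system=%llu\n", user, system);
    }

    /* what account_vtime(prev, next) does at a context switch */
    static void switch_tasks(struct task_times *prev, struct task_times *next)
    {
            account(prev);                  /* flush the outgoing task */
            percpu.user = next->user;       /* re-seed the per-CPU totals from the */
            percpu.system = next->system;   /* incoming task's snapshots */
    }

    int main(void)
    {
            struct task_times a = { 0, 0 }, b = { 0, 0 };

            percpu.user += 300; percpu.system += 100;  /* time charged while A runs */
            switch_tasks(&a, &b);                      /* accounts 300/100 to A */
            percpu.user += 50;                         /* time charged while B runs */
            switch_tasks(&b, &a);                      /* accounts 50/0 to B */
            return 0;
    }

Because the per-CPU totals are re-seeded from the incoming task's snapshots,
no interval is charged twice across a context switch.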