 arch/ia64/include/asm/cputime.h        |  69
 arch/powerpc/include/asm/cputime.h     |  70
 arch/s390/include/asm/cputime.h        | 140
 drivers/cpufreq/cpufreq_conservative.c |  29
 drivers/cpufreq/cpufreq_ondemand.c     |  33
 drivers/cpufreq/cpufreq_stats.c        |   5
 drivers/macintosh/rack-meter.c         |  11
 fs/proc/array.c                        |   8
 fs/proc/stat.c                         |  27
 fs/proc/uptime.c                       |   4
 include/asm-generic/cputime.h          |  62
 include/linux/sched.h                  |   4
 kernel/acct.c                          |   4
 kernel/cpu.c                           |   3
 kernel/exit.c                          |  22
 kernel/fork.c                          |  14
 kernel/itimer.c                        |  15
 kernel/posix-cpu-timers.c              | 132
 kernel/sched.c                         |  80
 kernel/sched_stats.h                   |   6
 kernel/signal.c                        |   6
 kernel/sys.c                           |   6
 kernel/tsacct.c                        |   2
 23 files changed, 323 insertions(+), 429 deletions(-)
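
The patch below turns cputime_t and cputime64_t into sparse-checked __nocast types and deletes the cputime_add/cputime_sub/cputime_eq macro families in favour of plain C operators; every deliberate crossing between cputime and raw integers is then marked with a __force cast. As a rough standalone sketch of what the annotation buys (the __CHECKER__ fallbacks mirror the definitions in include/linux/compiler.h; the leak() function and its body are illustrative, not part of this patch):

#ifdef __CHECKER__
#define __nocast  __attribute__((nocast))
#define __force   __attribute__((force))
#else
#define __nocast
#define __force
#endif

typedef unsigned long long __nocast cputime_t;

static inline cputime_t usecs_to_cputime(unsigned long long usecs)
{
        /* __force documents an intentional crossing of the type boundary */
        return (__force cputime_t)(usecs * 4096ULL);
}

static unsigned long long leak(cputime_t ct)
{
        return ct;  /* sparse would warn: implicit cast of a __nocast type */
}

Running sparse over a kernel tree (make C=1) should flag the implicit conversion in leak() while accepting the annotated cast above it.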
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 6073b187528a..461e52f0277f 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -26,59 +26,51 @@
 #include <linux/jiffies.h>
 #include <asm/processor.h>
 
-typedef u64 cputime_t;
-typedef u64 cputime64_t;
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
 
-#define cputime_zero            ((cputime_t)0)
 #define cputime_one_jiffy       jiffies_to_cputime(1)
-#define cputime_max             ((~((cputime_t)0) >> 1) - 1)
-#define cputime_add(__a, __b)   ((__a) + (__b))
-#define cputime_sub(__a, __b)   ((__a) - (__b))
-#define cputime_div(__a, __n)   ((__a) / (__n))
-#define cputime_halve(__a)      ((__a) >> 1)
-#define cputime_eq(__a, __b)    ((__a) == (__b))
-#define cputime_gt(__a, __b)    ((__a) > (__b))
-#define cputime_ge(__a, __b)    ((__a) >= (__b))
-#define cputime_lt(__a, __b)    ((__a) < (__b))
-#define cputime_le(__a, __b)    ((__a) <= (__b))
-
-#define cputime64_zero          ((cputime64_t)0)
-#define cputime64_add(__a, __b) ((__a) + (__b))
-#define cputime64_sub(__a, __b) ((__a) - (__b))
-#define cputime_to_cputime64(__ct)      (__ct)
 
 /*
  * Convert cputime <-> jiffies (HZ)
  */
-#define cputime_to_jiffies(__ct)        ((__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies_to_cputime(__jif)       ((__jif) * (NSEC_PER_SEC / HZ))
-#define cputime64_to_jiffies64(__ct)    ((__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies64_to_cputime64(__jif)   ((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime_to_jiffies(__ct)        \
+        ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies_to_cputime(__jif)       \
+        (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
+#define cputime64_to_jiffies64(__ct)    \
+        ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
+#define jiffies64_to_cputime64(__jif)   \
+        (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
 
 /*
  * Convert cputime <-> microseconds
  */
-#define cputime_to_usecs(__ct)          ((__ct) / NSEC_PER_USEC)
-#define usecs_to_cputime(__usecs)       ((__usecs) * NSEC_PER_USEC)
+#define cputime_to_usecs(__ct)          \
+        ((__force u64)(__ct) / NSEC_PER_USEC)
+#define usecs_to_cputime(__usecs)       \
+        (__force cputime_t)((__usecs) * NSEC_PER_USEC)
 
 /*
  * Convert cputime <-> seconds
 */
-#define cputime_to_secs(__ct)           ((__ct) / NSEC_PER_SEC)
-#define secs_to_cputime(__secs)         ((__secs) * NSEC_PER_SEC)
+#define cputime_to_secs(__ct)           \
+        ((__force u64)(__ct) / NSEC_PER_SEC)
+#define secs_to_cputime(__secs)         \
+        (__force cputime_t)((__secs) * NSEC_PER_SEC)
 
 /*
  * Convert cputime <-> timespec (nsec)
 */
 static inline cputime_t timespec_to_cputime(const struct timespec *val)
 {
-        cputime_t ret = val->tv_sec * NSEC_PER_SEC;
-        return (ret + val->tv_nsec);
+        u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+        return (__force cputime_t) ret;
 }
 static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
 {
-        val->tv_sec  = ct / NSEC_PER_SEC;
-        val->tv_nsec = ct % NSEC_PER_SEC;
+        val->tv_sec  = (__force u64) ct / NSEC_PER_SEC;
+        val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
 }
 
 /*
@@ -86,25 +78,28 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
 */
 static inline cputime_t timeval_to_cputime(struct timeval *val)
 {
-        cputime_t ret = val->tv_sec * NSEC_PER_SEC;
-        return (ret + val->tv_usec * NSEC_PER_USEC);
+        u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+        return (__force cputime_t) ret;
 }
 static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
 {
-        val->tv_sec = ct / NSEC_PER_SEC;
-        val->tv_usec = (ct % NSEC_PER_SEC) / NSEC_PER_USEC;
+        val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
+        val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
 }
 
 /*
  * Convert cputime <-> clock (USER_HZ)
 */
-#define cputime_to_clock_t(__ct)        ((__ct) / (NSEC_PER_SEC / USER_HZ))
-#define clock_t_to_cputime(__x)         ((__x) * (NSEC_PER_SEC / USER_HZ))
+#define cputime_to_clock_t(__ct)        \
+        ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
+#define clock_t_to_cputime(__x)         \
+        (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
 
 /*
  * Convert cputime64 to clock.
 */
-#define cputime64_to_clock_t(__ct)      cputime_to_clock_t((cputime_t)__ct)
+#define cputime64_to_clock_t(__ct)      \
+        cputime_to_clock_t((__force cputime_t)__ct)
 
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 #endif /* __IA64_CPUTIME_H */
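
On ia64 with CONFIG_VIRT_CPU_ACCOUNTING the cputime unit is simply nanoseconds, which is why every conversion above reduces to a multiply or divide against NSEC_PER_SEC. A tiny standalone check of that arithmetic (the HZ and USER_HZ values are assumed here for illustration, not taken from the patch):

#include <assert.h>

#define HZ            250
#define USER_HZ       100
#define NSEC_PER_SEC  1000000000ULL

int main(void)
{
        /* jiffies_to_cputime(1): one tick expressed in nanoseconds */
        unsigned long long one_jiffy = NSEC_PER_SEC / HZ;   /* 4000000 */

        assert(one_jiffy * HZ == NSEC_PER_SEC);
        /* cputime_to_clock_t(): 3 seconds of cputime -> 300 USER_HZ ticks */
        assert(3 * NSEC_PER_SEC / (NSEC_PER_SEC / USER_HZ) == 300);
        return 0;
}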
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 1cf20bdfbeca..e94935c52019 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -29,25 +29,8 @@ static inline void setup_cputime_one_jiffy(void) { }
 #include <asm/time.h>
 #include <asm/param.h>
 
-typedef u64 cputime_t;
-typedef u64 cputime64_t;
-
-#define cputime_zero            ((cputime_t)0)
-#define cputime_max             ((~((cputime_t)0) >> 1) - 1)
-#define cputime_add(__a, __b)   ((__a) + (__b))
-#define cputime_sub(__a, __b)   ((__a) - (__b))
-#define cputime_div(__a, __n)   ((__a) / (__n))
-#define cputime_halve(__a)      ((__a) >> 1)
-#define cputime_eq(__a, __b)    ((__a) == (__b))
-#define cputime_gt(__a, __b)    ((__a) > (__b))
-#define cputime_ge(__a, __b)    ((__a) >= (__b))
-#define cputime_lt(__a, __b)    ((__a) < (__b))
-#define cputime_le(__a, __b)    ((__a) <= (__b))
-
-#define cputime64_zero          ((cputime64_t)0)
-#define cputime64_add(__a, __b) ((__a) + (__b))
-#define cputime64_sub(__a, __b) ((__a) - (__b))
-#define cputime_to_cputime64(__ct)      (__ct)
+typedef u64 __nocast cputime_t;
+typedef u64 __nocast cputime64_t;
 
 #ifdef __KERNEL__
 
@@ -65,7 +48,7 @@ DECLARE_PER_CPU(unsigned long, cputime_scaled_last_delta);
 
 static inline unsigned long cputime_to_jiffies(const cputime_t ct)
 {
-        return mulhdu(ct, __cputime_jiffies_factor);
+        return mulhdu((__force u64) ct, __cputime_jiffies_factor);
 }
 
 /* Estimate the scaled cputime by scaling the real cputime based on
@@ -74,14 +57,15 @@ static inline cputime_t cputime_to_scaled(const cputime_t ct)
 {
         if (cpu_has_feature(CPU_FTR_SPURR) &&
             __get_cpu_var(cputime_last_delta))
-                return ct * __get_cpu_var(cputime_scaled_last_delta) /
-                        __get_cpu_var(cputime_last_delta);
+                return (__force u64) ct *
+                        __get_cpu_var(cputime_scaled_last_delta) /
+                        __get_cpu_var(cputime_last_delta);
         return ct;
 }
 
 static inline cputime_t jiffies_to_cputime(const unsigned long jif)
 {
-        cputime_t ct;
+        u64 ct;
         unsigned long sec;
 
         /* have to be a little careful about overflow */
@@ -93,7 +77,7 @@ static inline cputime_t jiffies_to_cputime(const unsigned long jif)
         }
         if (sec)
                 ct += (cputime_t) sec * tb_ticks_per_sec;
-        return ct;
+        return (__force cputime_t) ct;
 }
 
 static inline void setup_cputime_one_jiffy(void)
@@ -103,7 +87,7 @@ static inline void setup_cputime_one_jiffy(void)
 
 static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
 {
-        cputime_t ct;
+        u64 ct;
         u64 sec;
 
         /* have to be a little careful about overflow */
@@ -114,13 +98,13 @@ static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
                 do_div(ct, HZ);
         }
         if (sec)
-                ct += (cputime_t) sec * tb_ticks_per_sec;
-        return ct;
+                ct += (u64) sec * tb_ticks_per_sec;
+        return (__force cputime64_t) ct;
 }
 
 static inline u64 cputime64_to_jiffies64(const cputime_t ct)
 {
-        return mulhdu(ct, __cputime_jiffies_factor);
+        return mulhdu((__force u64) ct, __cputime_jiffies_factor);
 }
 
 /*
@@ -130,12 +114,12 @@ extern u64 __cputime_msec_factor;
 
 static inline unsigned long cputime_to_usecs(const cputime_t ct)
 {
-        return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
+        return mulhdu((__force u64) ct, __cputime_msec_factor) * USEC_PER_MSEC;
 }
 
 static inline cputime_t usecs_to_cputime(const unsigned long us)
 {
-        cputime_t ct;
+        u64 ct;
         unsigned long sec;
 
         /* have to be a little careful about overflow */
@@ -147,7 +131,7 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
         }
         if (sec)
                 ct += (cputime_t) sec * tb_ticks_per_sec;
-        return ct;
+        return (__force cputime_t) ct;
 }
 
 /*
@@ -157,12 +141,12 @@ extern u64 __cputime_sec_factor;
 
 static inline unsigned long cputime_to_secs(const cputime_t ct)
 {
-        return mulhdu(ct, __cputime_sec_factor);
+        return mulhdu((__force u64) ct, __cputime_sec_factor);
 }
 
 static inline cputime_t secs_to_cputime(const unsigned long sec)
 {
-        return (cputime_t) sec * tb_ticks_per_sec;
+        return (__force cputime_t)((u64) sec * tb_ticks_per_sec);
 }
 
 /*
@@ -170,7 +154,7 @@ static inline cputime_t secs_to_cputime(const unsigned long sec)
 */
 static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
 {
-        u64 x = ct;
+        u64 x = (__force u64) ct;
         unsigned int frac;
 
         frac = do_div(x, tb_ticks_per_sec);
@@ -182,11 +166,11 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *p)
 
 static inline cputime_t timespec_to_cputime(const struct timespec *p)
 {
-        cputime_t ct;
+        u64 ct;
 
         ct = (u64) p->tv_nsec * tb_ticks_per_sec;
         do_div(ct, 1000000000);
-        return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+        return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
 }
 
 /*
@@ -194,7 +178,7 @@ static inline cputime_t timespec_to_cputime(const struct timespec *p)
 */
 static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
 {
-        u64 x = ct;
+        u64 x = (__force u64) ct;
         unsigned int frac;
 
         frac = do_div(x, tb_ticks_per_sec);
@@ -206,11 +190,11 @@ static inline void cputime_to_timeval(const cputime_t ct, struct timeval *p)
 
 static inline cputime_t timeval_to_cputime(const struct timeval *p)
 {
-        cputime_t ct;
+        u64 ct;
 
         ct = (u64) p->tv_usec * tb_ticks_per_sec;
         do_div(ct, 1000000);
-        return ct + (u64) p->tv_sec * tb_ticks_per_sec;
+        return (__force cputime_t)(ct + (u64) p->tv_sec * tb_ticks_per_sec);
 }
 
 /*
@@ -220,12 +204,12 @@ extern u64 __cputime_clockt_factor;
 
 static inline unsigned long cputime_to_clock_t(const cputime_t ct)
 {
-        return mulhdu(ct, __cputime_clockt_factor);
+        return mulhdu((__force u64) ct, __cputime_clockt_factor);
 }
 
 static inline cputime_t clock_t_to_cputime(const unsigned long clk)
 {
-        cputime_t ct;
+        u64 ct;
         unsigned long sec;
 
         /* have to be a little careful about overflow */
@@ -236,8 +220,8 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk)
                 do_div(ct, USER_HZ);
         }
         if (sec)
-                ct += (cputime_t) sec * tb_ticks_per_sec;
-        return ct;
+                ct += (u64) sec * tb_ticks_per_sec;
+        return (__force cputime_t) ct;
 }
 
 #define cputime64_to_clock_t(ct)        cputime_to_clock_t((cputime_t)(ct))
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 081434878296..0887a0463e33 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -16,114 +16,98 @@
 
 /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
 
-typedef unsigned long long cputime_t;
-typedef unsigned long long cputime64_t;
+typedef unsigned long long __nocast cputime_t;
+typedef unsigned long long __nocast cputime64_t;
 
-#ifndef __s390x__
-
-static inline unsigned int
-__div(unsigned long long n, unsigned int base)
+static inline unsigned long __div(unsigned long long n, unsigned long base)
 {
+#ifndef __s390x__
         register_pair rp;
 
         rp.pair = n >> 1;
         asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
         return rp.subreg.odd;
+#else /* __s390x__ */
+        return n / base;
+#endif /* __s390x__ */
 }
 
-#else /* __s390x__ */
+#define cputime_one_jiffy       jiffies_to_cputime(1)
 
-static inline unsigned int
-__div(unsigned long long n, unsigned int base)
+/*
+ * Convert cputime to jiffies and back.
+ */
+static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
 {
-        return n / base;
+        return __div((__force unsigned long long) cputime, 4096000000ULL / HZ);
 }
 
-#endif /* __s390x__ */
+static inline cputime_t jiffies_to_cputime(const unsigned int jif)
+{
+        return (__force cputime_t)(jif * (4096000000ULL / HZ));
+}
 
-#define cputime_zero            (0ULL)
-#define cputime_one_jiffy       jiffies_to_cputime(1)
-#define cputime_max             ((~0UL >> 1) - 1)
-#define cputime_add(__a, __b)   ((__a) + (__b))
-#define cputime_sub(__a, __b)   ((__a) - (__b))
-#define cputime_div(__a, __n) ({ \
-        unsigned long long __div = (__a); \
-        do_div(__div,__n); \
-        __div; \
-})
-#define cputime_halve(__a)      ((__a) >> 1)
-#define cputime_eq(__a, __b)    ((__a) == (__b))
-#define cputime_gt(__a, __b)    ((__a) > (__b))
-#define cputime_ge(__a, __b)    ((__a) >= (__b))
-#define cputime_lt(__a, __b)    ((__a) < (__b))
-#define cputime_le(__a, __b)    ((__a) <= (__b))
-#define cputime_to_jiffies(__ct)        (__div((__ct), 4096000000ULL / HZ))
-#define cputime_to_scaled(__ct)         (__ct)
-#define jiffies_to_cputime(__hz)        ((cputime_t)(__hz) * (4096000000ULL / HZ))
-
-#define cputime64_zero          (0ULL)
-#define cputime64_add(__a, __b) ((__a) + (__b))
-#define cputime_to_cputime64(__ct)      (__ct)
-
-static inline u64
-cputime64_to_jiffies64(cputime64_t cputime)
-{
-        do_div(cputime, 4096000000ULL / HZ);
-        return cputime;
+static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
+{
+        unsigned long long jif = (__force unsigned long long) cputime;
+        do_div(jif, 4096000000ULL / HZ);
+        return jif;
+}
+
+static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
+{
+        return (__force cputime64_t)(jif * (4096000000ULL / HZ));
 }
 
 /*
  * Convert cputime to microseconds and back.
 */
-static inline unsigned int
-cputime_to_usecs(const cputime_t cputime)
+static inline unsigned int cputime_to_usecs(const cputime_t cputime)
 {
-        return cputime_div(cputime, 4096);
+        return (__force unsigned long long) cputime >> 12;
 }
 
-static inline cputime_t
-usecs_to_cputime(const unsigned int m)
+static inline cputime_t usecs_to_cputime(const unsigned int m)
 {
-        return (cputime_t) m * 4096;
+        return (__force cputime_t)(m * 4096ULL);
 }
 
 /*
  * Convert cputime to milliseconds and back.
 */
-static inline unsigned int
-cputime_to_secs(const cputime_t cputime)
+static inline unsigned int cputime_to_secs(const cputime_t cputime)
 {
-        return __div(cputime, 2048000000) >> 1;
+        return __div((__force unsigned long long) cputime, 2048000000) >> 1;
 }
 
-static inline cputime_t
-secs_to_cputime(const unsigned int s)
+static inline cputime_t secs_to_cputime(const unsigned int s)
 {
-        return (cputime_t) s * 4096000000ULL;
+        return (__force cputime_t)(s * 4096000000ULL);
 }
 
 /*
  * Convert cputime to timespec and back.
 */
-static inline cputime_t
-timespec_to_cputime(const struct timespec *value)
+static inline cputime_t timespec_to_cputime(const struct timespec *value)
 {
-        return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL;
+        unsigned long long ret = value->tv_sec * 4096000000ULL;
+        return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000);
 }
 
-static inline void
-cputime_to_timespec(const cputime_t cputime, struct timespec *value)
+static inline void cputime_to_timespec(const cputime_t cputime,
+                                       struct timespec *value)
 {
+        unsigned long long __cputime = (__force unsigned long long) cputime;
 #ifndef __s390x__
         register_pair rp;
 
-        rp.pair = cputime >> 1;
+        rp.pair = __cputime >> 1;
         asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
         value->tv_nsec = rp.subreg.even * 1000 / 4096;
         value->tv_sec = rp.subreg.odd;
 #else
-        value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096;
-        value->tv_sec = cputime / 4096000000ULL;
+        value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096;
+        value->tv_sec = __cputime / 4096000000ULL;
 #endif
 }
 
@@ -132,50 +116,52 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
  * Since cputime and timeval have the same resolution (microseconds)
  * this is easy.
 */
-static inline cputime_t
-timeval_to_cputime(const struct timeval *value)
+static inline cputime_t timeval_to_cputime(const struct timeval *value)
 {
-        return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL;
+        unsigned long long ret = value->tv_sec * 4096000000ULL;
+        return (__force cputime_t)(ret + value->tv_usec * 4096ULL);
 }
 
-static inline void
-cputime_to_timeval(const cputime_t cputime, struct timeval *value)
+static inline void cputime_to_timeval(const cputime_t cputime,
+                                      struct timeval *value)
 {
+        unsigned long long __cputime = (__force unsigned long long) cputime;
 #ifndef __s390x__
         register_pair rp;
 
-        rp.pair = cputime >> 1;
+        rp.pair = __cputime >> 1;
         asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
         value->tv_usec = rp.subreg.even / 4096;
         value->tv_sec = rp.subreg.odd;
 #else
-        value->tv_usec = (cputime % 4096000000ULL) / 4096;
-        value->tv_sec = cputime / 4096000000ULL;
+        value->tv_usec = (__cputime % 4096000000ULL) / 4096;
+        value->tv_sec = __cputime / 4096000000ULL;
 #endif
 }
 
 /*
  * Convert cputime to clock and back.
 */
-static inline clock_t
-cputime_to_clock_t(cputime_t cputime)
+static inline clock_t cputime_to_clock_t(cputime_t cputime)
 {
-        return cputime_div(cputime, 4096000000ULL / USER_HZ);
+        unsigned long long clock = (__force unsigned long long) cputime;
+        do_div(clock, 4096000000ULL / USER_HZ);
+        return clock;
 }
 
-static inline cputime_t
-clock_t_to_cputime(unsigned long x)
+static inline cputime_t clock_t_to_cputime(unsigned long x)
 {
-        return (cputime_t) x * (4096000000ULL / USER_HZ);
+        return (__force cputime_t)(x * (4096000000ULL / USER_HZ));
 }
 
 /*
  * Convert cputime64 to clock.
 */
-static inline clock_t
-cputime64_to_clock_t(cputime64_t cputime)
+static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
 {
-        return cputime_div(cputime, 4096000000ULL / USER_HZ);
+        unsigned long long clock = (__force unsigned long long) cputime;
+        do_div(clock, 4096000000ULL / USER_HZ);
+        return clock;
 }
 
 struct s390_idle_data {
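
The s390 conversions above all revolve around the CPU timer's 2**-12 microsecond resolution: one microsecond is 4096 ticks and one second is 4096000000 ticks, so cputime_to_usecs() degenerates to a right shift by 12. A standalone sanity check of those constants (illustrative only, not kernel code):

#include <stdio.h>

int main(void)
{
        unsigned long long ticks_per_sec = 4096ULL * 1000000; /* 4096000000 */
        unsigned long long one_ms = ticks_per_sec / 1000;     /* 1 ms in ticks */

        printf("ticks per second: %llu\n", ticks_per_sec);
        /* shifting right by 12 divides by 4096, i.e. ticks -> microseconds */
        printf("1 ms = %llu ticks = %llu usecs\n", one_ms, one_ms >> 12);
        return 0;
}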
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c97b468ee9f7..7f31a031c0b5 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -103,15 +103,14 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
         cputime64_t busy_time;
 
         cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-        busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-                        kstat_cpu(cpu).cpustat.system);
-
-        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
-
-        idle_time = cputime64_sub(cur_wall_time, busy_time);
+        busy_time = kstat_cpu(cpu).cpustat.user;
+        busy_time += kstat_cpu(cpu).cpustat.system;
+        busy_time += kstat_cpu(cpu).cpustat.irq;
+        busy_time += kstat_cpu(cpu).cpustat.softirq;
+        busy_time += kstat_cpu(cpu).cpustat.steal;
+        busy_time += kstat_cpu(cpu).cpustat.nice;
+
+        idle_time = cur_wall_time - busy_time;
         if (wall)
                 *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
@@ -353,20 +352,20 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
                 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
-                wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-                                j_dbs_info->prev_cpu_wall);
+                wall_time = (unsigned int)
+                        (cur_wall_time - j_dbs_info->prev_cpu_wall);
                 j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-                idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-                                j_dbs_info->prev_cpu_idle);
+                idle_time = (unsigned int)
+                        (cur_idle_time - j_dbs_info->prev_cpu_idle);
                 j_dbs_info->prev_cpu_idle = cur_idle_time;
 
                 if (dbs_tuners_ins.ignore_nice) {
                         cputime64_t cur_nice;
                         unsigned long cur_nice_jiffies;
 
-                        cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-                                        j_dbs_info->prev_cpu_nice);
+                        cur_nice = kstat_cpu(j).cpustat.nice -
+                                        j_dbs_info->prev_cpu_nice;
                         /*
                          * Assumption: nice time between sampling periods will
                          * be less than 2^32 jiffies for 32 bit sys
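
In both governors, get_cpu_idle_time_jiffy() derives idle time by subtracting the summed busy categories from the wall clock, and dbs_check_cpu() then works on deltas between two samples. A schematic of that per-CPU load computation (the struct and function names here are invented for illustration; the real logic lives in the hunks above and in the ondemand diff below):

struct cpu_sample {
        unsigned long long wall;   /* cputime64 wall clock at sample time */
        unsigned long long idle;   /* accumulated idle (+ iowait) time */
};

static unsigned int load_percent(const struct cpu_sample *prev,
                                 const struct cpu_sample *cur)
{
        unsigned int wall_time = (unsigned int)(cur->wall - prev->wall);
        unsigned int idle_time = (unsigned int)(cur->idle - prev->idle);

        if (wall_time == 0 || wall_time < idle_time)
                return 0;
        /* busy fraction of the elapsed sampling window, in percent */
        return 100 * (wall_time - idle_time) / wall_time;
}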
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index fa8af4ebb1d6..07cffe2f6cff 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -127,15 +127,14 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
         cputime64_t busy_time;
 
         cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-        busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-                        kstat_cpu(cpu).cpustat.system);
-
-        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-        busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
-
-        idle_time = cputime64_sub(cur_wall_time, busy_time);
+        busy_time = kstat_cpu(cpu).cpustat.user;
+        busy_time += kstat_cpu(cpu).cpustat.system;
+        busy_time += kstat_cpu(cpu).cpustat.irq;
+        busy_time += kstat_cpu(cpu).cpustat.softirq;
+        busy_time += kstat_cpu(cpu).cpustat.steal;
+        busy_time += kstat_cpu(cpu).cpustat.nice;
+
+        idle_time = cur_wall_time - busy_time;
         if (wall)
                 *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
@@ -442,24 +441,24 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
                 cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
                 cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
 
-                wall_time = (unsigned int) cputime64_sub(cur_wall_time,
-                                j_dbs_info->prev_cpu_wall);
+                wall_time = (unsigned int)
+                        (cur_wall_time - j_dbs_info->prev_cpu_wall);
                 j_dbs_info->prev_cpu_wall = cur_wall_time;
 
-                idle_time = (unsigned int) cputime64_sub(cur_idle_time,
-                                j_dbs_info->prev_cpu_idle);
+                idle_time = (unsigned int)
+                        (cur_idle_time - j_dbs_info->prev_cpu_idle);
                 j_dbs_info->prev_cpu_idle = cur_idle_time;
 
-                iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
-                                j_dbs_info->prev_cpu_iowait);
+                iowait_time = (unsigned int)
+                        (cur_iowait_time - j_dbs_info->prev_cpu_iowait);
                 j_dbs_info->prev_cpu_iowait = cur_iowait_time;
 
                 if (dbs_tuners_ins.ignore_nice) {
                         cputime64_t cur_nice;
                         unsigned long cur_nice_jiffies;
 
-                        cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-                                        j_dbs_info->prev_cpu_nice);
+                        cur_nice = kstat_cpu(j).cpustat.nice -
+                                        j_dbs_info->prev_cpu_nice;
                         /*
                          * Assumption: nice time between sampling periods will
                          * be less than 2^32 jiffies for 32 bit sys
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c5072a91e848..2a508edd768b 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -61,9 +61,8 @@ static int cpufreq_stats_update(unsigned int cpu)
         spin_lock(&cpufreq_stats_lock);
         stat = per_cpu(cpufreq_stats_table, cpu);
         if (stat->time_in_state)
-                stat->time_in_state[stat->last_index] =
-                        cputime64_add(stat->time_in_state[stat->last_index],
-                                      cputime_sub(cur_time, stat->last_time));
+                stat->time_in_state[stat->last_index] +=
+                        cur_time - stat->last_time;
         stat->last_time = cur_time;
         spin_unlock(&cpufreq_stats_lock);
         return 0;
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 2637c139777b..909908ebf164 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -83,11 +83,10 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 {
         cputime64_t retval;
 
-        retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
-                        kstat_cpu(cpu).cpustat.iowait);
+        retval = kstat_cpu(cpu).cpustat.idle + kstat_cpu(cpu).cpustat.iowait;
 
         if (rackmeter_ignore_nice)
-                retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+                retval += kstat_cpu(cpu).cpustat.nice;
 
         return retval;
 }
@@ -220,13 +219,11 @@ static void rackmeter_do_timer(struct work_struct *work)
         int i, offset, load, cumm, pause;
 
         cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-        total_ticks = (unsigned int)cputime64_sub(cur_jiffies,
-                                                  rcpu->prev_wall);
+        total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
         rcpu->prev_wall = cur_jiffies;
 
         total_idle_ticks = get_cpu_idle_time(cpu);
-        idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
-                                rcpu->prev_idle);
+        idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
         rcpu->prev_idle = total_idle_ticks;
 
         /* We do a very dumb calculation to update the LEDs for now,
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 3a1dafd228d1..8c344f037bd0 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -394,8 +394,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
         sigemptyset(&sigign);
         sigemptyset(&sigcatch);
-        cutime = cstime = utime = stime = cputime_zero;
-        cgtime = gtime = cputime_zero;
+        cutime = cstime = utime = stime = 0;
+        cgtime = gtime = 0;
 
         if (lock_task_sighand(task, &flags)) {
                 struct signal_struct *sig = task->signal;
@@ -423,14 +423,14 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                         do {
                                 min_flt += t->min_flt;
                                 maj_flt += t->maj_flt;
-                                gtime = cputime_add(gtime, t->gtime);
+                                gtime += t->gtime;
                                 t = next_thread(t);
                         } while (t != task);
 
                         min_flt += sig->min_flt;
                         maj_flt += sig->maj_flt;
                         thread_group_times(task, &utime, &stime);
-                        gtime = cputime_add(gtime, sig->gtime);
+                        gtime += sig->gtime;
                 }
 
                 sid = task_session_nr_ns(task, ns);
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 2a30d67dd6b8..714d5d131e76 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -30,7 +30,7 @@ static cputime64_t get_idle_time(int cpu)
         if (idle_time == -1ULL) {
                 /* !NO_HZ so we can rely on cpustat.idle */
                 idle = kstat_cpu(cpu).cpustat.idle;
-                idle = cputime64_add(idle, arch_idle_time(cpu));
+                idle += arch_idle_time(cpu);
         } else
                 idle = nsecs_to_jiffies64(1000 * idle_time);
 
@@ -63,23 +63,22 @@ static int show_stat(struct seq_file *p, void *v)
         struct timespec boottime;
 
         user = nice = system = idle = iowait =
-                irq = softirq = steal = cputime64_zero;
-        guest = guest_nice = cputime64_zero;
+                irq = softirq = steal = 0;
+        guest = guest_nice = 0;
         getboottime(&boottime);
         jif = boottime.tv_sec;
 
         for_each_possible_cpu(i) {
-                user = cputime64_add(user, kstat_cpu(i).cpustat.user);
-                nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
-                system = cputime64_add(system, kstat_cpu(i).cpustat.system);
-                idle = cputime64_add(idle, get_idle_time(i));
-                iowait = cputime64_add(iowait, get_iowait_time(i));
-                irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
-                softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
-                steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
-                guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
-                guest_nice = cputime64_add(guest_nice,
-                        kstat_cpu(i).cpustat.guest_nice);
+                user += kstat_cpu(i).cpustat.user;
+                nice += kstat_cpu(i).cpustat.nice;
+                system += kstat_cpu(i).cpustat.system;
+                idle += get_idle_time(i);
+                iowait += get_iowait_time(i);
+                irq += kstat_cpu(i).cpustat.irq;
+                softirq += kstat_cpu(i).cpustat.softirq;
+                steal += kstat_cpu(i).cpustat.steal;
+                guest += kstat_cpu(i).cpustat.guest;
+                guest_nice += kstat_cpu(i).cpustat.guest_nice;
                 sum += kstat_cpu_irqs_sum(i);
                 sum += arch_irq_stat_cpu(i);
 
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
index 766b1d456050..ac5243657da3 100644
--- a/fs/proc/uptime.c
+++ b/fs/proc/uptime.c
@@ -12,10 +12,10 @@ static int uptime_proc_show(struct seq_file *m, void *v)
         struct timespec uptime;
         struct timespec idle;
         int i;
-        cputime_t idletime = cputime_zero;
+        cputime_t idletime = 0;
 
         for_each_possible_cpu(i)
-                idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
+                idletime += kstat_cpu(i).cpustat.idle;
 
         do_posix_clock_monotonic_gettime(&uptime);
         monotonic_to_bootbased(&uptime);
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 62ce6823c0f2..77202e2c9fc5 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -4,70 +4,64 @@
 #include <linux/time.h>
 #include <linux/jiffies.h>
 
-typedef unsigned long cputime_t;
+typedef unsigned long __nocast cputime_t;
 
-#define cputime_zero            (0UL)
 #define cputime_one_jiffy       jiffies_to_cputime(1)
-#define cputime_max             ((~0UL >> 1) - 1)
-#define cputime_add(__a, __b)   ((__a) + (__b))
-#define cputime_sub(__a, __b)   ((__a) - (__b))
-#define cputime_div(__a, __n)   ((__a) / (__n))
-#define cputime_halve(__a)      ((__a) >> 1)
-#define cputime_eq(__a, __b)    ((__a) == (__b))
-#define cputime_gt(__a, __b)    ((__a) > (__b))
-#define cputime_ge(__a, __b)    ((__a) >= (__b))
-#define cputime_lt(__a, __b)    ((__a) < (__b))
-#define cputime_le(__a, __b)    ((__a) <= (__b))
-#define cputime_to_jiffies(__ct)        (__ct)
+#define cputime_to_jiffies(__ct)        (__force unsigned long)(__ct)
 #define cputime_to_scaled(__ct)         (__ct)
-#define jiffies_to_cputime(__hz)        (__hz)
+#define jiffies_to_cputime(__hz)        (__force cputime_t)(__hz)
 
-typedef u64 cputime64_t;
+typedef u64 __nocast cputime64_t;
 
-#define cputime64_zero          (0ULL)
-#define cputime64_add(__a, __b) ((__a) + (__b))
-#define cputime64_sub(__a, __b) ((__a) - (__b))
-#define cputime64_to_jiffies64(__ct)    (__ct)
-#define jiffies64_to_cputime64(__jif)   (__jif)
-#define cputime_to_cputime64(__ct)      ((u64) __ct)
-#define cputime64_gt(__a, __b)  ((__a) > (__b))
+#define cputime64_to_jiffies64(__ct)    (__force u64)(__ct)
+#define jiffies64_to_cputime64(__jif)   (__force cputime64_t)(__jif)
 
-#define nsecs_to_cputime64(__ct)        nsecs_to_jiffies64(__ct)
+#define nsecs_to_cputime64(__ct)        \
+        jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
 
 
 /*
  * Convert cputime to microseconds and back.
 */
-#define cputime_to_usecs(__ct)          jiffies_to_usecs(__ct)
-#define usecs_to_cputime(__msecs)       usecs_to_jiffies(__msecs)
+#define cputime_to_usecs(__ct)          \
+        jiffies_to_usecs(cputime_to_jiffies(__ct));
+#define usecs_to_cputime(__msecs)       \
+        jiffies_to_cputime(usecs_to_jiffies(__msecs));
 
 /*
  * Convert cputime to seconds and back.
 */
-#define cputime_to_secs(jif)            ((jif) / HZ)
-#define secs_to_cputime(sec)            ((sec) * HZ)
+#define cputime_to_secs(jif)            (cputime_to_jiffies(jif) / HZ)
+#define secs_to_cputime(sec)            jiffies_to_cputime((sec) * HZ)
 
 /*
  * Convert cputime to timespec and back.
 */
-#define timespec_to_cputime(__val)      timespec_to_jiffies(__val)
-#define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val)
+#define timespec_to_cputime(__val)      \
+        jiffies_to_cputime(timespec_to_jiffies(__val))
+#define cputime_to_timespec(__ct,__val) \
+        jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
 
 /*
  * Convert cputime to timeval and back.
 */
-#define timeval_to_cputime(__val)       timeval_to_jiffies(__val)
-#define cputime_to_timeval(__ct,__val)  jiffies_to_timeval(__ct,__val)
+#define timeval_to_cputime(__val)       \
+        jiffies_to_cputime(timeval_to_jiffies(__val))
+#define cputime_to_timeval(__ct,__val)  \
+        jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
 
 /*
  * Convert cputime to clock and back.
 */
-#define cputime_to_clock_t(__ct)        jiffies_to_clock_t(__ct)
-#define clock_t_to_cputime(__x)         clock_t_to_jiffies(__x)
+#define cputime_to_clock_t(__ct)        \
+        jiffies_to_clock_t(cputime_to_jiffies(__ct))
+#define clock_t_to_cputime(__x)         \
+        jiffies_to_cputime(clock_t_to_jiffies(__x))
 
 /*
  * Convert cputime64 to clock.
 */
-#define cputime64_to_clock_t(__ct)      jiffies_64_to_clock_t(__ct)
+#define cputime64_to_clock_t(__ct)      \
+        jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
 
 #endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1c4f3e9b9bc5..5649032d73fe 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -483,8 +483,8 @@ struct task_cputime {
 
 #define INIT_CPUTIME    \
         (struct task_cputime) {                                 \
-                .utime = cputime_zero,                          \
-                .stime = cputime_zero,                          \
+                .utime = 0,                                     \
+                .stime = 0,                                     \
                 .sum_exec_runtime = 0,                          \
         }
 
diff --git a/kernel/acct.c b/kernel/acct.c
index fa7eb3de2ddc..203dfead2e06 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -613,8 +613,8 @@ void acct_collect(long exitcode, int group_dead)
                 pacct->ac_flag |= ACORE;
         if (current->flags & PF_SIGNALED)
                 pacct->ac_flag |= AXSIG;
-        pacct->ac_utime = cputime_add(pacct->ac_utime, current->utime);
-        pacct->ac_stime = cputime_add(pacct->ac_stime, current->stime);
+        pacct->ac_utime += current->utime;
+        pacct->ac_stime += current->stime;
         pacct->ac_minflt += current->min_flt;
         pacct->ac_majflt += current->maj_flt;
         spin_unlock_irq(&current->sighand->siglock);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 563f13609470..3f8ee8a138c4 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -178,8 +178,7 @@ static inline void check_for_tasks(int cpu)
         write_lock_irq(&tasklist_lock);
         for_each_process(p) {
                 if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
-                    (!cputime_eq(p->utime, cputime_zero) ||
-                     !cputime_eq(p->stime, cputime_zero)))
+                    (p->utime || p->stime))
                         printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                 "(state = %ld, flags = %x)\n",
                                 p->comm, task_pid_nr(p), cpu,
diff --git a/kernel/exit.c b/kernel/exit.c
index d0b7d988f873..5e0d1f4c696e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -121,9 +121,9 @@ static void __exit_signal(struct task_struct *tsk)
                  * We won't ever get here for the group leader, since it
                  * will have been the last reference on the signal_struct.
                  */
-                sig->utime = cputime_add(sig->utime, tsk->utime);
-                sig->stime = cputime_add(sig->stime, tsk->stime);
-                sig->gtime = cputime_add(sig->gtime, tsk->gtime);
+                sig->utime += tsk->utime;
+                sig->stime += tsk->stime;
+                sig->gtime += tsk->gtime;
                 sig->min_flt += tsk->min_flt;
                 sig->maj_flt += tsk->maj_flt;
                 sig->nvcsw += tsk->nvcsw;
@@ -1255,19 +1255,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                 spin_lock_irq(&p->real_parent->sighand->siglock);
                 psig = p->real_parent->signal;
                 sig = p->signal;
-                psig->cutime =
-                        cputime_add(psig->cutime,
-                        cputime_add(tgutime,
-                                    sig->cutime));
-                psig->cstime =
-                        cputime_add(psig->cstime,
-                        cputime_add(tgstime,
-                                    sig->cstime));
-                psig->cgtime =
-                        cputime_add(psig->cgtime,
-                        cputime_add(p->gtime,
-                        cputime_add(sig->gtime,
-                                    sig->cgtime)));
+                psig->cutime += tgutime + sig->cutime;
+                psig->cstime += tgstime + sig->cstime;
+                psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
                 psig->cmin_flt +=
                         p->min_flt + sig->min_flt + sig->cmin_flt;
                 psig->cmaj_flt +=
diff --git a/kernel/fork.c b/kernel/fork.c
index da4a6a10d088..b058c5820ecd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1023,8 +1023,8 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
  */
 static void posix_cpu_timers_init(struct task_struct *tsk)
 {
-        tsk->cputime_expires.prof_exp = cputime_zero;
-        tsk->cputime_expires.virt_exp = cputime_zero;
+        tsk->cputime_expires.prof_exp = 0;
+        tsk->cputime_expires.virt_exp = 0;
         tsk->cputime_expires.sched_exp = 0;
         INIT_LIST_HEAD(&tsk->cpu_timers[0]);
         INIT_LIST_HEAD(&tsk->cpu_timers[1]);
@@ -1132,14 +1132,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
         init_sigpending(&p->pending);
 
-        p->utime = cputime_zero;
-        p->stime = cputime_zero;
-        p->gtime = cputime_zero;
-        p->utimescaled = cputime_zero;
-        p->stimescaled = cputime_zero;
+        p->utime = p->stime = p->gtime = 0;
+        p->utimescaled = p->stimescaled = 0;
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
-        p->prev_utime = cputime_zero;
-        p->prev_stime = cputime_zero;
+        p->prev_utime = p->prev_stime = 0;
 #endif
 #if defined(SPLIT_RSS_COUNTING)
         memset(&p->rss_stat, 0, sizeof(p->rss_stat));
diff --git a/kernel/itimer.c b/kernel/itimer.c
index d802883153da..22000c3db0dd 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -52,22 +52,22 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
 
         cval = it->expires;
         cinterval = it->incr;
-        if (!cputime_eq(cval, cputime_zero)) {
+        if (cval) {
                 struct task_cputime cputime;
                 cputime_t t;
 
                 thread_group_cputimer(tsk, &cputime);
                 if (clock_id == CPUCLOCK_PROF)
-                        t = cputime_add(cputime.utime, cputime.stime);
+                        t = cputime.utime + cputime.stime;
                 else
                         /* CPUCLOCK_VIRT */
                         t = cputime.utime;
 
-                if (cputime_le(cval, t))
+                if (cval < t)
                         /* about to fire */
                         cval = cputime_one_jiffy;
                 else
-                        cval = cputime_sub(cval, t);
+                        cval = cval - t;
         }
 
         spin_unlock_irq(&tsk->sighand->siglock);
@@ -161,10 +161,9 @@ static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
 
         cval = it->expires;
         cinterval = it->incr;
-        if (!cputime_eq(cval, cputime_zero) ||
-            !cputime_eq(nval, cputime_zero)) {
-                if (cputime_gt(nval, cputime_zero))
-                        nval = cputime_add(nval, cputime_one_jiffy);
+        if (cval || nval) {
+                if (nval > 0)
+                        nval += cputime_one_jiffy;
                 set_process_cpu_timer(tsk, clock_id, &nval, &cval);
         }
         it->expires = nval;
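
get_cpu_itimer() above never reports zero time remaining for an armed timer: if the thread group's accumulated CPU time has already passed the expiry, the remaining time is clamped to one jiffy ("about to fire"). Restated as a small standalone helper (a sketch with plain integers and an invented name, not kernel code):

static unsigned long long itimer_remaining(unsigned long long cval,
                                           unsigned long long t,
                                           unsigned long long one_jiffy)
{
        if (!cval)
                return 0;           /* timer not armed at all */
        if (cval < t)
                return one_jiffy;   /* expired: report the minimum, not 0 */
        return cval - t;            /* normal case: time left until expiry */
}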
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index e7cb76dc18f5..125cb67daa21 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
@@ -78,7 +78,7 @@ static inline int cpu_time_before(const clockid_t which_clock, | |||
78 | if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { | 78 | if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { |
79 | return now.sched < then.sched; | 79 | return now.sched < then.sched; |
80 | } else { | 80 | } else { |
81 | return cputime_lt(now.cpu, then.cpu); | 81 | return now.cpu < then.cpu; |
82 | } | 82 | } |
83 | } | 83 | } |
84 | static inline void cpu_time_add(const clockid_t which_clock, | 84 | static inline void cpu_time_add(const clockid_t which_clock, |
@@ -88,7 +88,7 @@ static inline void cpu_time_add(const clockid_t which_clock, | |||
88 | if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { | 88 | if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { |
89 | acc->sched += val.sched; | 89 | acc->sched += val.sched; |
90 | } else { | 90 | } else { |
91 | acc->cpu = cputime_add(acc->cpu, val.cpu); | 91 | acc->cpu += val.cpu; |
92 | } | 92 | } |
93 | } | 93 | } |
94 | static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock, | 94 | static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock, |
@@ -98,25 +98,12 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock, | |||
98 | if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { | 98 | if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { |
99 | a.sched -= b.sched; | 99 | a.sched -= b.sched; |
100 | } else { | 100 | } else { |
101 | a.cpu = cputime_sub(a.cpu, b.cpu); | 101 | a.cpu -= b.cpu; |
102 | } | 102 | } |
103 | return a; | 103 | return a; |
104 | } | 104 | } |
105 | 105 | ||
106 | /* | 106 | /* |
107 | * Divide and limit the result to res >= 1 | ||
108 | * | ||
109 | * This is necessary to prevent signal delivery starvation, when the result of | ||
110 | * the division would be rounded down to 0. | ||
111 | */ | ||
112 | static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div) | ||
113 | { | ||
114 | cputime_t res = cputime_div(time, div); | ||
115 | |||
116 | return max_t(cputime_t, res, 1); | ||
117 | } | ||
118 | |||
119 | /* | ||
120 | * Update expiry time from increment, and increase overrun count, | 107 | * Update expiry time from increment, and increase overrun count, |
121 | * given the current clock sample. | 108 | * given the current clock sample. |
122 | */ | 109 | */ |
@@ -148,28 +135,26 @@ static void bump_cpu_timer(struct k_itimer *timer, | |||
148 | } else { | 135 | } else { |
149 | cputime_t delta, incr; | 136 | cputime_t delta, incr; |
150 | 137 | ||
151 | if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu)) | 138 | if (now.cpu < timer->it.cpu.expires.cpu) |
152 | return; | 139 | return; |
153 | incr = timer->it.cpu.incr.cpu; | 140 | incr = timer->it.cpu.incr.cpu; |
154 | delta = cputime_sub(cputime_add(now.cpu, incr), | 141 | delta = now.cpu + incr - timer->it.cpu.expires.cpu; |
155 | timer->it.cpu.expires.cpu); | ||
156 | /* Don't use (incr*2 < delta), incr*2 might overflow. */ | 142 | /* Don't use (incr*2 < delta), incr*2 might overflow. */ |
157 | for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++) | 143 | for (i = 0; incr < delta - incr; i++) |
158 | incr = cputime_add(incr, incr); | 144 | incr += incr; |
159 | for (; i >= 0; incr = cputime_halve(incr), i--) { | 145 | for (; i >= 0; incr = incr >> 1, i--) { |
160 | if (cputime_lt(delta, incr)) | 146 | if (delta < incr) |
161 | continue; | 147 | continue; |
162 | timer->it.cpu.expires.cpu = | 148 | timer->it.cpu.expires.cpu += incr; |
163 | cputime_add(timer->it.cpu.expires.cpu, incr); | ||
164 | timer->it_overrun += 1 << i; | 149 | timer->it_overrun += 1 << i; |
165 | delta = cputime_sub(delta, incr); | 150 | delta -= incr; |
166 | } | 151 | } |
167 | } | 152 | } |
168 | } | 153 | } |
169 | 154 | ||
170 | static inline cputime_t prof_ticks(struct task_struct *p) | 155 | static inline cputime_t prof_ticks(struct task_struct *p) |
171 | { | 156 | { |
172 | return cputime_add(p->utime, p->stime); | 157 | return p->utime + p->stime; |
173 | } | 158 | } |
174 | static inline cputime_t virt_ticks(struct task_struct *p) | 159 | static inline cputime_t virt_ticks(struct task_struct *p) |
175 | { | 160 | { |
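bump_cpu_timer() above is the one place where the conversion is more than mechanical reading: the overrun count is computed with a doubling and halving loop so that the comparison never needs incr * 2, which could overflow, as the retained comment says. A compilable userspace model of that loop (bump_timer() and its parameters are illustrative names):

    #include <stdint.h>

    /*
     * Advance *expires past now in whole steps of incr and return the
     * number of periods that elapsed. incr is grown by self-addition
     * while 2*incr still fits below delta (never incr * 2, which might
     * overflow before the test), then peeled off largest chunk first.
     */
    static uint64_t bump_timer(uint64_t now, uint64_t *expires, uint64_t incr)
    {
            uint64_t delta, orun = 0;
            int i;

            if (!incr || now < *expires)
                    return 0;               /* disarmed or not expired yet */

            delta = now + incr - *expires;  /* delta >= incr here */
            for (i = 0; incr < delta - incr; i++)
                    incr += incr;
            for (; i >= 0; incr >>= 1, i--) {
                    if (delta < incr)
                            continue;
                    *expires += incr;
                    orun += (uint64_t)1 << i;
                    delta -= incr;
            }
            return orun;
    }

With *expires = 10, incr = 3 and now = 17 this walks the expiry to 19 in a logarithmic number of iterations and reports 3 overruns, the same result as repeatedly adding incr.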
@@ -248,8 +233,8 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) | |||
248 | 233 | ||
249 | t = tsk; | 234 | t = tsk; |
250 | do { | 235 | do { |
251 | times->utime = cputime_add(times->utime, t->utime); | 236 | times->utime += t->utime; |
252 | times->stime = cputime_add(times->stime, t->stime); | 237 | times->stime += t->stime; |
253 | times->sum_exec_runtime += task_sched_runtime(t); | 238 | times->sum_exec_runtime += task_sched_runtime(t); |
254 | } while_each_thread(tsk, t); | 239 | } while_each_thread(tsk, t); |
255 | out: | 240 | out: |
@@ -258,10 +243,10 @@ out: | |||
258 | 243 | ||
259 | static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b) | 244 | static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b) |
260 | { | 245 | { |
261 | if (cputime_gt(b->utime, a->utime)) | 246 | if (b->utime > a->utime) |
262 | a->utime = b->utime; | 247 | a->utime = b->utime; |
263 | 248 | ||
264 | if (cputime_gt(b->stime, a->stime)) | 249 | if (b->stime > a->stime) |
265 | a->stime = b->stime; | 250 | a->stime = b->stime; |
266 | 251 | ||
267 | if (b->sum_exec_runtime > a->sum_exec_runtime) | 252 | if (b->sum_exec_runtime > a->sum_exec_runtime) |
@@ -306,7 +291,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock, | |||
306 | return -EINVAL; | 291 | return -EINVAL; |
307 | case CPUCLOCK_PROF: | 292 | case CPUCLOCK_PROF: |
308 | thread_group_cputime(p, &cputime); | 293 | thread_group_cputime(p, &cputime); |
309 | cpu->cpu = cputime_add(cputime.utime, cputime.stime); | 294 | cpu->cpu = cputime.utime + cputime.stime; |
310 | break; | 295 | break; |
311 | case CPUCLOCK_VIRT: | 296 | case CPUCLOCK_VIRT: |
312 | thread_group_cputime(p, &cputime); | 297 | thread_group_cputime(p, &cputime); |
@@ -470,26 +455,24 @@ static void cleanup_timers(struct list_head *head, | |||
470 | unsigned long long sum_exec_runtime) | 455 | unsigned long long sum_exec_runtime) |
471 | { | 456 | { |
472 | struct cpu_timer_list *timer, *next; | 457 | struct cpu_timer_list *timer, *next; |
473 | cputime_t ptime = cputime_add(utime, stime); | 458 | cputime_t ptime = utime + stime; |
474 | 459 | ||
475 | list_for_each_entry_safe(timer, next, head, entry) { | 460 | list_for_each_entry_safe(timer, next, head, entry) { |
476 | list_del_init(&timer->entry); | 461 | list_del_init(&timer->entry); |
477 | if (cputime_lt(timer->expires.cpu, ptime)) { | 462 | if (timer->expires.cpu < ptime) { |
478 | timer->expires.cpu = cputime_zero; | 463 | timer->expires.cpu = 0; |
479 | } else { | 464 | } else { |
480 | timer->expires.cpu = cputime_sub(timer->expires.cpu, | 465 | timer->expires.cpu -= ptime; |
481 | ptime); | ||
482 | } | 466 | } |
483 | } | 467 | } |
484 | 468 | ||
485 | ++head; | 469 | ++head; |
486 | list_for_each_entry_safe(timer, next, head, entry) { | 470 | list_for_each_entry_safe(timer, next, head, entry) { |
487 | list_del_init(&timer->entry); | 471 | list_del_init(&timer->entry); |
488 | if (cputime_lt(timer->expires.cpu, utime)) { | 472 | if (timer->expires.cpu < utime) { |
489 | timer->expires.cpu = cputime_zero; | 473 | timer->expires.cpu = 0; |
490 | } else { | 474 | } else { |
491 | timer->expires.cpu = cputime_sub(timer->expires.cpu, | 475 | timer->expires.cpu -= utime; |
492 | utime); | ||
493 | } | 476 | } |
494 | } | 477 | } |
495 | 478 | ||
@@ -520,8 +503,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk) | |||
520 | struct signal_struct *const sig = tsk->signal; | 503 | struct signal_struct *const sig = tsk->signal; |
521 | 504 | ||
522 | cleanup_timers(tsk->signal->cpu_timers, | 505 | cleanup_timers(tsk->signal->cpu_timers, |
523 | cputime_add(tsk->utime, sig->utime), | 506 | tsk->utime + sig->utime, tsk->stime + sig->stime, |
524 | cputime_add(tsk->stime, sig->stime), | ||
525 | tsk->se.sum_exec_runtime + sig->sum_sched_runtime); | 507 | tsk->se.sum_exec_runtime + sig->sum_sched_runtime); |
526 | } | 508 | } |
527 | 509 | ||
@@ -540,8 +522,7 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) | |||
540 | 522 | ||
541 | static inline int expires_gt(cputime_t expires, cputime_t new_exp) | 523 | static inline int expires_gt(cputime_t expires, cputime_t new_exp) |
542 | { | 524 | { |
543 | return cputime_eq(expires, cputime_zero) || | 525 | return expires == 0 || expires > new_exp; |
544 | cputime_gt(expires, new_exp); | ||
545 | } | 526 | } |
546 | 527 | ||
547 | /* | 528 | /* |
@@ -651,7 +632,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock, | |||
651 | default: | 632 | default: |
652 | return -EINVAL; | 633 | return -EINVAL; |
653 | case CPUCLOCK_PROF: | 634 | case CPUCLOCK_PROF: |
654 | cpu->cpu = cputime_add(cputime.utime, cputime.stime); | 635 | cpu->cpu = cputime.utime + cputime.stime; |
655 | break; | 636 | break; |
656 | case CPUCLOCK_VIRT: | 637 | case CPUCLOCK_VIRT: |
657 | cpu->cpu = cputime.utime; | 638 | cpu->cpu = cputime.utime; |
@@ -918,12 +899,12 @@ static void check_thread_timers(struct task_struct *tsk, | |||
918 | unsigned long soft; | 899 | unsigned long soft; |
919 | 900 | ||
920 | maxfire = 20; | 901 | maxfire = 20; |
921 | tsk->cputime_expires.prof_exp = cputime_zero; | 902 | tsk->cputime_expires.prof_exp = 0; |
922 | while (!list_empty(timers)) { | 903 | while (!list_empty(timers)) { |
923 | struct cpu_timer_list *t = list_first_entry(timers, | 904 | struct cpu_timer_list *t = list_first_entry(timers, |
924 | struct cpu_timer_list, | 905 | struct cpu_timer_list, |
925 | entry); | 906 | entry); |
926 | if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) { | 907 | if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) { |
927 | tsk->cputime_expires.prof_exp = t->expires.cpu; | 908 | tsk->cputime_expires.prof_exp = t->expires.cpu; |
928 | break; | 909 | break; |
929 | } | 910 | } |
@@ -933,12 +914,12 @@ static void check_thread_timers(struct task_struct *tsk, | |||
933 | 914 | ||
934 | ++timers; | 915 | ++timers; |
935 | maxfire = 20; | 916 | maxfire = 20; |
936 | tsk->cputime_expires.virt_exp = cputime_zero; | 917 | tsk->cputime_expires.virt_exp = 0; |
937 | while (!list_empty(timers)) { | 918 | while (!list_empty(timers)) { |
938 | struct cpu_timer_list *t = list_first_entry(timers, | 919 | struct cpu_timer_list *t = list_first_entry(timers, |
939 | struct cpu_timer_list, | 920 | struct cpu_timer_list, |
940 | entry); | 921 | entry); |
941 | if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) { | 922 | if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) { |
942 | tsk->cputime_expires.virt_exp = t->expires.cpu; | 923 | tsk->cputime_expires.virt_exp = t->expires.cpu; |
943 | break; | 924 | break; |
944 | } | 925 | } |
@@ -1009,20 +990,19 @@ static u32 onecputick; | |||
1009 | static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, | 990 | static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, |
1010 | cputime_t *expires, cputime_t cur_time, int signo) | 991 | cputime_t *expires, cputime_t cur_time, int signo) |
1011 | { | 992 | { |
1012 | if (cputime_eq(it->expires, cputime_zero)) | 993 | if (!it->expires) |
1013 | return; | 994 | return; |
1014 | 995 | ||
1015 | if (cputime_ge(cur_time, it->expires)) { | 996 | if (cur_time >= it->expires) { |
1016 | if (!cputime_eq(it->incr, cputime_zero)) { | 997 | if (it->incr) { |
1017 | it->expires = cputime_add(it->expires, it->incr); | 998 | it->expires += it->incr; |
1018 | it->error += it->incr_error; | 999 | it->error += it->incr_error; |
1019 | if (it->error >= onecputick) { | 1000 | if (it->error >= onecputick) { |
1020 | it->expires = cputime_sub(it->expires, | 1001 | it->expires -= cputime_one_jiffy; |
1021 | cputime_one_jiffy); | ||
1022 | it->error -= onecputick; | 1002 | it->error -= onecputick; |
1023 | } | 1003 | } |
1024 | } else { | 1004 | } else { |
1025 | it->expires = cputime_zero; | 1005 | it->expires = 0; |
1026 | } | 1006 | } |
1027 | 1007 | ||
1028 | trace_itimer_expire(signo == SIGPROF ? | 1008 | trace_itimer_expire(signo == SIGPROF ? |
@@ -1031,9 +1011,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, | |||
1031 | __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); | 1011 | __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); |
1032 | } | 1012 | } |
1033 | 1013 | ||
1034 | if (!cputime_eq(it->expires, cputime_zero) && | 1014 | if (it->expires && (!*expires || it->expires < *expires)) { |
1035 | (cputime_eq(*expires, cputime_zero) || | ||
1036 | cputime_lt(it->expires, *expires))) { | ||
1037 | *expires = it->expires; | 1015 | *expires = it->expires; |
1038 | } | 1016 | } |
1039 | } | 1017 | } |
@@ -1048,9 +1026,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it, | |||
1048 | */ | 1026 | */ |
1049 | static inline int task_cputime_zero(const struct task_cputime *cputime) | 1027 | static inline int task_cputime_zero(const struct task_cputime *cputime) |
1050 | { | 1028 | { |
1051 | if (cputime_eq(cputime->utime, cputime_zero) && | 1029 | if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime) |
1052 | cputime_eq(cputime->stime, cputime_zero) && | ||
1053 | cputime->sum_exec_runtime == 0) | ||
1054 | return 1; | 1030 | return 1; |
1055 | return 0; | 1031 | return 0; |
1056 | } | 1032 | } |
@@ -1076,15 +1052,15 @@ static void check_process_timers(struct task_struct *tsk, | |||
1076 | */ | 1052 | */ |
1077 | thread_group_cputimer(tsk, &cputime); | 1053 | thread_group_cputimer(tsk, &cputime); |
1078 | utime = cputime.utime; | 1054 | utime = cputime.utime; |
1079 | ptime = cputime_add(utime, cputime.stime); | 1055 | ptime = utime + cputime.stime; |
1080 | sum_sched_runtime = cputime.sum_exec_runtime; | 1056 | sum_sched_runtime = cputime.sum_exec_runtime; |
1081 | maxfire = 20; | 1057 | maxfire = 20; |
1082 | prof_expires = cputime_zero; | 1058 | prof_expires = 0; |
1083 | while (!list_empty(timers)) { | 1059 | while (!list_empty(timers)) { |
1084 | struct cpu_timer_list *tl = list_first_entry(timers, | 1060 | struct cpu_timer_list *tl = list_first_entry(timers, |
1085 | struct cpu_timer_list, | 1061 | struct cpu_timer_list, |
1086 | entry); | 1062 | entry); |
1087 | if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) { | 1063 | if (!--maxfire || ptime < tl->expires.cpu) { |
1088 | prof_expires = tl->expires.cpu; | 1064 | prof_expires = tl->expires.cpu; |
1089 | break; | 1065 | break; |
1090 | } | 1066 | } |
@@ -1094,12 +1070,12 @@ static void check_process_timers(struct task_struct *tsk, | |||
1094 | 1070 | ||
1095 | ++timers; | 1071 | ++timers; |
1096 | maxfire = 20; | 1072 | maxfire = 20; |
1097 | virt_expires = cputime_zero; | 1073 | virt_expires = 0; |
1098 | while (!list_empty(timers)) { | 1074 | while (!list_empty(timers)) { |
1099 | struct cpu_timer_list *tl = list_first_entry(timers, | 1075 | struct cpu_timer_list *tl = list_first_entry(timers, |
1100 | struct cpu_timer_list, | 1076 | struct cpu_timer_list, |
1101 | entry); | 1077 | entry); |
1102 | if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) { | 1078 | if (!--maxfire || utime < tl->expires.cpu) { |
1103 | virt_expires = tl->expires.cpu; | 1079 | virt_expires = tl->expires.cpu; |
1104 | break; | 1080 | break; |
1105 | } | 1081 | } |
@@ -1154,8 +1130,7 @@ static void check_process_timers(struct task_struct *tsk, | |||
1154 | } | 1130 | } |
1155 | } | 1131 | } |
1156 | x = secs_to_cputime(soft); | 1132 | x = secs_to_cputime(soft); |
1157 | if (cputime_eq(prof_expires, cputime_zero) || | 1133 | if (!prof_expires || x < prof_expires) { |
1158 | cputime_lt(x, prof_expires)) { | ||
1159 | prof_expires = x; | 1134 | prof_expires = x; |
1160 | } | 1135 | } |
1161 | } | 1136 | } |
@@ -1249,12 +1224,9 @@ out: | |||
1249 | static inline int task_cputime_expired(const struct task_cputime *sample, | 1224 | static inline int task_cputime_expired(const struct task_cputime *sample, |
1250 | const struct task_cputime *expires) | 1225 | const struct task_cputime *expires) |
1251 | { | 1226 | { |
1252 | if (!cputime_eq(expires->utime, cputime_zero) && | 1227 | if (expires->utime && sample->utime >= expires->utime) |
1253 | cputime_ge(sample->utime, expires->utime)) | ||
1254 | return 1; | 1228 | return 1; |
1255 | if (!cputime_eq(expires->stime, cputime_zero) && | 1229 | if (expires->stime && sample->utime + sample->stime >= expires->stime) |
1256 | cputime_ge(cputime_add(sample->utime, sample->stime), | ||
1257 | expires->stime)) | ||
1258 | return 1; | 1230 | return 1; |
1259 | if (expires->sum_exec_runtime != 0 && | 1231 | if (expires->sum_exec_runtime != 0 && |
1260 | sample->sum_exec_runtime >= expires->sum_exec_runtime) | 1232 | sample->sum_exec_runtime >= expires->sum_exec_runtime) |
@@ -1389,18 +1361,18 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, | |||
1389 | * it to be relative, *newval argument is relative and we update | 1361 | * it to be relative, *newval argument is relative and we update |
1390 | * it to be absolute. | 1362 | * it to be absolute. |
1391 | */ | 1363 | */ |
1392 | if (!cputime_eq(*oldval, cputime_zero)) { | 1364 | if (*oldval) { |
1393 | if (cputime_le(*oldval, now.cpu)) { | 1365 | if (*oldval <= now.cpu) { |
1394 | /* Just about to fire. */ | 1366 | /* Just about to fire. */ |
1395 | *oldval = cputime_one_jiffy; | 1367 | *oldval = cputime_one_jiffy; |
1396 | } else { | 1368 | } else { |
1397 | *oldval = cputime_sub(*oldval, now.cpu); | 1369 | *oldval -= now.cpu; |
1398 | } | 1370 | } |
1399 | } | 1371 | } |
1400 | 1372 | ||
1401 | if (cputime_eq(*newval, cputime_zero)) | 1373 | if (!*newval) |
1402 | return; | 1374 | return; |
1403 | *newval = cputime_add(*newval, now.cpu); | 1375 | *newval += now.cpu; |
1404 | } | 1376 | } |
1405 | 1377 | ||
1406 | /* | 1378 | /* |
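check_cpu_itimer() rearms periodic timers with rounding-error compensation: the requested interval rarely lands on a whole cputime tick, so the truncated remainder (incr_error) accumulates in error, and once it reaches a full tick (onecputick) the next expiry is pulled one jiffy earlier. A self-contained model of that bookkeeping (the struct, the constant, and HZ = 100 are assumptions for the sketch, not kernel definitions):

    #include <stdint.h>

    #define NSEC_PER_TICK 10000000u         /* assumes HZ = 100 */

    struct itimer_model {
            uint64_t expires;               /* next expiry, in ticks; 0 = off */
            uint64_t incr;                  /* interval, rounded down to ticks */
            uint32_t error;                 /* accumulated remainder, ns */
            uint32_t incr_error;            /* remainder per interval, ns */
    };

    /* Returns nonzero when the timer fires; the kernel would send
     * SIGPROF or SIGVTALRM at that point. */
    static int itimer_check(struct itimer_model *it, uint64_t cur_time)
    {
            if (!it->expires || cur_time < it->expires)
                    return 0;

            if (it->incr) {
                    it->expires += it->incr;
                    it->error += it->incr_error;
                    if (it->error >= NSEC_PER_TICK) {
                            it->expires -= 1;       /* take one tick back */
                            it->error -= NSEC_PER_TICK;
                    }
            } else {
                    it->expires = 0;                /* one-shot: disarm */
            }
            return 1;
    }

Over many periods the firing rate then converges on the requested interval instead of drifting by the truncation error on every rearm.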
diff --git a/kernel/sched.c b/kernel/sched.c index d6b149ccf925..18cad4467e61 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -2166,7 +2166,7 @@ static int irqtime_account_hi_update(void) | |||
2166 | 2166 | ||
2167 | local_irq_save(flags); | 2167 | local_irq_save(flags); |
2168 | latest_ns = this_cpu_read(cpu_hardirq_time); | 2168 | latest_ns = this_cpu_read(cpu_hardirq_time); |
2169 | if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq)) | 2169 | if (nsecs_to_cputime64(latest_ns) > cpustat->irq) |
2170 | ret = 1; | 2170 | ret = 1; |
2171 | local_irq_restore(flags); | 2171 | local_irq_restore(flags); |
2172 | return ret; | 2172 | return ret; |
@@ -2181,7 +2181,7 @@ static int irqtime_account_si_update(void) | |||
2181 | 2181 | ||
2182 | local_irq_save(flags); | 2182 | local_irq_save(flags); |
2183 | latest_ns = this_cpu_read(cpu_softirq_time); | 2183 | latest_ns = this_cpu_read(cpu_softirq_time); |
2184 | if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq)) | 2184 | if (nsecs_to_cputime64(latest_ns) > cpustat->softirq) |
2185 | ret = 1; | 2185 | ret = 1; |
2186 | local_irq_restore(flags); | 2186 | local_irq_restore(flags); |
2187 | return ret; | 2187 | return ret; |
@@ -3868,19 +3868,17 @@ void account_user_time(struct task_struct *p, cputime_t cputime, | |||
3868 | cputime_t cputime_scaled) | 3868 | cputime_t cputime_scaled) |
3869 | { | 3869 | { |
3870 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 3870 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
3871 | cputime64_t tmp; | ||
3872 | 3871 | ||
3873 | /* Add user time to process. */ | 3872 | /* Add user time to process. */ |
3874 | p->utime = cputime_add(p->utime, cputime); | 3873 | p->utime += cputime; |
3875 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); | 3874 | p->utimescaled += cputime_scaled; |
3876 | account_group_user_time(p, cputime); | 3875 | account_group_user_time(p, cputime); |
3877 | 3876 | ||
3878 | /* Add user time to cpustat. */ | 3877 | /* Add user time to cpustat. */ |
3879 | tmp = cputime_to_cputime64(cputime); | ||
3880 | if (TASK_NICE(p) > 0) | 3878 | if (TASK_NICE(p) > 0) |
3881 | cpustat->nice = cputime64_add(cpustat->nice, tmp); | 3879 | cpustat->nice += (__force cputime64_t) cputime; |
3882 | else | 3880 | else |
3883 | cpustat->user = cputime64_add(cpustat->user, tmp); | 3881 | cpustat->user += (__force cputime64_t) cputime; |
3884 | 3882 | ||
3885 | cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); | 3883 | cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime); |
3886 | /* Account for user time used */ | 3884 | /* Account for user time used */ |
@@ -3896,24 +3894,21 @@ void account_user_time(struct task_struct *p, cputime_t cputime, | |||
3896 | static void account_guest_time(struct task_struct *p, cputime_t cputime, | 3894 | static void account_guest_time(struct task_struct *p, cputime_t cputime, |
3897 | cputime_t cputime_scaled) | 3895 | cputime_t cputime_scaled) |
3898 | { | 3896 | { |
3899 | cputime64_t tmp; | ||
3900 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 3897 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
3901 | 3898 | ||
3902 | tmp = cputime_to_cputime64(cputime); | ||
3903 | |||
3904 | /* Add guest time to process. */ | 3899 | /* Add guest time to process. */ |
3905 | p->utime = cputime_add(p->utime, cputime); | 3900 | p->utime += cputime; |
3906 | p->utimescaled = cputime_add(p->utimescaled, cputime_scaled); | 3901 | p->utimescaled += cputime_scaled; |
3907 | account_group_user_time(p, cputime); | 3902 | account_group_user_time(p, cputime); |
3908 | p->gtime = cputime_add(p->gtime, cputime); | 3903 | p->gtime += cputime; |
3909 | 3904 | ||
3910 | /* Add guest time to cpustat. */ | 3905 | /* Add guest time to cpustat. */ |
3911 | if (TASK_NICE(p) > 0) { | 3906 | if (TASK_NICE(p) > 0) { |
3912 | cpustat->nice = cputime64_add(cpustat->nice, tmp); | 3907 | cpustat->nice += (__force cputime64_t) cputime; |
3913 | cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp); | 3908 | cpustat->guest_nice += (__force cputime64_t) cputime; |
3914 | } else { | 3909 | } else { |
3915 | cpustat->user = cputime64_add(cpustat->user, tmp); | 3910 | cpustat->user += (__force cputime64_t) cputime; |
3916 | cpustat->guest = cputime64_add(cpustat->guest, tmp); | 3911 | cpustat->guest += (__force cputime64_t) cputime; |
3917 | } | 3912 | } |
3918 | } | 3913 | } |
3919 | 3914 | ||
@@ -3928,15 +3923,13 @@ static inline | |||
3928 | void __account_system_time(struct task_struct *p, cputime_t cputime, | 3923 | void __account_system_time(struct task_struct *p, cputime_t cputime, |
3929 | cputime_t cputime_scaled, cputime64_t *target_cputime64) | 3924 | cputime_t cputime_scaled, cputime64_t *target_cputime64) |
3930 | { | 3925 | { |
3931 | cputime64_t tmp = cputime_to_cputime64(cputime); | ||
3932 | |||
3933 | /* Add system time to process. */ | 3926 | /* Add system time to process. */ |
3934 | p->stime = cputime_add(p->stime, cputime); | 3927 | p->stime += cputime; |
3935 | p->stimescaled = cputime_add(p->stimescaled, cputime_scaled); | 3928 | p->stimescaled += cputime_scaled; |
3936 | account_group_system_time(p, cputime); | 3929 | account_group_system_time(p, cputime); |
3937 | 3930 | ||
3938 | /* Add system time to cpustat. */ | 3931 | /* Add system time to cpustat. */ |
3939 | *target_cputime64 = cputime64_add(*target_cputime64, tmp); | 3932 | *target_cputime64 += (__force cputime64_t) cputime; |
3940 | cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); | 3933 | cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime); |
3941 | 3934 | ||
3942 | /* Account for system time used */ | 3935 | /* Account for system time used */ |
@@ -3978,9 +3971,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset, | |||
3978 | void account_steal_time(cputime_t cputime) | 3971 | void account_steal_time(cputime_t cputime) |
3979 | { | 3972 | { |
3980 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 3973 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
3981 | cputime64_t cputime64 = cputime_to_cputime64(cputime); | ||
3982 | 3974 | ||
3983 | cpustat->steal = cputime64_add(cpustat->steal, cputime64); | 3975 | cpustat->steal += (__force cputime64_t) cputime; |
3984 | } | 3976 | } |
3985 | 3977 | ||
3986 | /* | 3978 | /* |
@@ -3990,13 +3982,12 @@ void account_steal_time(cputime_t cputime) | |||
3990 | void account_idle_time(cputime_t cputime) | 3982 | void account_idle_time(cputime_t cputime) |
3991 | { | 3983 | { |
3992 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 3984 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
3993 | cputime64_t cputime64 = cputime_to_cputime64(cputime); | ||
3994 | struct rq *rq = this_rq(); | 3985 | struct rq *rq = this_rq(); |
3995 | 3986 | ||
3996 | if (atomic_read(&rq->nr_iowait) > 0) | 3987 | if (atomic_read(&rq->nr_iowait) > 0) |
3997 | cpustat->iowait = cputime64_add(cpustat->iowait, cputime64); | 3988 | cpustat->iowait += (__force cputime64_t) cputime; |
3998 | else | 3989 | else |
3999 | cpustat->idle = cputime64_add(cpustat->idle, cputime64); | 3990 | cpustat->idle += (__force cputime64_t) cputime; |
4000 | } | 3991 | } |
4001 | 3992 | ||
4002 | static __always_inline bool steal_account_process_tick(void) | 3993 | static __always_inline bool steal_account_process_tick(void) |
@@ -4046,16 +4037,15 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick, | |||
4046 | struct rq *rq) | 4037 | struct rq *rq) |
4047 | { | 4038 | { |
4048 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); | 4039 | cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); |
4049 | cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy); | ||
4050 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; | 4040 | struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; |
4051 | 4041 | ||
4052 | if (steal_account_process_tick()) | 4042 | if (steal_account_process_tick()) |
4053 | return; | 4043 | return; |
4054 | 4044 | ||
4055 | if (irqtime_account_hi_update()) { | 4045 | if (irqtime_account_hi_update()) { |
4056 | cpustat->irq = cputime64_add(cpustat->irq, tmp); | 4046 | cpustat->irq += (__force cputime64_t) cputime_one_jiffy; |
4057 | } else if (irqtime_account_si_update()) { | 4047 | } else if (irqtime_account_si_update()) { |
4058 | cpustat->softirq = cputime64_add(cpustat->softirq, tmp); | 4048 | cpustat->softirq += (__force cputime64_t) cputime_one_jiffy; |
4059 | } else if (this_cpu_ksoftirqd() == p) { | 4049 | } else if (this_cpu_ksoftirqd() == p) { |
4060 | /* | 4050 | /* |
4061 | * ksoftirqd time do not get accounted in cpu_softirq_time. | 4051 | * ksoftirqd time do not get accounted in cpu_softirq_time. |
@@ -4171,7 +4161,7 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | |||
4171 | 4161 | ||
4172 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | 4162 | void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) |
4173 | { | 4163 | { |
4174 | cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime); | 4164 | cputime_t rtime, utime = p->utime, total = utime + p->stime; |
4175 | 4165 | ||
4176 | /* | 4166 | /* |
4177 | * Use CFS's precise accounting: | 4167 | * Use CFS's precise accounting: |
@@ -4179,11 +4169,11 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | |||
4179 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); | 4169 | rtime = nsecs_to_cputime(p->se.sum_exec_runtime); |
4180 | 4170 | ||
4181 | if (total) { | 4171 | if (total) { |
4182 | u64 temp = rtime; | 4172 | u64 temp = (__force u64) rtime; |
4183 | 4173 | ||
4184 | temp *= utime; | 4174 | temp *= (__force u64) utime; |
4185 | do_div(temp, total); | 4175 | do_div(temp, (__force u32) total); |
4186 | utime = (cputime_t)temp; | 4176 | utime = (__force cputime_t) temp; |
4187 | } else | 4177 | } else |
4188 | utime = rtime; | 4178 | utime = rtime; |
4189 | 4179 | ||
@@ -4191,7 +4181,7 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | |||
4191 | * Compare with previous values, to keep monotonicity: | 4181 | * Compare with previous values, to keep monotonicity: |
4192 | */ | 4182 | */ |
4193 | p->prev_utime = max(p->prev_utime, utime); | 4183 | p->prev_utime = max(p->prev_utime, utime); |
4194 | p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime)); | 4184 | p->prev_stime = max(p->prev_stime, rtime - p->prev_utime); |
4195 | 4185 | ||
4196 | *ut = p->prev_utime; | 4186 | *ut = p->prev_utime; |
4197 | *st = p->prev_stime; | 4187 | *st = p->prev_stime; |
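task_times() is where the casts earn their keep: rtime, the scheduler's precise runtime converted to cputime, is split between user and system in the ratio of the tick-sampled utime and stime, and the intermediate multiply has to drop to a plain u64 for do_div(). A userspace rendering of the arithmetic, with ordinary 64-bit division standing in for do_div() (split_runtime() is an illustrative name; the overflow caveat is mine):

    #include <stdint.h>

    /*
     * Split the precise runtime rtime into user/system shares in the
     * utime : stime ratio, then clamp against the previously reported
     * values so successive reads never go backwards.
     */
    static void split_runtime(uint64_t rtime, uint64_t utime, uint64_t stime,
                              uint64_t *prev_utime, uint64_t *prev_stime)
    {
            uint64_t total = utime + stime;

            if (total) {
                    /* rtime * utime can overflow 64 bits for extreme
                     * values; the sketch ignores that, as the kernel's
                     * own 64-bit temp does. */
                    utime = rtime * utime / total;
            } else {
                    utime = rtime;          /* no ticks sampled yet */
            }

            if (utime > *prev_utime)
                    *prev_utime = utime;
            if (rtime - *prev_utime > *prev_stime)
                    *prev_stime = rtime - *prev_utime;
    }

The clamp is why the caller reports prev_utime/prev_stime rather than the fresh split: a later sample with a different ratio must never make utime or stime appear to shrink.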
@@ -4208,21 +4198,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) | |||
4208 | 4198 | ||
4209 | thread_group_cputime(p, &cputime); | 4199 | thread_group_cputime(p, &cputime); |
4210 | 4200 | ||
4211 | total = cputime_add(cputime.utime, cputime.stime); | 4201 | total = cputime.utime + cputime.stime; |
4212 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); | 4202 | rtime = nsecs_to_cputime(cputime.sum_exec_runtime); |
4213 | 4203 | ||
4214 | if (total) { | 4204 | if (total) { |
4215 | u64 temp = rtime; | 4205 | u64 temp = (__force u64) rtime; |
4216 | 4206 | ||
4217 | temp *= cputime.utime; | 4207 | temp *= (__force u64) cputime.utime; |
4218 | do_div(temp, total); | 4208 | do_div(temp, (__force u32) total); |
4219 | utime = (cputime_t)temp; | 4209 | utime = (__force cputime_t) temp; |
4220 | } else | 4210 | } else |
4221 | utime = rtime; | 4211 | utime = rtime; |
4222 | 4212 | ||
4223 | sig->prev_utime = max(sig->prev_utime, utime); | 4213 | sig->prev_utime = max(sig->prev_utime, utime); |
4224 | sig->prev_stime = max(sig->prev_stime, | 4214 | sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime); |
4225 | cputime_sub(rtime, sig->prev_utime)); | ||
4226 | 4215 | ||
4227 | *ut = sig->prev_utime; | 4216 | *ut = sig->prev_utime; |
4228 | *st = sig->prev_stime; | 4217 | *st = sig->prev_stime; |
@@ -9769,7 +9758,8 @@ static void cpuacct_update_stats(struct task_struct *tsk, | |||
9769 | ca = task_ca(tsk); | 9758 | ca = task_ca(tsk); |
9770 | 9759 | ||
9771 | do { | 9760 | do { |
9772 | __percpu_counter_add(&ca->cpustat[idx], val, batch); | 9761 | __percpu_counter_add(&ca->cpustat[idx], |
9762 | (__force s64) val, batch); | ||
9773 | ca = ca->parent; | 9763 | ca = ca->parent; |
9774 | } while (ca); | 9764 | } while (ca); |
9775 | rcu_read_unlock(); | 9765 | rcu_read_unlock(); |
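Throughout sched.c the removed cputime_to_cputime64() temporaries are replaced by (__force cputime64_t) casts at the point of accumulation, and cpuacct_update_stats() likewise forces the value to s64 for the percpu counter. Reusing the __nocast/__force defines and cputime_t from the first sketch, the shape of the pattern is (struct cpu_stats and account_user() are illustrative):

    /* assumes the __nocast/__force defines and cputime_t from the
     * earlier sketch */
    typedef uint64_t __nocast cputime64_t;

    struct cpu_stats {
            cputime64_t user, nice;
    };

    static void account_user(struct cpu_stats *stat, cputime_t delta, int niced)
    {
            /* A no-op for the compiler; for sparse it marks a deliberate
             * crossing between two independently checked nocast types. */
            if (niced)
                    stat->nice += (__force cputime64_t) delta;
            else
                    stat->user += (__force cputime64_t) delta;
    }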
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 87f9e36ea56e..4b71dbef271d 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h | |||
@@ -283,8 +283,7 @@ static inline void account_group_user_time(struct task_struct *tsk, | |||
283 | return; | 283 | return; |
284 | 284 | ||
285 | raw_spin_lock(&cputimer->lock); | 285 | raw_spin_lock(&cputimer->lock); |
286 | cputimer->cputime.utime = | 286 | cputimer->cputime.utime += cputime; |
287 | cputime_add(cputimer->cputime.utime, cputime); | ||
288 | raw_spin_unlock(&cputimer->lock); | 287 | raw_spin_unlock(&cputimer->lock); |
289 | } | 288 | } |
290 | 289 | ||
@@ -307,8 +306,7 @@ static inline void account_group_system_time(struct task_struct *tsk, | |||
307 | return; | 306 | return; |
308 | 307 | ||
309 | raw_spin_lock(&cputimer->lock); | 308 | raw_spin_lock(&cputimer->lock); |
310 | cputimer->cputime.stime = | 309 | cputimer->cputime.stime += cputime; |
311 | cputime_add(cputimer->cputime.stime, cputime); | ||
312 | raw_spin_unlock(&cputimer->lock); | 310 | raw_spin_unlock(&cputimer->lock); |
313 | } | 311 | } |
314 | 312 | ||
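The two accessors above keep their locking unchanged; only the accumulation syntax is new. Each thread folds its tick into the thread group's shared cputimer under the cputimer lock, so a concurrent process-wide timer check reads a consistent utime/stime pair. A pthread model of that pattern (all names are illustrative, and the kernel uses a raw spinlock, not a mutex):

    #include <pthread.h>
    #include <stdint.h>

    struct group_cputimer {
            pthread_mutex_t lock;
            uint64_t utime, stime;  /* shared by all threads in the group */
            int running;            /* only accumulate while timers are armed */
    };

    static void account_group_time(struct group_cputimer *ct,
                                   uint64_t udelta, uint64_t sdelta)
    {
            if (!ct->running)
                    return;
            pthread_mutex_lock(&ct->lock);
            ct->utime += udelta;    /* was cputime_add() */
            ct->stime += sdelta;
            pthread_mutex_unlock(&ct->lock);
    }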
diff --git a/kernel/signal.c b/kernel/signal.c index b3f78d09a105..739ef2bf105c 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -1629,10 +1629,8 @@ bool do_notify_parent(struct task_struct *tsk, int sig) | |||
1629 | info.si_uid = __task_cred(tsk)->uid; | 1629 | info.si_uid = __task_cred(tsk)->uid; |
1630 | rcu_read_unlock(); | 1630 | rcu_read_unlock(); |
1631 | 1631 | ||
1632 | info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, | 1632 | info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime); |
1633 | tsk->signal->utime)); | 1633 | info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime); |
1634 | info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, | ||
1635 | tsk->signal->stime)); | ||
1636 | 1634 | ||
1637 | info.si_status = tsk->exit_code & 0x7f; | 1635 | info.si_status = tsk->exit_code & 0x7f; |
1638 | if (tsk->exit_code & 0x80) | 1636 | if (tsk->exit_code & 0x80) |
diff --git a/kernel/sys.c b/kernel/sys.c index 481611fbd079..ddf8155bf3f8 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1605,7 +1605,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1605 | unsigned long maxrss = 0; | 1605 | unsigned long maxrss = 0; |
1606 | 1606 | ||
1607 | memset((char *) r, 0, sizeof *r); | 1607 | memset((char *) r, 0, sizeof *r); |
1608 | utime = stime = cputime_zero; | 1608 | utime = stime = 0; |
1609 | 1609 | ||
1610 | if (who == RUSAGE_THREAD) { | 1610 | if (who == RUSAGE_THREAD) { |
1611 | task_times(current, &utime, &stime); | 1611 | task_times(current, &utime, &stime); |
@@ -1635,8 +1635,8 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1635 | 1635 | ||
1636 | case RUSAGE_SELF: | 1636 | case RUSAGE_SELF: |
1637 | thread_group_times(p, &tgutime, &tgstime); | 1637 | thread_group_times(p, &tgutime, &tgstime); |
1638 | utime = cputime_add(utime, tgutime); | 1638 | utime += tgutime; |
1639 | stime = cputime_add(stime, tgstime); | 1639 | stime += tgstime; |
1640 | r->ru_nvcsw += p->signal->nvcsw; | 1640 | r->ru_nvcsw += p->signal->nvcsw; |
1641 | r->ru_nivcsw += p->signal->nivcsw; | 1641 | r->ru_nivcsw += p->signal->nivcsw; |
1642 | r->ru_minflt += p->signal->min_flt; | 1642 | r->ru_minflt += p->signal->min_flt; |
diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 5bbfac85866e..23b4d784ebdd 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c | |||
@@ -127,7 +127,7 @@ void acct_update_integrals(struct task_struct *tsk) | |||
127 | 127 | ||
128 | local_irq_save(flags); | 128 | local_irq_save(flags); |
129 | time = tsk->stime + tsk->utime; | 129 | time = tsk->stime + tsk->utime; |
130 | dtime = cputime_sub(time, tsk->acct_timexpd); | 130 | dtime = time - tsk->acct_timexpd; |
131 | jiffies_to_timeval(cputime_to_jiffies(dtime), &value); | 131 | jiffies_to_timeval(cputime_to_jiffies(dtime), &value); |
132 | delta = value.tv_sec; | 132 | delta = value.tv_sec; |
133 | delta = delta * USEC_PER_SEC + value.tv_usec; | 133 | delta = delta * USEC_PER_SEC + value.tv_usec; |
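The tsacct.c hunk closes the commit with the same one-line story: a cputime_sub() becomes a subtraction, and dtime then flows through cputime_to_jiffies() and jiffies_to_timeval() to become the microsecond delta used for the mm integrals. A userspace sketch of that conversion chain, assuming a cputime_t that counts nanoseconds and HZ = 100 (both are per-architecture choices the macros normally hide):

    #include <stdint.h>

    #define HZ           100ULL
    #define NSEC_PER_SEC 1000000000ULL
    #define USEC_PER_SEC 1000000ULL

    /* Microseconds of CPU time consumed since the last call. */
    static uint64_t acct_delta_usec(uint64_t time, uint64_t *acct_timexpd)
    {
            uint64_t dtime = time - *acct_timexpd;        /* was cputime_sub() */
            uint64_t jif = dtime / (NSEC_PER_SEC / HZ);   /* cputime_to_jiffies() */
            uint64_t usec = jif * (USEC_PER_SEC / HZ);    /* jiffies_to_timeval() */

            if (usec)
                    *acct_timexpd = time;   /* advance the watermark */
            return usec;
    }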