summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    Frederic Weisbecker <fweisbec@gmail.com>  2017-01-30 22:09:22 -0500
committer Ingo Molnar <mingo@kernel.org>            2017-02-01 03:13:48 -0500
commit a1cecf2ba78e0a6de00ff99df34b662728535aa5 (patch)
tree   6435af480632b1605e8075e24cc1170b29a6603f
parent 16a6d9be90373fb0b521850cd0185a4d460dd152 (diff)
sched/cputime: Introduce special task_cputime_t() API to return old-typed cputime
This API returns a task's cputime in cputime_t in order to ease the
conversion of cputime internals to use nsecs units instead. Blindly
converting all cputime readers to use this API now will later let us
convert more smoothly and step by step all these places to use the
new nsec based cputime.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1485832191-26889-7-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/alpha/kernel/osf_sys.c        2
-rw-r--r--  arch/x86/kernel/apm_32.c           2
-rw-r--r--  drivers/isdn/mISDN/stack.c         2
-rw-r--r--  fs/binfmt_elf.c                    6
-rw-r--r--  fs/binfmt_elf_fdpic.c              6
-rw-r--r--  include/linux/sched.h             32
-rw-r--r--  kernel/acct.c                      2
-rw-r--r--  kernel/delayacct.c                 4
-rw-r--r--  kernel/signal.c                    4
-rw-r--r--  kernel/time/itimer.c               2
-rw-r--r--  kernel/time/posix-cpu-timers.c    46
-rw-r--r--  kernel/tsacct.c                    6
12 files changed, 70 insertions, 44 deletions
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 54d8616644e2..0f92438d736b 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1154,7 +1154,7 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
1154 memset(&r, 0, sizeof(r)); 1154 memset(&r, 0, sizeof(r));
1155 switch (who) { 1155 switch (who) {
1156 case RUSAGE_SELF: 1156 case RUSAGE_SELF:
1157 task_cputime(current, &utime, &stime); 1157 task_cputime_t(current, &utime, &stime);
1158 utime_jiffies = cputime_to_jiffies(utime); 1158 utime_jiffies = cputime_to_jiffies(utime);
1159 stime_jiffies = cputime_to_jiffies(stime); 1159 stime_jiffies = cputime_to_jiffies(stime);
1160 jiffies_to_timeval32(utime_jiffies, &r.ru_utime); 1160 jiffies_to_timeval32(utime_jiffies, &r.ru_utime);
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 45d44c173cf9..89c84fcdd3c0 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -913,7 +913,7 @@ static int apm_cpu_idle(struct cpuidle_device *dev,
913 unsigned int bucket; 913 unsigned int bucket;
914 914
915recalc: 915recalc:
916 task_cputime(current, &utime, &stime); 916 task_cputime_t(current, &utime, &stime);
917 if (jiffies_since_last_check > IDLE_CALC_LIMIT) { 917 if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
918 use_apm_idle = 0; 918 use_apm_idle = 0;
919 } else if (jiffies_since_last_check > idle_period) { 919 } else if (jiffies_since_last_check > idle_period) {
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 9cb4b621fbc3..0a3661767531 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -306,7 +306,7 @@ mISDNStackd(void *data)
306 "msg %d sleep %d stopped\n", 306 "msg %d sleep %d stopped\n",
307 dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt, 307 dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
308 st->stopped_cnt); 308 st->stopped_cnt);
309 task_cputime(st->thread, &utime, &stime); 309 task_cputime_t(st->thread, &utime, &stime);
310 printk(KERN_DEBUG 310 printk(KERN_DEBUG
311 "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n", 311 "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
312 dev_name(&st->dev->dev), utime, stime); 312 dev_name(&st->dev->dev), utime, stime);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 422370293cfd..68b915650cae 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1421,19 +1421,19 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
1421 prstatus->pr_pgrp = task_pgrp_vnr(p); 1421 prstatus->pr_pgrp = task_pgrp_vnr(p);
1422 prstatus->pr_sid = task_session_vnr(p); 1422 prstatus->pr_sid = task_session_vnr(p);
1423 if (thread_group_leader(p)) { 1423 if (thread_group_leader(p)) {
1424 struct task_cputime cputime; 1424 struct task_cputime_t cputime;
1425 1425
1426 /* 1426 /*
1427 * This is the record for the group leader. It shows the 1427 * This is the record for the group leader. It shows the
1428 * group-wide total, not its individual thread total. 1428 * group-wide total, not its individual thread total.
1429 */ 1429 */
1430 thread_group_cputime(p, &cputime); 1430 thread_group_cputime_t(p, &cputime);
1431 cputime_to_timeval(cputime.utime, &prstatus->pr_utime); 1431 cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
1432 cputime_to_timeval(cputime.stime, &prstatus->pr_stime); 1432 cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
1433 } else { 1433 } else {
1434 cputime_t utime, stime; 1434 cputime_t utime, stime;
1435 1435
1436 task_cputime(p, &utime, &stime); 1436 task_cputime_t(p, &utime, &stime);
1437 cputime_to_timeval(utime, &prstatus->pr_utime); 1437 cputime_to_timeval(utime, &prstatus->pr_utime);
1438 cputime_to_timeval(stime, &prstatus->pr_stime); 1438 cputime_to_timeval(stime, &prstatus->pr_stime);
1439 } 1439 }
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index d2e36f82c35d..6ccd9df7247a 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1342,19 +1342,19 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
1342 prstatus->pr_pgrp = task_pgrp_vnr(p); 1342 prstatus->pr_pgrp = task_pgrp_vnr(p);
1343 prstatus->pr_sid = task_session_vnr(p); 1343 prstatus->pr_sid = task_session_vnr(p);
1344 if (thread_group_leader(p)) { 1344 if (thread_group_leader(p)) {
1345 struct task_cputime cputime; 1345 struct task_cputime_t cputime;
1346 1346
1347 /* 1347 /*
1348 * This is the record for the group leader. It shows the 1348 * This is the record for the group leader. It shows the
1349 * group-wide total, not its individual thread total. 1349 * group-wide total, not its individual thread total.
1350 */ 1350 */
1351 thread_group_cputime(p, &cputime); 1351 thread_group_cputime_t(p, &cputime);
1352 cputime_to_timeval(cputime.utime, &prstatus->pr_utime); 1352 cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
1353 cputime_to_timeval(cputime.stime, &prstatus->pr_stime); 1353 cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
1354 } else { 1354 } else {
1355 cputime_t utime, stime; 1355 cputime_t utime, stime;
1356 1356
1357 task_cputime(p, &utime, &stime); 1357 task_cputime_t(p, &utime, &stime);
1358 cputime_to_timeval(utime, &prstatus->pr_utime); 1358 cputime_to_timeval(utime, &prstatus->pr_utime);
1359 cputime_to_timeval(stime, &prstatus->pr_stime); 1359 cputime_to_timeval(stime, &prstatus->pr_stime);
1360 } 1360 }
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 252ff25983c8..9cc722f77799 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -615,6 +615,13 @@ struct task_cputime {
615 unsigned long long sum_exec_runtime; 615 unsigned long long sum_exec_runtime;
616}; 616};
617 617
618/* Temporary type to ease cputime_t to nsecs conversion */
619struct task_cputime_t {
620 cputime_t utime;
621 cputime_t stime;
622 unsigned long long sum_exec_runtime;
623};
624
618/* Alternate field names when used to cache expirations. */ 625/* Alternate field names when used to cache expirations. */
619#define virt_exp utime 626#define virt_exp utime
620#define prof_exp stime 627#define prof_exp stime
@@ -748,7 +755,7 @@ struct signal_struct {
748 struct thread_group_cputimer cputimer; 755 struct thread_group_cputimer cputimer;
749 756
750 /* Earliest-expiration cache. */ 757 /* Earliest-expiration cache. */
751 struct task_cputime cputime_expires; 758 struct task_cputime_t cputime_expires;
752 759
753#ifdef CONFIG_NO_HZ_FULL 760#ifdef CONFIG_NO_HZ_FULL
754 atomic_t tick_dep_mask; 761 atomic_t tick_dep_mask;
@@ -1682,7 +1689,7 @@ struct task_struct {
1682/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ 1689/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1683 unsigned long min_flt, maj_flt; 1690 unsigned long min_flt, maj_flt;
1684 1691
1685 struct task_cputime cputime_expires; 1692 struct task_cputime_t cputime_expires;
1686 struct list_head cpu_timers[3]; 1693 struct list_head cpu_timers[3];
1687 1694
1688/* process credentials */ 1695/* process credentials */
@@ -2286,6 +2293,19 @@ static inline void task_cputime_scaled(struct task_struct *t,
2286} 2293}
2287#endif 2294#endif
2288 2295
2296static inline void task_cputime_t(struct task_struct *t,
2297 cputime_t *utime, cputime_t *stime)
2298{
2299 task_cputime(t, utime, stime);
2300}
2301
2302static inline void task_cputime_t_scaled(struct task_struct *t,
2303 cputime_t *utimescaled,
2304 cputime_t *stimescaled)
2305{
2306 task_cputime_scaled(t, utimescaled, stimescaled);
2307}
2308
2289extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); 2309extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2290extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st); 2310extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
2291 2311
@@ -3499,7 +3519,13 @@ static __always_inline bool need_resched(void)
3499 * Thread group CPU time accounting. 3519 * Thread group CPU time accounting.
3500 */ 3520 */
3501void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); 3521void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
3502void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); 3522void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times);
3523
3524static inline void thread_group_cputime_t(struct task_struct *tsk,
3525 struct task_cputime_t *times)
3526{
3527 thread_group_cputime(tsk, (struct task_cputime *)times);
3528}
3503 3529
3504/* 3530/*
3505 * Reevaluate whether the task has signals pending delivery. 3531 * Reevaluate whether the task has signals pending delivery.
diff --git a/kernel/acct.c b/kernel/acct.c
index 74963d192c5d..b9b190a8eecf 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -559,7 +559,7 @@ void acct_collect(long exitcode, int group_dead)
559 pacct->ac_flag |= ACORE; 559 pacct->ac_flag |= ACORE;
560 if (current->flags & PF_SIGNALED) 560 if (current->flags & PF_SIGNALED)
561 pacct->ac_flag |= AXSIG; 561 pacct->ac_flag |= AXSIG;
562 task_cputime(current, &utime, &stime); 562 task_cputime_t(current, &utime, &stime);
563 pacct->ac_utime += utime; 563 pacct->ac_utime += utime;
564 pacct->ac_stime += stime; 564 pacct->ac_stime += stime;
565 pacct->ac_minflt += current->min_flt; 565 pacct->ac_minflt += current->min_flt;
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 435c14a45118..228640f2b3d2 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -87,12 +87,12 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
87 unsigned long flags, t1; 87 unsigned long flags, t1;
88 s64 tmp; 88 s64 tmp;
89 89
90 task_cputime(tsk, &utime, &stime); 90 task_cputime_t(tsk, &utime, &stime);
91 tmp = (s64)d->cpu_run_real_total; 91 tmp = (s64)d->cpu_run_real_total;
92 tmp += cputime_to_nsecs(utime + stime); 92 tmp += cputime_to_nsecs(utime + stime);
93 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp; 93 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
94 94
95 task_cputime_scaled(tsk, &utimescaled, &stimescaled); 95 task_cputime_t_scaled(tsk, &utimescaled, &stimescaled);
96 tmp = (s64)d->cpu_scaled_run_real_total; 96 tmp = (s64)d->cpu_scaled_run_real_total;
97 tmp += cputime_to_nsecs(utimescaled + stimescaled); 97 tmp += cputime_to_nsecs(utimescaled + stimescaled);
98 d->cpu_scaled_run_real_total = 98 d->cpu_scaled_run_real_total =
diff --git a/kernel/signal.c b/kernel/signal.c
index 3603d93a1968..218048a837ea 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1619,7 +1619,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
1619 task_uid(tsk)); 1619 task_uid(tsk));
1620 rcu_read_unlock(); 1620 rcu_read_unlock();
1621 1621
1622 task_cputime(tsk, &utime, &stime); 1622 task_cputime_t(tsk, &utime, &stime);
1623 info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime); 1623 info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1624 info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime); 1624 info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1625 1625
@@ -1704,7 +1704,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
1704 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); 1704 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1705 rcu_read_unlock(); 1705 rcu_read_unlock();
1706 1706
1707 task_cputime(tsk, &utime, &stime); 1707 task_cputime_t(tsk, &utime, &stime);
1708 info.si_utime = cputime_to_clock_t(utime); 1708 info.si_utime = cputime_to_clock_t(utime);
1709 info.si_stime = cputime_to_clock_t(stime); 1709 info.si_stime = cputime_to_clock_t(stime);
1710 1710
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 8c89143f9ebf..f2d5097bcb6d 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -53,7 +53,7 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
53 cval = it->expires; 53 cval = it->expires;
54 cinterval = it->incr; 54 cinterval = it->incr;
55 if (cval) { 55 if (cval) {
56 struct task_cputime cputime; 56 struct task_cputime_t cputime;
57 cputime_t t; 57 cputime_t t;
58 58
59 thread_group_cputimer(tsk, &cputime); 59 thread_group_cputimer(tsk, &cputime);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index e9e8c10f0d9a..d53ff711a2a8 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -115,7 +115,7 @@ static void bump_cpu_timer(struct k_itimer *timer,
115 * Checks @cputime to see if all fields are zero. Returns true if all fields 115 * Checks @cputime to see if all fields are zero. Returns true if all fields
116 * are zero, false if any field is nonzero. 116 * are zero, false if any field is nonzero.
117 */ 117 */
118static inline int task_cputime_zero(const struct task_cputime *cputime) 118static inline int task_cputime_zero(const struct task_cputime_t *cputime)
119{ 119{
120 if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime) 120 if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
121 return 1; 121 return 1;
@@ -126,7 +126,7 @@ static inline unsigned long long prof_ticks(struct task_struct *p)
126{ 126{
127 cputime_t utime, stime; 127 cputime_t utime, stime;
128 128
129 task_cputime(p, &utime, &stime); 129 task_cputime_t(p, &utime, &stime);
130 130
131 return cputime_to_expires(utime + stime); 131 return cputime_to_expires(utime + stime);
132} 132}
@@ -134,7 +134,7 @@ static inline unsigned long long virt_ticks(struct task_struct *p)
134{ 134{
135 cputime_t utime, stime; 135 cputime_t utime, stime;
136 136
137 task_cputime(p, &utime, &stime); 137 task_cputime_t(p, &utime, &stime);
138 138
139 return cputime_to_expires(utime); 139 return cputime_to_expires(utime);
140} 140}
@@ -210,7 +210,7 @@ retry:
210 } 210 }
211} 211}
212 212
213static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum) 213static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime_t *sum)
214{ 214{
215 __update_gt_cputime(&cputime_atomic->utime, sum->utime); 215 __update_gt_cputime(&cputime_atomic->utime, sum->utime);
216 __update_gt_cputime(&cputime_atomic->stime, sum->stime); 216 __update_gt_cputime(&cputime_atomic->stime, sum->stime);
@@ -218,7 +218,7 @@ static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct
218} 218}
219 219
220/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */ 220/* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
221static inline void sample_cputime_atomic(struct task_cputime *times, 221static inline void sample_cputime_atomic(struct task_cputime_t *times,
222 struct task_cputime_atomic *atomic_times) 222 struct task_cputime_atomic *atomic_times)
223{ 223{
224 times->utime = atomic64_read(&atomic_times->utime); 224 times->utime = atomic64_read(&atomic_times->utime);
@@ -226,10 +226,10 @@ static inline void sample_cputime_atomic(struct task_cputime *times,
226 times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime); 226 times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
227} 227}
228 228
229void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) 229void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times)
230{ 230{
231 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; 231 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
232 struct task_cputime sum; 232 struct task_cputime_t sum;
233 233
234 /* Check if cputimer isn't running. This is accessed without locking. */ 234 /* Check if cputimer isn't running. This is accessed without locking. */
235 if (!READ_ONCE(cputimer->running)) { 235 if (!READ_ONCE(cputimer->running)) {
@@ -238,7 +238,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
238 * values through the TIMER_ABSTIME flag, therefore we have 238 * values through the TIMER_ABSTIME flag, therefore we have
239 * to synchronize the timer to the clock every time we start it. 239 * to synchronize the timer to the clock every time we start it.
240 */ 240 */
241 thread_group_cputime(tsk, &sum); 241 thread_group_cputime_t(tsk, &sum);
242 update_gt_cputime(&cputimer->cputime_atomic, &sum); 242 update_gt_cputime(&cputimer->cputime_atomic, &sum);
243 243
244 /* 244 /*
@@ -262,21 +262,21 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
262 struct task_struct *p, 262 struct task_struct *p,
263 unsigned long long *sample) 263 unsigned long long *sample)
264{ 264{
265 struct task_cputime cputime; 265 struct task_cputime_t cputime;
266 266
267 switch (CPUCLOCK_WHICH(which_clock)) { 267 switch (CPUCLOCK_WHICH(which_clock)) {
268 default: 268 default:
269 return -EINVAL; 269 return -EINVAL;
270 case CPUCLOCK_PROF: 270 case CPUCLOCK_PROF:
271 thread_group_cputime(p, &cputime); 271 thread_group_cputime_t(p, &cputime);
272 *sample = cputime_to_expires(cputime.utime + cputime.stime); 272 *sample = cputime_to_expires(cputime.utime + cputime.stime);
273 break; 273 break;
274 case CPUCLOCK_VIRT: 274 case CPUCLOCK_VIRT:
275 thread_group_cputime(p, &cputime); 275 thread_group_cputime_t(p, &cputime);
276 *sample = cputime_to_expires(cputime.utime); 276 *sample = cputime_to_expires(cputime.utime);
277 break; 277 break;
278 case CPUCLOCK_SCHED: 278 case CPUCLOCK_SCHED:
279 thread_group_cputime(p, &cputime); 279 thread_group_cputime_t(p, &cputime);
280 *sample = cputime.sum_exec_runtime; 280 *sample = cputime.sum_exec_runtime;
281 break; 281 break;
282 } 282 }
@@ -466,7 +466,7 @@ static void arm_timer(struct k_itimer *timer)
466{ 466{
467 struct task_struct *p = timer->it.cpu.task; 467 struct task_struct *p = timer->it.cpu.task;
468 struct list_head *head, *listpos; 468 struct list_head *head, *listpos;
469 struct task_cputime *cputime_expires; 469 struct task_cputime_t *cputime_expires;
470 struct cpu_timer_list *const nt = &timer->it.cpu; 470 struct cpu_timer_list *const nt = &timer->it.cpu;
471 struct cpu_timer_list *next; 471 struct cpu_timer_list *next;
472 472
@@ -562,7 +562,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
562 struct task_struct *p, 562 struct task_struct *p,
563 unsigned long long *sample) 563 unsigned long long *sample)
564{ 564{
565 struct task_cputime cputime; 565 struct task_cputime_t cputime;
566 566
567 thread_group_cputimer(p, &cputime); 567 thread_group_cputimer(p, &cputime);
568 switch (CPUCLOCK_WHICH(which_clock)) { 568 switch (CPUCLOCK_WHICH(which_clock)) {
@@ -761,7 +761,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
761 /* 761 /*
762 * Protect against sighand release/switch in exit/exec and 762 * Protect against sighand release/switch in exit/exec and
763 * also make timer sampling safe if it ends up calling 763 * also make timer sampling safe if it ends up calling
764 * thread_group_cputime(). 764 * thread_group_cputime_t().
765 */ 765 */
766 sighand = lock_task_sighand(p, &flags); 766 sighand = lock_task_sighand(p, &flags);
767 if (unlikely(sighand == NULL)) { 767 if (unlikely(sighand == NULL)) {
@@ -826,7 +826,7 @@ static void check_thread_timers(struct task_struct *tsk,
826{ 826{
827 struct list_head *timers = tsk->cpu_timers; 827 struct list_head *timers = tsk->cpu_timers;
828 struct signal_struct *const sig = tsk->signal; 828 struct signal_struct *const sig = tsk->signal;
829 struct task_cputime *tsk_expires = &tsk->cputime_expires; 829 struct task_cputime_t *tsk_expires = &tsk->cputime_expires;
830 unsigned long long expires; 830 unsigned long long expires;
831 unsigned long soft; 831 unsigned long soft;
832 832
@@ -934,7 +934,7 @@ static void check_process_timers(struct task_struct *tsk,
934 unsigned long long utime, ptime, virt_expires, prof_expires; 934 unsigned long long utime, ptime, virt_expires, prof_expires;
935 unsigned long long sum_sched_runtime, sched_expires; 935 unsigned long long sum_sched_runtime, sched_expires;
936 struct list_head *timers = sig->cpu_timers; 936 struct list_head *timers = sig->cpu_timers;
937 struct task_cputime cputime; 937 struct task_cputime_t cputime;
938 unsigned long soft; 938 unsigned long soft;
939 939
940 /* 940 /*
@@ -1037,7 +1037,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
1037 } else { 1037 } else {
1038 /* 1038 /*
1039 * Protect arm_timer() and timer sampling in case of call to 1039 * Protect arm_timer() and timer sampling in case of call to
1040 * thread_group_cputime(). 1040 * thread_group_cputime_t().
1041 */ 1041 */
1042 sighand = lock_task_sighand(p, &flags); 1042 sighand = lock_task_sighand(p, &flags);
1043 if (unlikely(sighand == NULL)) { 1043 if (unlikely(sighand == NULL)) {
@@ -1080,8 +1080,8 @@ out:
1080 * Returns true if any field of the former is greater than the corresponding 1080 * Returns true if any field of the former is greater than the corresponding
1081 * field of the latter if the latter field is set. Otherwise returns false. 1081 * field of the latter if the latter field is set. Otherwise returns false.
1082 */ 1082 */
1083static inline int task_cputime_expired(const struct task_cputime *sample, 1083static inline int task_cputime_expired(const struct task_cputime_t *sample,
1084 const struct task_cputime *expires) 1084 const struct task_cputime_t *expires)
1085{ 1085{
1086 if (expires->utime && sample->utime >= expires->utime) 1086 if (expires->utime && sample->utime >= expires->utime)
1087 return 1; 1087 return 1;
@@ -1108,9 +1108,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
1108 struct signal_struct *sig; 1108 struct signal_struct *sig;
1109 1109
1110 if (!task_cputime_zero(&tsk->cputime_expires)) { 1110 if (!task_cputime_zero(&tsk->cputime_expires)) {
1111 struct task_cputime task_sample; 1111 struct task_cputime_t task_sample;
1112 1112
1113 task_cputime(tsk, &task_sample.utime, &task_sample.stime); 1113 task_cputime_t(tsk, &task_sample.utime, &task_sample.stime);
1114 task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime; 1114 task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
1115 if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) 1115 if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
1116 return 1; 1116 return 1;
@@ -1133,7 +1133,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
1133 */ 1133 */
1134 if (READ_ONCE(sig->cputimer.running) && 1134 if (READ_ONCE(sig->cputimer.running) &&
1135 !READ_ONCE(sig->cputimer.checking_timer)) { 1135 !READ_ONCE(sig->cputimer.checking_timer)) {
1136 struct task_cputime group_sample; 1136 struct task_cputime_t group_sample;
1137 1137
1138 sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic); 1138 sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
1139 1139
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index f8e26ab963ed..040d0a64d0d1 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -66,11 +66,11 @@ void bacct_add_tsk(struct user_namespace *user_ns,
66 task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0; 66 task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0;
67 rcu_read_unlock(); 67 rcu_read_unlock();
68 68
69 task_cputime(tsk, &utime, &stime); 69 task_cputime_t(tsk, &utime, &stime);
70 stats->ac_utime = cputime_to_usecs(utime); 70 stats->ac_utime = cputime_to_usecs(utime);
71 stats->ac_stime = cputime_to_usecs(stime); 71 stats->ac_stime = cputime_to_usecs(stime);
72 72
73 task_cputime_scaled(tsk, &utimescaled, &stimescaled); 73 task_cputime_t_scaled(tsk, &utimescaled, &stimescaled);
74 stats->ac_utimescaled = cputime_to_usecs(utimescaled); 74 stats->ac_utimescaled = cputime_to_usecs(utimescaled);
75 stats->ac_stimescaled = cputime_to_usecs(stimescaled); 75 stats->ac_stimescaled = cputime_to_usecs(stimescaled);
76 76
@@ -159,7 +159,7 @@ void acct_update_integrals(struct task_struct *tsk)
159 unsigned long flags; 159 unsigned long flags;
160 160
161 local_irq_save(flags); 161 local_irq_save(flags);
162 task_cputime(tsk, &utime, &stime); 162 task_cputime_t(tsk, &utime, &stime);
163 __acct_update_integrals(tsk, utime, stime); 163 __acct_update_integrals(tsk, utime, stime);
164 local_irq_restore(flags); 164 local_irq_restore(flags);
165} 165}