Diffstat (limited to 'kernel/time/posix-cpu-timers.c')
-rw-r--r--	kernel/time/posix-cpu-timers.c	| 46
1 file changed, 23 insertions(+), 23 deletions(-)
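The diff below is a mechanical rename: struct task_cputime becomes struct task_cputime_t, and the sampling helpers task_cputime() and thread_group_cputime() become task_cputime_t() and thread_group_cputime_t(). For orientation, here is a minimal sketch of the renamed type; the field layout is assumed from the same-era include/linux/sched.h and is not part of this diff:

/* Sketch only: assumed definition of the renamed type (not in this diff).
 * utime/stime hold cputime_t samples; sum_exec_runtime is the scheduler's
 * nanosecond runtime counter, read e.g. from tsk->se.sum_exec_runtime. */
struct task_cputime_t {
	cputime_t		utime;
	cputime_t		stime;
	unsigned long long	sum_exec_runtime;
};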
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index e9e8c10f0d9a..d53ff711a2a8 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -115,7 +115,7 @@ static void bump_cpu_timer(struct k_itimer *timer,
  * Checks @cputime to see if all fields are zero. Returns true if all fields
  * are zero, false if any field is nonzero.
  */
-static inline int task_cputime_zero(const struct task_cputime *cputime)
+static inline int task_cputime_zero(const struct task_cputime_t *cputime)
 {
 	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
 		return 1;
@@ -126,7 +126,7 @@ static inline unsigned long long prof_ticks(struct task_struct *p)
 {
 	cputime_t utime, stime;
 
-	task_cputime(p, &utime, &stime);
+	task_cputime_t(p, &utime, &stime);
 
 	return cputime_to_expires(utime + stime);
 }
@@ -134,7 +134,7 @@ static inline unsigned long long virt_ticks(struct task_struct *p)
 {
 	cputime_t utime, stime;
 
-	task_cputime(p, &utime, &stime);
+	task_cputime_t(p, &utime, &stime);
 
 	return cputime_to_expires(utime);
 }
@@ -210,7 +210,7 @@ retry:
 	}
 }
 
-static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
+static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime_t *sum)
 {
 	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
 	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
@@ -218,7 +218,7 @@ static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct
 }
 
 /* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
-static inline void sample_cputime_atomic(struct task_cputime *times,
+static inline void sample_cputime_atomic(struct task_cputime_t *times,
 					 struct task_cputime_atomic *atomic_times)
 {
 	times->utime = atomic64_read(&atomic_times->utime);
@@ -226,10 +226,10 @@ static inline void sample_cputime_atomic(struct task_cputime *times,
 	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
 }
 
-void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times)
 {
 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
-	struct task_cputime sum;
+	struct task_cputime_t sum;
 
 	/* Check if cputimer isn't running. This is accessed without locking. */
 	if (!READ_ONCE(cputimer->running)) {
@@ -238,7 +238,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 		 * values through the TIMER_ABSTIME flag, therefore we have
 		 * to synchronize the timer to the clock every time we start it.
 		 */
-		thread_group_cputime(tsk, &sum);
+		thread_group_cputime_t(tsk, &sum);
 		update_gt_cputime(&cputimer->cputime_atomic, &sum);
 
 		/*
@@ -262,21 +262,21 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
 				  unsigned long long *sample)
 {
-	struct task_cputime cputime;
+	struct task_cputime_t cputime;
 
 	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
-		thread_group_cputime(p, &cputime);
+		thread_group_cputime_t(p, &cputime);
 		*sample = cputime_to_expires(cputime.utime + cputime.stime);
 		break;
 	case CPUCLOCK_VIRT:
-		thread_group_cputime(p, &cputime);
+		thread_group_cputime_t(p, &cputime);
 		*sample = cputime_to_expires(cputime.utime);
 		break;
 	case CPUCLOCK_SCHED:
-		thread_group_cputime(p, &cputime);
+		thread_group_cputime_t(p, &cputime);
 		*sample = cputime.sum_exec_runtime;
 		break;
 	}
@@ -466,7 +466,7 @@ static void arm_timer(struct k_itimer *timer)
 {
 	struct task_struct *p = timer->it.cpu.task;
 	struct list_head *head, *listpos;
-	struct task_cputime *cputime_expires;
+	struct task_cputime_t *cputime_expires;
 	struct cpu_timer_list *const nt = &timer->it.cpu;
 	struct cpu_timer_list *next;
 
@@ -562,7 +562,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
 				  unsigned long long *sample)
 {
-	struct task_cputime cputime;
+	struct task_cputime_t cputime;
 
 	thread_group_cputimer(p, &cputime);
 	switch (CPUCLOCK_WHICH(which_clock)) {
@@ -761,7 +761,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 	/*
 	 * Protect against sighand release/switch in exit/exec and
 	 * also make timer sampling safe if it ends up calling
-	 * thread_group_cputime().
+	 * thread_group_cputime_t().
 	 */
 	sighand = lock_task_sighand(p, &flags);
 	if (unlikely(sighand == NULL)) {
@@ -826,7 +826,7 @@ static void check_thread_timers(struct task_struct *tsk,
 {
 	struct list_head *timers = tsk->cpu_timers;
 	struct signal_struct *const sig = tsk->signal;
-	struct task_cputime *tsk_expires = &tsk->cputime_expires;
+	struct task_cputime_t *tsk_expires = &tsk->cputime_expires;
 	unsigned long long expires;
 	unsigned long soft;
 
@@ -934,7 +934,7 @@ static void check_process_timers(struct task_struct *tsk,
 	unsigned long long utime, ptime, virt_expires, prof_expires;
 	unsigned long long sum_sched_runtime, sched_expires;
 	struct list_head *timers = sig->cpu_timers;
-	struct task_cputime cputime;
+	struct task_cputime_t cputime;
 	unsigned long soft;
 
 	/*
@@ -1037,7 +1037,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	} else {
 		/*
 		 * Protect arm_timer() and timer sampling in case of call to
-		 * thread_group_cputime().
+		 * thread_group_cputime_t().
 		 */
 		sighand = lock_task_sighand(p, &flags);
 		if (unlikely(sighand == NULL)) {
@@ -1080,8 +1080,8 @@ out:
 * Returns true if any field of the former is greater than the corresponding
 * field of the latter if the latter field is set. Otherwise returns false.
 */
-static inline int task_cputime_expired(const struct task_cputime *sample,
-					const struct task_cputime *expires)
+static inline int task_cputime_expired(const struct task_cputime_t *sample,
+					const struct task_cputime_t *expires)
 {
 	if (expires->utime && sample->utime >= expires->utime)
 		return 1;
@@ -1108,9 +1108,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	struct signal_struct *sig;
 
 	if (!task_cputime_zero(&tsk->cputime_expires)) {
-		struct task_cputime task_sample;
+		struct task_cputime_t task_sample;
 
-		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
+		task_cputime_t(tsk, &task_sample.utime, &task_sample.stime);
 		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
 		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
 			return 1;
@@ -1133,7 +1133,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	 */
 	if (READ_ONCE(sig->cputimer.running) &&
 	    !READ_ONCE(sig->cputimer.checking_timer)) {
-		struct task_cputime group_sample;
+		struct task_cputime_t group_sample;
 
 		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
 
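Taken together, the renamed helpers keep the existing fast path intact: sample the task's accounted time into a struct task_cputime_t, then compare it against the cached expiry limits. A minimal sketch assembled from the fastpath_timer_check() hunk above; the wrapper name task_expiry_pending() is hypothetical and not part of this diff:

/* Hypothetical wrapper mirroring the fastpath_timer_check() hunk:
 * sample current CPU time with the renamed accessor, then test it
 * against the per-task expiry cache. */
static int task_expiry_pending(struct task_struct *tsk)
{
	struct task_cputime_t sample;

	task_cputime_t(tsk, &sample.utime, &sample.stime);
	sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
	return task_cputime_expired(&sample, &tsk->cputime_expires);
}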