Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  80
1 file changed, 35 insertions, 45 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index d6b149ccf925..18cad4467e61 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2166,7 +2166,7 @@ static int irqtime_account_hi_update(void)
 
         local_irq_save(flags);
         latest_ns = this_cpu_read(cpu_hardirq_time);
-        if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
+        if (nsecs_to_cputime64(latest_ns) > cpustat->irq)
                 ret = 1;
         local_irq_restore(flags);
         return ret;
@@ -2181,7 +2181,7 @@ static int irqtime_account_si_update(void)
 
         local_irq_save(flags);
         latest_ns = this_cpu_read(cpu_softirq_time);
-        if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
+        if (nsecs_to_cputime64(latest_ns) > cpustat->softirq)
                 ret = 1;
         local_irq_restore(flags);
         return ret;
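The two hunks above drop the cputime64_gt() wrapper in favour of a plain `>`; the remaining hunks retire the add/sub wrappers the same way. For readers without the old headers at hand, the retired helpers were thin macros over ordinary operators, roughly as sketched below (an illustrative reconstruction, not copied from the tree):

#define cputime_add(__a, __b)           ((__a) + (__b))
#define cputime_sub(__a, __b)           ((__a) - (__b))
#define cputime64_add(__a, __b)         ((__a) + (__b))
#define cputime64_gt(__a, __b)          ((__a) > (__b))

Once the cputime types are ordinary integer typedefs whose misuse is caught by sparse annotations rather than by wrapper macros, these indirections add nothing, which is what the patch removes.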
@@ -3868,19 +3868,17 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
                        cputime_t cputime_scaled)
 {
         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-        cputime64_t tmp;
 
         /* Add user time to process. */
-        p->utime = cputime_add(p->utime, cputime);
-        p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
+        p->utime += cputime;
+        p->utimescaled += cputime_scaled;
         account_group_user_time(p, cputime);
 
         /* Add user time to cpustat. */
-        tmp = cputime_to_cputime64(cputime);
         if (TASK_NICE(p) > 0)
-                cpustat->nice = cputime64_add(cpustat->nice, tmp);
+                cpustat->nice += (__force cputime64_t) cputime;
         else
-                cpustat->user = cputime64_add(cpustat->user, tmp);
+                cpustat->user += (__force cputime64_t) cputime;
 
         cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
         /* Account for user time used */
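The account_user_time() hunk shows the pattern used throughout: same-type arithmetic becomes a plain +=, while feeding a cputime_t into a cputime64_t field gets an explicit (__force cputime64_t) cast. __force is a sparse annotation, which suggests the cputime typedefs now carry sparse type checking. A standalone userspace sketch of that idea follows, with the annotations and typedef details treated as assumptions:

/* Compiles with gcc; under sparse (__CHECKER__) the annotations become
 * real attributes and an unforced cross-type assignment would warn.
 */
#include <stdint.h>
#include <stdio.h>

#ifdef __CHECKER__
# define __nocast __attribute__((nocast))
# define __force  __attribute__((force))
#else
# define __nocast
# define __force
#endif

typedef unsigned long __nocast cputime_t;    /* assumed annotation */
typedef uint64_t      __nocast cputime64_t;  /* assumed annotation */

int main(void)
{
        cputime_t utime = 100;                /* ticks of user time  */
        cputime64_t cpustat_user = 5000;      /* per-cpu accumulator */

        /* Same-type arithmetic no longer needs cputime_add(). */
        utime += 3;

        /* Cross-type accumulation gets an explicit, sparse-silencing cast. */
        cpustat_user += (__force cputime64_t) utime;

        printf("user total: %llu\n", (unsigned long long) cpustat_user);
        return 0;
}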
@@ -3896,24 +3894,21 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
 static void account_guest_time(struct task_struct *p, cputime_t cputime,
                                cputime_t cputime_scaled)
 {
-        cputime64_t tmp;
         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 
-        tmp = cputime_to_cputime64(cputime);
-
         /* Add guest time to process. */
-        p->utime = cputime_add(p->utime, cputime);
-        p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
+        p->utime += cputime;
+        p->utimescaled += cputime_scaled;
         account_group_user_time(p, cputime);
-        p->gtime = cputime_add(p->gtime, cputime);
+        p->gtime += cputime;
 
         /* Add guest time to cpustat. */
         if (TASK_NICE(p) > 0) {
-                cpustat->nice = cputime64_add(cpustat->nice, tmp);
-                cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
+                cpustat->nice += (__force cputime64_t) cputime;
+                cpustat->guest_nice += (__force cputime64_t) cputime;
         } else {
-                cpustat->user = cputime64_add(cpustat->user, tmp);
-                cpustat->guest = cputime64_add(cpustat->guest, tmp);
+                cpustat->user += (__force cputime64_t) cputime;
+                cpustat->guest += (__force cputime64_t) cputime;
         }
 }
 
@@ -3928,15 +3923,13 @@ static inline
 void __account_system_time(struct task_struct *p, cputime_t cputime,
                         cputime_t cputime_scaled, cputime64_t *target_cputime64)
 {
-        cputime64_t tmp = cputime_to_cputime64(cputime);
-
         /* Add system time to process. */
-        p->stime = cputime_add(p->stime, cputime);
-        p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
+        p->stime += cputime;
+        p->stimescaled += cputime_scaled;
         account_group_system_time(p, cputime);
 
         /* Add system time to cpustat. */
-        *target_cputime64 = cputime64_add(*target_cputime64, tmp);
+        *target_cputime64 += (__force cputime64_t) cputime;
         cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
 
         /* Account for system time used */
@@ -3978,9 +3971,8 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 void account_steal_time(cputime_t cputime)
 {
         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-        cputime64_t cputime64 = cputime_to_cputime64(cputime);
 
-        cpustat->steal = cputime64_add(cpustat->steal, cputime64);
+        cpustat->steal += (__force cputime64_t) cputime;
 }
 
 /*
@@ -3990,13 +3982,12 @@ void account_steal_time(cputime_t cputime)
 void account_idle_time(cputime_t cputime)
 {
         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-        cputime64_t cputime64 = cputime_to_cputime64(cputime);
         struct rq *rq = this_rq();
 
         if (atomic_read(&rq->nr_iowait) > 0)
-                cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
+                cpustat->iowait += (__force cputime64_t) cputime;
         else
-                cpustat->idle = cputime64_add(cpustat->idle, cputime64);
+                cpustat->idle += (__force cputime64_t) cputime;
 }
 
 static __always_inline bool steal_account_process_tick(void)
@@ -4046,16 +4037,15 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                                          struct rq *rq)
 {
         cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
-        cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 
         if (steal_account_process_tick())
                 return;
 
         if (irqtime_account_hi_update()) {
-                cpustat->irq = cputime64_add(cpustat->irq, tmp);
+                cpustat->irq += (__force cputime64_t) cputime_one_jiffy;
         } else if (irqtime_account_si_update()) {
-                cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+                cpustat->softirq += (__force cputime64_t) cputime_one_jiffy;
         } else if (this_cpu_ksoftirqd() == p) {
                 /*
                  * ksoftirqd time do not get accounted in cpu_softirq_time.
@@ -4171,7 +4161,7 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 
 void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
-        cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
+        cputime_t rtime, utime = p->utime, total = utime + p->stime;
 
         /*
          * Use CFS's precise accounting:
@@ -4179,11 +4169,11 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
         rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
         if (total) {
-                u64 temp = rtime;
+                u64 temp = (__force u64) rtime;
 
-                temp *= utime;
-                do_div(temp, total);
-                utime = (cputime_t)temp;
+                temp *= (__force u64) utime;
+                do_div(temp, (__force u32) total);
+                utime = (__force cputime_t) temp;
         } else
                 utime = rtime;
 
@@ -4191,7 +4181,7 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
          * Compare with previous values, to keep monotonicity:
          */
         p->prev_utime = max(p->prev_utime, utime);
-        p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
+        p->prev_stime = max(p->prev_stime, rtime - p->prev_utime);
 
         *ut = p->prev_utime;
         *st = p->prev_stime;
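The task_times() hunks keep the existing arithmetic and only add casts: the tick-sampled utime/stime split is rescaled onto the precise CFS runtime (rtime, from sum_exec_runtime), and max() against the prev_* fields keeps the reported values monotonic across calls. A standalone sketch of that arithmetic, with plain u64 standing in for cputime_t/do_div and made-up numbers for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t utime = 30, stime = 10;   /* tick-sampled split        */
        uint64_t rtime = 48;               /* precise runtime, in ticks */
        uint64_t total = utime + stime;
        uint64_t prev_utime = 0, prev_stime = 0;

        /* Rescale the sampled user share onto the precise runtime. */
        uint64_t scaled = total ? rtime * utime / total : rtime;

        /* Keep monotonicity across successive calls. */
        if (scaled > prev_utime)
                prev_utime = scaled;
        if (rtime - prev_utime > prev_stime)
                prev_stime = rtime - prev_utime;

        /* 30/40 of 48 ticks -> ut=36, st=12 */
        printf("ut=%llu st=%llu\n",
               (unsigned long long) prev_utime,
               (unsigned long long) prev_stime);
        return 0;
}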
@@ -4208,21 +4198,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 
         thread_group_cputime(p, &cputime);
 
-        total = cputime_add(cputime.utime, cputime.stime);
+        total = cputime.utime + cputime.stime;
         rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
         if (total) {
-                u64 temp = rtime;
+                u64 temp = (__force u64) rtime;
 
-                temp *= cputime.utime;
-                do_div(temp, total);
-                utime = (cputime_t)temp;
+                temp *= (__force u64) cputime.utime;
+                do_div(temp, (__force u32) total);
+                utime = (__force cputime_t) temp;
         } else
                 utime = rtime;
 
         sig->prev_utime = max(sig->prev_utime, utime);
-        sig->prev_stime = max(sig->prev_stime,
-                              cputime_sub(rtime, sig->prev_utime));
+        sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime);
 
         *ut = sig->prev_utime;
         *st = sig->prev_stime;
@@ -9769,7 +9758,8 @@ static void cpuacct_update_stats(struct task_struct *tsk,
         ca = task_ca(tsk);
 
         do {
-                __percpu_counter_add(&ca->cpustat[idx], val, batch);
+                __percpu_counter_add(&ca->cpustat[idx],
+                                     (__force s64) val, batch);
                 ca = ca->parent;
         } while (ca);
         rcu_read_unlock();
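The last hunk only re-wraps the call and adds a cast: cpuacct keeps its statistics in generic percpu counters, whose add helper takes a plain s64, so the annotated cputime value has to be force-cast at that boundary. For reference, the counter helper in kernels of this era has a prototype along these lines (treat the exact signature as an assumption):

/* Batched per-CPU counter add from <linux/percpu_counter.h>;
 * 'val' arrives as a cputime type in cpuacct_update_stats(),
 * hence the (__force s64) cast in the hunk above.
 */
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);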