author	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-20 15:52:55 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-02-20 15:52:55 -0500
commit	828cad8ea05d194d8a9452e0793261c2024c23a2 (patch)
tree	0ad7c7e044cdcfe75d78da0b52eb2358d4686e02 /kernel/time/itimer.c
parent	60c906bab124a0627fba04c9ca5e61bba4747c0c (diff)
parent	bb3bac2ca9a3a5b7fa601781adf70167a0449d75 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:
 "The main changes in this (fairly busy) cycle were:

   - There was a class of scheduler bugs related to forgetting to update
     the rq-clock timestamp, which can cause weird and hard to debug
     problems, so there's a new debug facility for this, which uncovered
     a whole lot of bugs and convinced us that we want to keep the debug
     facility (Peter Zijlstra, Matt Fleming)

   - Various cputime related updates: eliminate cputime and use u64
     nanoseconds directly, simplify and improve the arch interfaces,
     implement delayed accounting more widely, etc. (Frederic Weisbecker)

   - Move code around for better structure plus cleanups (Ingo Molnar)

   - Move IO schedule accounting deeper into the scheduler plus related
     changes to improve the situation (Tejun Heo)

   - ... plus a round of sched/rt and sched/deadline fixes, plus other
     fixes, updates and cleanups"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (85 commits)
  sched/core: Remove unlikely() annotation from sched_move_task()
  sched/autogroup: Rename auto_group.[ch] to autogroup.[ch]
  sched/topology: Split out scheduler topology code from core.c into topology.c
  sched/core: Remove unnecessary #include headers
  sched/rq_clock: Consolidate the ordering of the rq_clock methods
  delayacct: Include <uapi/linux/taskstats.h>
  sched/core: Clean up comments
  sched/rt: Show the 'sched_rr_timeslice' SCHED_RR timeslice tuning knob in milliseconds
  sched/clock: Add dummy clear_sched_clock_stable() stub function
  sched/cputime: Remove generic asm headers
  sched/cputime: Remove unused nsec_to_cputime()
  s390, sched/cputime: Remove unused cputime definitions
  powerpc, sched/cputime: Remove unused cputime definitions
  s390, sched/cputime: Make arch_cpu_idle_time() to return nsecs
  ia64, sched/cputime: Remove unused cputime definitions
  ia64: Convert vtime to use nsec units directly
  ia64, sched/cputime: Move the nsecs based cputime headers to the last arch using it
  sched/cputime: Remove jiffies based cputime
  sched/cputime, vtime: Return nsecs instead of cputime_t to account
  sched/cputime: Complete nsec conversion of tick based accounting
  ...
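The cputime item above is the one that touches kernel/time/itimer.c in the diff below: cputime_t values (jiffies-granular on most configurations) become plain u64 nanoseconds, which also lets the error/incr_error rounding bookkeeping disappear. The following is a rough standalone illustration of that rounding issue, not kernel code: it assumes HZ=100 (one tick = 10 ms), and tv_to_ns() is a simplified stand-in for the kernel's timeval_to_ns().

#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

#define NSEC_PER_USEC	1000LL
#define NSEC_PER_SEC	1000000000LL
#define TICK_NSEC	10000000LL	/* assumed HZ=100: one tick = 10 ms */

/* simplified stand-in for the kernel's timeval_to_ns() */
static int64_t tv_to_ns(const struct timeval *tv)
{
	return (int64_t)tv->tv_sec * NSEC_PER_SEC + tv->tv_usec * NSEC_PER_USEC;
}

int main(void)
{
	struct timeval it = { .tv_sec = 0, .tv_usec = 12345 };	/* 12.345 ms */
	int64_t ns = tv_to_ns(&it);

	/*
	 * Old scheme: the requested value was rounded up to whole ticks,
	 * so the difference had to be remembered separately (the removed
	 * error/incr_error fields).
	 */
	int64_t jiffies_ns = ((ns + TICK_NSEC - 1) / TICK_NSEC) * TICK_NSEC;

	printf("requested        : %lld ns\n", (long long)ns);
	printf("jiffies-granular : %lld ns (rounding error %lld ns)\n",
	       (long long)jiffies_ns, (long long)(jiffies_ns - ns));
	printf("u64 nanoseconds  : %lld ns (no error to track)\n", (long long)ns);
	return 0;
}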
Diffstat (limited to 'kernel/time/itimer.c')
-rw-r--r--	kernel/time/itimer.c	60
1 file changed, 20 insertions(+), 40 deletions(-)
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index 8c89143f9ebf..a95f13c31464 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -45,16 +45,16 @@ static struct timeval itimer_get_remtime(struct hrtimer *timer)
 static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
 			   struct itimerval *const value)
 {
-	cputime_t cval, cinterval;
+	u64 val, interval;
 	struct cpu_itimer *it = &tsk->signal->it[clock_id];
 
 	spin_lock_irq(&tsk->sighand->siglock);
 
-	cval = it->expires;
-	cinterval = it->incr;
-	if (cval) {
+	val = it->expires;
+	interval = it->incr;
+	if (val) {
 		struct task_cputime cputime;
-		cputime_t t;
+		u64 t;
 
 		thread_group_cputimer(tsk, &cputime);
 		if (clock_id == CPUCLOCK_PROF)
@@ -63,17 +63,17 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
 			/* CPUCLOCK_VIRT */
 			t = cputime.utime;
 
-		if (cval < t)
+		if (val < t)
 			/* about to fire */
-			cval = cputime_one_jiffy;
+			val = TICK_NSEC;
 		else
-			cval = cval - t;
+			val -= t;
 	}
 
 	spin_unlock_irq(&tsk->sighand->siglock);
 
-	cputime_to_timeval(cval, &value->it_value);
-	cputime_to_timeval(cinterval, &value->it_interval);
+	value->it_value = ns_to_timeval(val);
+	value->it_interval = ns_to_timeval(interval);
 }
 
 int do_getitimer(int which, struct itimerval *value)
@@ -129,55 +129,35 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
 	return HRTIMER_NORESTART;
 }
 
-static inline u32 cputime_sub_ns(cputime_t ct, s64 real_ns)
-{
-	struct timespec ts;
-	s64 cpu_ns;
-
-	cputime_to_timespec(ct, &ts);
-	cpu_ns = timespec_to_ns(&ts);
-
-	return (cpu_ns <= real_ns) ? 0 : cpu_ns - real_ns;
-}
-
 static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
 			   const struct itimerval *const value,
 			   struct itimerval *const ovalue)
 {
-	cputime_t cval, nval, cinterval, ninterval;
-	s64 ns_ninterval, ns_nval;
-	u32 error, incr_error;
+	u64 oval, nval, ointerval, ninterval;
 	struct cpu_itimer *it = &tsk->signal->it[clock_id];
 
-	nval = timeval_to_cputime(&value->it_value);
-	ns_nval = timeval_to_ns(&value->it_value);
-	ninterval = timeval_to_cputime(&value->it_interval);
-	ns_ninterval = timeval_to_ns(&value->it_interval);
-
-	error = cputime_sub_ns(nval, ns_nval);
-	incr_error = cputime_sub_ns(ninterval, ns_ninterval);
+	nval = timeval_to_ns(&value->it_value);
+	ninterval = timeval_to_ns(&value->it_interval);
 
 	spin_lock_irq(&tsk->sighand->siglock);
 
-	cval = it->expires;
-	cinterval = it->incr;
-	if (cval || nval) {
+	oval = it->expires;
+	ointerval = it->incr;
+	if (oval || nval) {
 		if (nval > 0)
-			nval += cputime_one_jiffy;
-		set_process_cpu_timer(tsk, clock_id, &nval, &cval);
+			nval += TICK_NSEC;
+		set_process_cpu_timer(tsk, clock_id, &nval, &oval);
 	}
 	it->expires = nval;
 	it->incr = ninterval;
-	it->error = error;
-	it->incr_error = incr_error;
 	trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
 			   ITIMER_VIRTUAL : ITIMER_PROF, value, nval);
 
 	spin_unlock_irq(&tsk->sighand->siglock);
 
 	if (ovalue) {
-		cputime_to_timeval(cval, &ovalue->it_value);
-		cputime_to_timeval(cinterval, &ovalue->it_interval);
+		ovalue->it_value = ns_to_timeval(oval);
+		ovalue->it_interval = ns_to_timeval(ointerval);
 	}
 }
 
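For context, get_cpu_itimer() and set_cpu_itimer() above back the getitimer()/setitimer() syscalls for ITIMER_PROF and ITIMER_VIRTUAL (do_getitimer() is visible in the hunk context). Below is a minimal userspace sketch that exercises the ITIMER_PROF path; the timer value and loop length are arbitrary, and the SIGPROF handler only keeps the process alive if the one-shot timer happens to expire.

#include <stdio.h>
#include <signal.h>
#include <sys/time.h>

/* Empty handler: without it, an expiring ITIMER_PROF timer delivers
 * SIGPROF and terminates the process by default. */
static void on_sigprof(int sig)
{
	(void)sig;
}

int main(void)
{
	struct itimerval set = {
		.it_value    = { .tv_sec = 0, .tv_usec = 500000 },	/* 500 ms of CPU time */
		.it_interval = { .tv_sec = 0, .tv_usec = 0 },		/* one-shot */
	};
	struct itimerval left;

	signal(SIGPROF, on_sigprof);

	if (setitimer(ITIMER_PROF, &set, NULL) != 0) {
		perror("setitimer");
		return 1;
	}

	/* burn a little CPU time so the remaining value visibly drops */
	for (volatile long i = 0; i < 50000000; i++)
		;

	if (getitimer(ITIMER_PROF, &left) != 0) {
		perror("getitimer");
		return 1;
	}

	printf("CPU time left on ITIMER_PROF: %ld.%06ld s\n",
	       (long)left.it_value.tv_sec, (long)left.it_value.tv_usec);
	return 0;
}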