author     Ingo Molnar <mingo@kernel.org>   2017-02-01 06:29:21 -0500
committer  Ingo Molnar <mingo@kernel.org>   2017-02-07 04:58:01 -0500
commit     535b9552bb81eebe112ee7bd34ee498857b0c26b (patch)
tree       024786189a8aaa410c15196355dab2c5c24d2d3b
parent     4025819d328cd0efc53ee22e01b800631944b7f3 (diff)
sched/rq_clock: Consolidate the ordering of the rq_clock methods
update_rq_clock_task() and update_rq_clock() were unnecessarily
spread across core.c, requiring an extra prototype line.

Move them next to each other and in the proper order.
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
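
For context on the "extra prototype line": C requires a function to be declared before its first use, so with update_rq_clock() defined near the top of core.c and update_rq_clock_task() defined several hundred lines later, a forward declaration had to bridge the gap. A minimal standalone sketch of the two orderings (illustrative names, not the kernel code):

/* Before: the caller precedes the callee, so a prototype is required. */
static void helper(long delta);		/* the extra prototype line */

void do_update(void)
{
	helper(1);
}

static void helper(long delta)
{
	/* ... */
}

/* After: the callee is defined first; the prototype line disappears. */
static void helper2(long delta)
{
	/* ... */
}

void do_update2(void)
{
	helper2(1);
}

This is the shape of the patch below: update_rq_clock_task() now appears directly before its caller, update_rq_clock().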
-rw-r--r--  kernel/sched/core.c | 153
1 file changed, 78 insertions(+), 75 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 87cf7ba82bd5..a400190792b9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -73,27 +73,6 @@
 DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static void update_rq_clock_task(struct rq *rq, s64 delta);
-
-void update_rq_clock(struct rq *rq)
-{
-	s64 delta;
-
-	lockdep_assert_held(&rq->lock);
-
-	if (rq->clock_update_flags & RQCF_ACT_SKIP)
-		return;
-
-#ifdef CONFIG_SCHED_DEBUG
-	rq->clock_update_flags |= RQCF_UPDATED;
-#endif
-	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
-	if (delta < 0)
-		return;
-	rq->clock += delta;
-	update_rq_clock_task(rq, delta);
-}
-
 /*
  * Debugging: various feature bits
  */
@@ -218,6 +197,84 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
 	}
 }
 
+/*
+ * RQ-clock updating methods:
+ */
+
+static void update_rq_clock_task(struct rq *rq, s64 delta)
+{
+/*
+ * In theory, the compile should just see 0 here, and optimize out the call
+ * to sched_rt_avg_update. But I don't trust it...
+ */
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+	s64 steal = 0, irq_delta = 0;
+#endif
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
+
+	/*
+	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
+	 * this case when a previous update_rq_clock() happened inside a
+	 * {soft,}irq region.
+	 *
+	 * When this happens, we stop ->clock_task and only update the
+	 * prev_irq_time stamp to account for the part that fit, so that a next
+	 * update will consume the rest. This ensures ->clock_task is
+	 * monotonic.
+	 *
+	 * It does however cause some slight miss-attribution of {soft,}irq
+	 * time, a more accurate solution would be to update the irq_time using
+	 * the current rq->clock timestamp, except that would require using
+	 * atomic ops.
+	 */
+	if (irq_delta > delta)
+		irq_delta = delta;
+
+	rq->prev_irq_time += irq_delta;
+	delta -= irq_delta;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+	if (static_key_false((&paravirt_steal_rq_enabled))) {
+		steal = paravirt_steal_clock(cpu_of(rq));
+		steal -= rq->prev_steal_time_rq;
+
+		if (unlikely(steal > delta))
+			steal = delta;
+
+		rq->prev_steal_time_rq += steal;
+		delta -= steal;
+	}
+#endif
+
+	rq->clock_task += delta;
+
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
+		sched_rt_avg_update(rq, irq_delta + steal);
+#endif
+}
+
+void update_rq_clock(struct rq *rq)
+{
+	s64 delta;
+
+	lockdep_assert_held(&rq->lock);
+
+	if (rq->clock_update_flags & RQCF_ACT_SKIP)
+		return;
+
+#ifdef CONFIG_SCHED_DEBUG
+	rq->clock_update_flags |= RQCF_UPDATED;
+#endif
+	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+	if (delta < 0)
+		return;
+	rq->clock += delta;
+	update_rq_clock_task(rq, delta);
+}
+
+
 #ifdef CONFIG_SCHED_HRTICK
 /*
  * Use HR-timers to deliver accurate preemption points.
@@ -767,60 +824,6 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 	dequeue_task(rq, p, flags);
 }
 
-static void update_rq_clock_task(struct rq *rq, s64 delta)
-{
-/*
- * In theory, the compile should just see 0 here, and optimize out the call
- * to sched_rt_avg_update. But I don't trust it...
- */
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-	s64 steal = 0, irq_delta = 0;
-#endif
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
-
-	/*
-	 * Since irq_time is only updated on {soft,}irq_exit, we might run into
-	 * this case when a previous update_rq_clock() happened inside a
-	 * {soft,}irq region.
-	 *
-	 * When this happens, we stop ->clock_task and only update the
-	 * prev_irq_time stamp to account for the part that fit, so that a next
-	 * update will consume the rest. This ensures ->clock_task is
-	 * monotonic.
-	 *
-	 * It does however cause some slight miss-attribution of {soft,}irq
-	 * time, a more accurate solution would be to update the irq_time using
-	 * the current rq->clock timestamp, except that would require using
-	 * atomic ops.
-	 */
-	if (irq_delta > delta)
-		irq_delta = delta;
-
-	rq->prev_irq_time += irq_delta;
-	delta -= irq_delta;
-#endif
-#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-	if (static_key_false((&paravirt_steal_rq_enabled))) {
-		steal = paravirt_steal_clock(cpu_of(rq));
-		steal -= rq->prev_steal_time_rq;
-
-		if (unlikely(steal > delta))
-			steal = delta;
-
-		rq->prev_steal_time_rq += steal;
-		delta -= steal;
-	}
-#endif
-
-	rq->clock_task += delta;
-
-#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
-	if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY))
-		sched_rt_avg_update(rq, irq_delta + steal);
-#endif
-}
-
 void sched_set_stop_task(int cpu, struct task_struct *stop)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
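
The monotonicity argument in the comment above can be seen in isolation: when more {soft,}irq time has accumulated than the interval that just elapsed, only the part that fits is subtracted now, and prev_irq_time remembers how much was consumed so the next update absorbs the rest. Below is a simplified, self-contained userspace sketch of that clamping; the struct, function names, and numbers are made up for illustration and this is not the kernel implementation:

#include <stdio.h>
#include <stdint.h>

struct rq_demo {
	int64_t clock;		/* total elapsed time */
	int64_t clock_task;	/* time actually available to tasks */
	int64_t prev_irq_time;	/* irq time accounted so far */
};

/*
 * Mirrors the irq_delta clamping in update_rq_clock_task(): never
 * subtract more irq time than the interval that just elapsed, so
 * clock_task stays monotonic and the excess carries over.
 */
static void demo_update(struct rq_demo *rq, int64_t delta, int64_t irq_time)
{
	int64_t irq_delta = irq_time - rq->prev_irq_time;

	rq->clock += delta;		/* the wall clock takes the full delta */

	if (irq_delta > delta)		/* clamp: defer the excess */
		irq_delta = delta;

	rq->prev_irq_time += irq_delta;
	rq->clock_task += delta - irq_delta;
}

int main(void)
{
	struct rq_demo rq = { 0, 0, 0 };

	demo_update(&rq, 100, 30);	/* 30 irq units fit entirely */
	demo_update(&rq, 50, 120);	/* 90 new irq units, only 50 fit */

	printf("clock=%lld clock_task=%lld prev_irq_time=%lld\n",
	       (long long)rq.clock, (long long)rq.clock_task,
	       (long long)rq.prev_irq_time);
	/*
	 * Prints: clock=150 clock_task=70 prev_irq_time=80; the
	 * remaining 40 irq units are consumed by the next update.
	 */
	return 0;
}

Note how the second update stops clock_task entirely (it advances by 0) rather than letting it run backwards, matching the "This ensures ->clock_task is monotonic" comment in the patch.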