author	Peter Zijlstra <peterz@infradead.org>	2019-01-30 08:41:04 -0500
committer	Ingo Molnar <mingo@kernel.org>	2019-02-04 03:13:21 -0500
commit	5d299eabea5a251fbf66e8277704b874bbba92dc (patch)
tree	dde557b31905049b265f4912deb0784ad691680b /kernel/sched
parent	c546951d9c9300065bad253ecdf1ac59ce9d06c8 (diff)
sched/fair: Add tmp_alone_branch assertion
The magic in list_add_leaf_cfs_rq() requires that at the end of
enqueue_task_fair():

  rq->tmp_alone_branch == &rq->leaf_cfs_rq_list

If this is violated, list integrity is compromised for list entries
and the tmp_alone_branch pointer might dangle.

Also, reflow list_add_leaf_cfs_rq() while there. This loses one
indentation level and generates a form that's convenient for the next
patch.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
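[Editor's note] The asserted invariant can be modeled in plain user-space C. The sketch below is not kernel code: struct node, add_after(), add_before(), and the local variables are illustrative stand-ins for rq->leaf_cfs_rq_list, rq->tmp_alone_branch, and the pointer bookkeeping that list_add_leaf_cfs_rq() performs via list_add_rcu() and list_add_tail_rcu().

	/*
	 * User-space sketch only; names are stand-ins, not the kernel API.
	 */
	#include <assert.h>

	struct node { struct node *prev, *next; };

	/* list_add_rcu() analogue: link @n right after @pos. */
	static void add_after(struct node *n, struct node *pos)
	{
		n->prev = pos;
		n->next = pos->next;
		pos->next->prev = n;
		pos->next = n;
	}

	/* list_add_tail_rcu() analogue: link @n right before @pos. */
	static void add_before(struct node *n, struct node *pos)
	{
		add_after(n, pos->prev);
	}

	int main(void)
	{
		struct node head = { &head, &head };	/* rq->leaf_cfs_rq_list */
		struct node *tmp_alone_branch = &head;	/* rq->tmp_alone_branch */
		struct node child, parent;

		/*
		 * Bottom-up enqueue: the child arrives first and its parent
		 * is not on the list yet, so the child starts a branch and
		 * tmp_alone_branch is advanced to it.
		 */
		add_after(&child, tmp_alone_branch);
		tmp_alone_branch = &child;

		/* Mid-enqueue, the invariant is intentionally violated. */
		assert(tmp_alone_branch != &head);

		/*
		 * The parent enqueues next; being a root cfs_rq it goes to
		 * the tail of the list and tmp_alone_branch is reset.
		 */
		add_before(&parent, &head);
		tmp_alone_branch = &head;

		/* What assert_list_leaf_cfs_rq() checks after enqueue: */
		assert(tmp_alone_branch == &head);
		return 0;
	}

If enqueue stopped after the first step, the final assert would fire; that mid-enqueue state is exactly what the new SCHED_WARN_ON() in this patch is meant to catch.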
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	126
1 file changed, 71 insertions(+), 55 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8c165f0d33b3..d6a536dec0ca 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -277,64 +277,69 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
 
 static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
-	if (!cfs_rq->on_list) {
-		struct rq *rq = rq_of(cfs_rq);
-		int cpu = cpu_of(rq);
+	struct rq *rq = rq_of(cfs_rq);
+	int cpu = cpu_of(rq);
+
+	if (cfs_rq->on_list)
+		return;
+
+	cfs_rq->on_list = 1;
+
+	/*
+	 * Ensure we either appear before our parent (if already
+	 * enqueued) or force our parent to appear after us when it is
+	 * enqueued. The fact that we always enqueue bottom-up
+	 * reduces this to two cases and a special case for the root
+	 * cfs_rq. Furthermore, it also means that we will always reset
+	 * tmp_alone_branch either when the branch is connected
+	 * to a tree or when we reach the top of the tree
+	 */
+	if (cfs_rq->tg->parent &&
+	    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
 		/*
-		 * Ensure we either appear before our parent (if already
-		 * enqueued) or force our parent to appear after us when it is
-		 * enqueued. The fact that we always enqueue bottom-up
-		 * reduces this to two cases and a special case for the root
-		 * cfs_rq. Furthermore, it also means that we will always reset
-		 * tmp_alone_branch either when the branch is connected
-		 * to a tree or when we reach the beg of the tree
+		 * If parent is already on the list, we add the child
+		 * just before. Thanks to circular linked property of
+		 * the list, this means to put the child at the tail
+		 * of the list that starts by parent.
 		 */
-		if (cfs_rq->tg->parent &&
-		    cfs_rq->tg->parent->cfs_rq[cpu]->on_list) {
-			/*
-			 * If parent is already on the list, we add the child
-			 * just before. Thanks to circular linked property of
-			 * the list, this means to put the child at the tail
-			 * of the list that starts by parent.
-			 */
-			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
-				&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
-			/*
-			 * The branch is now connected to its tree so we can
-			 * reset tmp_alone_branch to the beginning of the
-			 * list.
-			 */
-			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
-		} else if (!cfs_rq->tg->parent) {
-			/*
-			 * cfs rq without parent should be put
-			 * at the tail of the list.
-			 */
-			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
-						&rq->leaf_cfs_rq_list);
-			/*
-			 * We have reach the beg of a tree so we can reset
-			 * tmp_alone_branch to the beginning of the list.
-			 */
-			rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
-		} else {
-			/*
-			 * The parent has not already been added so we want to
-			 * make sure that it will be put after us.
-			 * tmp_alone_branch points to the beg of the branch
-			 * where we will add parent.
-			 */
-			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
-				     rq->tmp_alone_branch);
-			/*
-			 * update tmp_alone_branch to points to the new beg
-			 * of the branch
-			 */
-			rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
-		}
+		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
+			&(cfs_rq->tg->parent->cfs_rq[cpu]->leaf_cfs_rq_list));
+		/*
+		 * The branch is now connected to its tree so we can
+		 * reset tmp_alone_branch to the beginning of the
+		 * list.
+		 */
+		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
+		return;
+	}
 
-		cfs_rq->on_list = 1;
+	if (!cfs_rq->tg->parent) {
+		/*
+		 * cfs rq without parent should be put
+		 * at the tail of the list.
+		 */
+		list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
+			&rq->leaf_cfs_rq_list);
+		/*
+		 * We have reach the top of a tree so we can reset
+		 * tmp_alone_branch to the beginning of the list.
+		 */
+		rq->tmp_alone_branch = &rq->leaf_cfs_rq_list;
+		return;
 	}
+
+	/*
+	 * The parent has not already been added so we want to
+	 * make sure that it will be put after us.
+	 * tmp_alone_branch points to the begin of the branch
+	 * where we will add parent.
+	 */
+	list_add_rcu(&cfs_rq->leaf_cfs_rq_list, rq->tmp_alone_branch);
+	/*
+	 * update tmp_alone_branch to points to the new begin
+	 * of the branch
+	 */
+	rq->tmp_alone_branch = &cfs_rq->leaf_cfs_rq_list;
 }
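[Editor's note] The first case in the reflowed function leans on a property of circular doubly-linked lists that the in-code comment only states in passing: adding "tail" relative to a given entry links the new node immediately before that entry, so using the parent's own list entry as the anchor lands the child at the tail of the sub-list that "starts" at the parent, keeping children ahead of their parents when walking from rq->leaf_cfs_rq_list (the bottom-up order the load-tracking walk relies on). A small user-space demonstration with illustrative names, not the kernel API:

	#include <stdio.h>

	struct node { const char *name; struct node *prev, *next; };

	/* list_add_tail() analogue: link @n immediately before @pos. */
	static void add_before(struct node *n, struct node *pos)
	{
		n->prev = pos->prev;
		n->next = pos;
		pos->prev->next = n;
		pos->prev = n;
	}

	int main(void)
	{
		struct node head   = { "head",   &head, &head };
		struct node parent = { "parent", NULL, NULL };
		struct node child  = { "child",  NULL, NULL };

		add_before(&parent, &head);	/* head -> parent */
		add_before(&child, &parent);	/* head -> child -> parent */

		/* Traversal from the head visits the child before its
		 * parent, i.e. bottom-up. */
		for (struct node *p = head.next; p != &head; p = p->next)
			printf("%s\n", p->name);
		return 0;
	}

This prints "child" before "parent", which is the ordering guarantee the big comment in list_add_leaf_cfs_rq() describes.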
@@ -345,7 +350,12 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 	}
 }
 
-/* Iterate through all leaf cfs_rq's on a runqueue: */
+static inline void assert_list_leaf_cfs_rq(struct rq *rq)
+{
+	SCHED_WARN_ON(rq->tmp_alone_branch != &rq->leaf_cfs_rq_list);
+}
+
+/* Iterate through all cfs_rq's on a runqueue in bottom-up order */
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
 
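[Editor's note] assert_list_leaf_cfs_rq() only produces output on CONFIG_SCHED_DEBUG builds. At the time of this commit, SCHED_WARN_ON() in kernel/sched/sched.h is defined roughly as follows (quoted from memory; verify against your tree):

	#ifdef CONFIG_SCHED_DEBUG
	# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
	#else
	# define SCHED_WARN_ON(x)	({ (void)(x), false; })
	#endif

So a violated invariant yields a one-time warning with a backtrace rather than a crash, and on non-debug builds the condition is merely evaluated for side effects.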
@@ -433,6 +443,10 @@ static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
 {
 }
 
+static inline void assert_list_leaf_cfs_rq(struct rq *rq)
+{
+}
+
 #define for_each_leaf_cfs_rq(rq, cfs_rq) \
 	for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
 
@@ -5172,6 +5186,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 	}
 
+	assert_list_leaf_cfs_rq(rq);
+
 	hrtick_update(rq);
 }
 