author		Davidlohr Bueso <dave@stgolabs.net>	2017-09-08 19:14:58 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-08 21:26:49 -0400
commit		2161573ecd6931565936cb66793b2d2bf805c088
tree		d20a0edb400805effb57590e6faa20e8630701d6 /kernel
parent		bfb068892d30dcf0a32b89302fe293347adeaaaa
sched/deadline: replace earliest dl and rq leftmost caching
... with the generic rbtree flavor instead. No changes in semantics whatsoever.

Link: http://lkml.kernel.org/r/20170719014603.19029-9-dave@stgolabs.net
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
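The "generic rbtree flavor" above is rb_root_cached, which stores a pointer to the leftmost node alongside the root so callers no longer track it by hand. Below is a minimal sketch of the insertion pattern the patch converts to; the demo_entry type and its deadline key are invented for illustration, while the rb_* calls are the actual include/linux/rbtree.h API:

#include <linux/rbtree.h>
#include <linux/types.h>

/* hypothetical entry type, keyed by an absolute deadline */
struct demo_entry {
	struct rb_node node;
	u64 deadline;
};

static void demo_insert(struct rb_root_cached *root, struct demo_entry *e)
{
	struct rb_node **link = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct demo_entry *cur = rb_entry(*link, struct demo_entry, node);

		parent = *link;
		if (e->deadline < cur->deadline) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;	/* descended right at least once */
		}
	}

	rb_link_node(&e->node, parent, link);
	/* also updates root->rb_leftmost when 'leftmost' is true */
	rb_insert_color_cached(&e->node, root, leftmost);
}

With that in place, rb_first_cached() returns the earliest-deadline entry in O(1), which is what pick_next_dl_entity() relies on in the diff below.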
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/deadline.c	50
-rw-r--r--	kernel/sched/sched.h	6
2 files changed, 21 insertions, 35 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 9e38df7649f4..0191ec7667c3 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -296,7 +296,7 @@ static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
 
-	return dl_rq->rb_leftmost == &dl_se->rb_node;
+	return dl_rq->root.rb_leftmost == &dl_se->rb_node;
 }
 
 void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
@@ -320,7 +320,7 @@ void init_dl_bw(struct dl_bw *dl_b)
 
 void init_dl_rq(struct dl_rq *dl_rq)
 {
-	dl_rq->rb_root = RB_ROOT;
+	dl_rq->root = RB_ROOT_CACHED;
 
 #ifdef CONFIG_SMP
 	/* zero means no -deadline tasks */
@@ -328,7 +328,7 @@ void init_dl_rq(struct dl_rq *dl_rq)
 
 	dl_rq->dl_nr_migratory = 0;
 	dl_rq->overloaded = 0;
-	dl_rq->pushable_dl_tasks_root = RB_ROOT;
+	dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
 #else
 	init_dl_bw(&dl_rq->dl_bw);
 #endif
@@ -410,10 +410,10 @@ static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 {
 	struct dl_rq *dl_rq = &rq->dl;
-	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
+	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
 	struct rb_node *parent = NULL;
 	struct task_struct *entry;
-	int leftmost = 1;
+	bool leftmost = true;
 
 	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
 
@@ -425,17 +425,16 @@ static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 			link = &parent->rb_left;
 		else {
 			link = &parent->rb_right;
-			leftmost = 0;
+			leftmost = false;
 		}
 	}
 
-	if (leftmost) {
-		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
+	if (leftmost)
 		dl_rq->earliest_dl.next = p->dl.deadline;
-	}
 
 	rb_link_node(&p->pushable_dl_tasks, parent, link);
-	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
+	rb_insert_color_cached(&p->pushable_dl_tasks,
+			       &dl_rq->pushable_dl_tasks_root, leftmost);
 }
 
 static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
@@ -445,24 +444,23 @@ static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
 	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
 		return;
 
-	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
+	if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
 		struct rb_node *next_node;
 
 		next_node = rb_next(&p->pushable_dl_tasks);
-		dl_rq->pushable_dl_tasks_leftmost = next_node;
 		if (next_node) {
 			dl_rq->earliest_dl.next = rb_entry(next_node,
 				struct task_struct, pushable_dl_tasks)->dl.deadline;
 		}
 	}
 
-	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
+	rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
 	RB_CLEAR_NODE(&p->pushable_dl_tasks);
 }
 
 static inline int has_pushable_dl_tasks(struct rq *rq)
 {
-	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
+	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
 }
 
 static int push_dl_task(struct rq *rq);
@@ -1266,7 +1264,7 @@ static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
 		dl_rq->earliest_dl.next = 0;
 		cpudl_clear(&rq->rd->cpudl, rq->cpu);
 	} else {
-		struct rb_node *leftmost = dl_rq->rb_leftmost;
+		struct rb_node *leftmost = dl_rq->root.rb_leftmost;
 		struct sched_dl_entity *entry;
 
 		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
@@ -1313,7 +1311,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
 {
 	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
-	struct rb_node **link = &dl_rq->rb_root.rb_node;
+	struct rb_node **link = &dl_rq->root.rb_root.rb_node;
 	struct rb_node *parent = NULL;
 	struct sched_dl_entity *entry;
 	int leftmost = 1;
@@ -1331,11 +1329,8 @@ static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
 		}
 	}
 
-	if (leftmost)
-		dl_rq->rb_leftmost = &dl_se->rb_node;
-
 	rb_link_node(&dl_se->rb_node, parent, link);
-	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
+	rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
 
 	inc_dl_tasks(dl_se, dl_rq);
 }
@@ -1347,14 +1342,7 @@ static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
 	if (RB_EMPTY_NODE(&dl_se->rb_node))
 		return;
 
-	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
-		struct rb_node *next_node;
-
-		next_node = rb_next(&dl_se->rb_node);
-		dl_rq->rb_leftmost = next_node;
-	}
-
-	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
+	rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
 	RB_CLEAR_NODE(&dl_se->rb_node);
 
 	dec_dl_tasks(dl_se, dl_rq);
@@ -1647,7 +1635,7 @@ static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
 static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
 						   struct dl_rq *dl_rq)
 {
-	struct rb_node *left = dl_rq->rb_leftmost;
+	struct rb_node *left = rb_first_cached(&dl_rq->root);
 
 	if (!left)
 		return NULL;
@@ -1771,7 +1759,7 @@ static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
  */
 static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
 {
-	struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
+	struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
 	struct task_struct *p = NULL;
 
 	if (!has_pushable_dl_tasks(rq))
@@ -1945,7 +1933,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
 	if (!has_pushable_dl_tasks(rq))
 		return NULL;
 
-	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
+	p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
 		     struct task_struct, pushable_dl_tasks);
 
 	BUG_ON(rq->cpu != task_cpu(p));
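Why the erase side stays semantics-preserving: the open-coded leftmost handoff deleted from dequeue_pushable_dl_task() and __dequeue_dl_entity() is what the generic helper performs internally. Roughly, for the kernels this patch targets (a paraphrase of the include/linux/rbtree.h helper, not the verbatim source):

static inline void rb_erase_cached(struct rb_node *node,
				   struct rb_root_cached *root)
{
	/* if the cached leftmost is being erased, its successor takes over */
	if (root->rb_leftmost == node)
		root->rb_leftmost = rb_next(node);
	rb_erase(node, &root->rb_root);
}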
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c30c57563dbc..746ac78ff492 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -549,8 +549,7 @@ struct rt_rq {
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
 	/* runqueue is an rbtree, ordered by deadline */
-	struct rb_root rb_root;
-	struct rb_node *rb_leftmost;
+	struct rb_root_cached root;
 
 	unsigned long dl_nr_running;
 
@@ -574,8 +573,7 @@ struct dl_rq {
 	 * an rb-tree, ordered by tasks' deadlines, with caching
 	 * of the leftmost (earliest deadline) element.
 	 */
-	struct rb_root pushable_dl_tasks_root;
-	struct rb_node *pushable_dl_tasks_leftmost;
+	struct rb_root_cached pushable_dl_tasks_root;
 #else
 	struct dl_bw dl_bw;
 #endif
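For reference, the cached root that replaces each root/leftmost pair above bundles exactly those two fields (as defined in include/linux/rbtree.h when this went in), which is why the converted code reads root.rb_leftmost and root.rb_root.rb_node:

struct rb_root_cached {
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;
};

#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }
#define rb_first_cached(root) (root)->rb_leftmost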