Diffstat (limited to 'kernel')
 kernel/sched.c          | 146
 kernel/sched_debug.c    |   1
 kernel/sched_fair.c     |  59
 kernel/sched_features.h |   3
 kernel/sched_rt.c       | 537
 5 files changed, 553 insertions(+), 193 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 8ee437a5ec1d..fcc3483e9955 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -467,11 +467,17 @@ struct rt_rq {
467 struct rt_prio_array active; 467 struct rt_prio_array active;
468 unsigned long rt_nr_running; 468 unsigned long rt_nr_running;
469#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 469#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
470 int highest_prio; /* highest queued rt task prio */ 470 struct {
471 int curr; /* highest queued rt task prio */
472#ifdef CONFIG_SMP
473 int next; /* next highest */
474#endif
475 } highest_prio;
471#endif 476#endif
472#ifdef CONFIG_SMP 477#ifdef CONFIG_SMP
473 unsigned long rt_nr_migratory; 478 unsigned long rt_nr_migratory;
474 int overloaded; 479 int overloaded;
480 struct plist_head pushable_tasks;
475#endif 481#endif
476 int rt_throttled; 482 int rt_throttled;
477 u64 rt_time; 483 u64 rt_time;
@@ -1610,21 +1616,42 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1610 1616
1611#endif 1617#endif
1612 1618
1619#ifdef CONFIG_PREEMPT
1620
1613/* 1621/*
1614 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 1622 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1623 * way at the expense of forcing extra atomic operations in all
1624 * invocations. This assures that the double_lock is acquired using the
1625 * same underlying policy as the spinlock_t on this architecture, which
1626 * reduces latency compared to the unfair variant below. However, it
1627 * also adds more overhead and therefore may reduce throughput.
1615 */ 1628 */
1616static int double_lock_balance(struct rq *this_rq, struct rq *busiest) 1629static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1630 __releases(this_rq->lock)
1631 __acquires(busiest->lock)
1632 __acquires(this_rq->lock)
1633{
1634 spin_unlock(&this_rq->lock);
1635 double_rq_lock(this_rq, busiest);
1636
1637 return 1;
1638}
1639
1640#else
1641/*
1642 * Unfair double_lock_balance: Optimizes throughput at the expense of
1643 * latency by eliminating extra atomic operations when the locks are
1644 * already in proper order on entry. This favors lower cpu-ids and will
1645 * grant the double lock to lower cpus over higher ids under contention,
1646 * regardless of entry order into the function.
1647 */
1648static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1617 __releases(this_rq->lock) 1649 __releases(this_rq->lock)
1618 __acquires(busiest->lock) 1650 __acquires(busiest->lock)
1619 __acquires(this_rq->lock) 1651 __acquires(this_rq->lock)
1620{ 1652{
1621 int ret = 0; 1653 int ret = 0;
1622 1654
1623 if (unlikely(!irqs_disabled())) {
1624 /* printk() doesn't work good under rq->lock */
1625 spin_unlock(&this_rq->lock);
1626 BUG_ON(1);
1627 }
1628 if (unlikely(!spin_trylock(&busiest->lock))) { 1655 if (unlikely(!spin_trylock(&busiest->lock))) {
1629 if (busiest < this_rq) { 1656 if (busiest < this_rq) {
1630 spin_unlock(&this_rq->lock); 1657 spin_unlock(&this_rq->lock);
@@ -1637,6 +1664,22 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1637 return ret; 1664 return ret;
1638} 1665}
1639 1666
1667#endif /* CONFIG_PREEMPT */
1668
1669/*
1670 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1671 */
1672static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1673{
1674 if (unlikely(!irqs_disabled())) {
1675 /* printk() doesn't work good under rq->lock */
1676 spin_unlock(&this_rq->lock);
1677 BUG_ON(1);
1678 }
1679
1680 return _double_lock_balance(this_rq, busiest);
1681}
1682
1640static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1683static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1641 __releases(busiest->lock) 1684 __releases(busiest->lock)
1642{ 1685{
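
The CONFIG_PREEMPT split above gives two _double_lock_balance() flavours: the "fair" one always drops this_rq->lock and re-takes both locks through double_rq_lock(), while the "unfair" one keeps the lock whenever a trylock succeeds and otherwise falls back to address-ordered locking. Below is a minimal userspace sketch of that unfair fallback, using pthread mutexes instead of rq spinlocks; the struct and function names are invented for illustration, not kernel API.

#include <pthread.h>

struct rq_model {
	pthread_mutex_t lock;
};

/* Caller already holds this_rq->lock; returns 1 if it had to drop it. */
int double_lock_model(struct rq_model *this_rq, struct rq_model *busiest)
{
	int dropped = 0;

	if (pthread_mutex_trylock(&busiest->lock) != 0) {
		if (busiest < this_rq) {
			/*
			 * Wrong order: drop ours, then take both in
			 * lowest-address-first order, which is what
			 * prevents an AB/BA deadlock between two CPUs
			 * doing the same dance in opposite directions.
			 */
			pthread_mutex_unlock(&this_rq->lock);
			pthread_mutex_lock(&busiest->lock);
			pthread_mutex_lock(&this_rq->lock);
			dropped = 1;
		} else {
			/* Already in address order: just wait for the second lock. */
			pthread_mutex_lock(&busiest->lock);
		}
	}
	return dropped;
}
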
@@ -1705,6 +1748,9 @@ static void update_avg(u64 *avg, u64 sample)
1705 1748
1706static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) 1749static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1707{ 1750{
1751 if (wakeup)
1752 p->se.start_runtime = p->se.sum_exec_runtime;
1753
1708 sched_info_queued(p); 1754 sched_info_queued(p);
1709 p->sched_class->enqueue_task(rq, p, wakeup); 1755 p->sched_class->enqueue_task(rq, p, wakeup);
1710 p->se.on_rq = 1; 1756 p->se.on_rq = 1;
@@ -1712,10 +1758,15 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1712 1758
1713static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) 1759static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
1714{ 1760{
1715 if (sleep && p->se.last_wakeup) { 1761 if (sleep) {
1716 update_avg(&p->se.avg_overlap, 1762 if (p->se.last_wakeup) {
1717 p->se.sum_exec_runtime - p->se.last_wakeup); 1763 update_avg(&p->se.avg_overlap,
1718 p->se.last_wakeup = 0; 1764 p->se.sum_exec_runtime - p->se.last_wakeup);
1765 p->se.last_wakeup = 0;
1766 } else {
1767 update_avg(&p->se.avg_wakeup,
1768 sysctl_sched_wakeup_granularity);
1769 }
1719 } 1770 }
1720 1771
1721 sched_info_dequeued(p); 1772 sched_info_dequeued(p);
@@ -2355,6 +2406,22 @@ out_activate:
2355 activate_task(rq, p, 1); 2406 activate_task(rq, p, 1);
2356 success = 1; 2407 success = 1;
2357 2408
2409 /*
2410 * Only attribute actual wakeups done by this task.
2411 */
2412 if (!in_interrupt()) {
2413 struct sched_entity *se = &current->se;
2414 u64 sample = se->sum_exec_runtime;
2415
2416 if (se->last_wakeup)
2417 sample -= se->last_wakeup;
2418 else
2419 sample -= se->start_runtime;
2420 update_avg(&se->avg_wakeup, sample);
2421
2422 se->last_wakeup = se->sum_exec_runtime;
2423 }
2424
2358out_running: 2425out_running:
2359 trace_sched_wakeup(rq, p, success); 2426 trace_sched_wakeup(rq, p, success);
2360 check_preempt_curr(rq, p, sync); 2427 check_preempt_curr(rq, p, sync);
@@ -2365,8 +2432,6 @@ out_running:
2365 p->sched_class->task_wake_up(rq, p); 2432 p->sched_class->task_wake_up(rq, p);
2366#endif 2433#endif
2367out: 2434out:
2368 current->se.last_wakeup = current->se.sum_exec_runtime;
2369
2370 task_rq_unlock(rq, &flags); 2435 task_rq_unlock(rq, &flags);
2371 2436
2372 return success; 2437 return success;
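
The wakeup hunks above introduce se->avg_wakeup: a running average of how long a task executes before it wakes another task, seeded from start_runtime on enqueue and sampled in try_to_wake_up(). A hedged standalone sketch of the arithmetic follows, assuming update_avg() keeps its usual 1/8 low-pass form from sched.c; the figures below are made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Same 1/8 smoothing sched.c uses for its other running averages. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = sample - *avg;	/* arithmetic shift, as in the kernel */
	*avg += diff >> 3;
}

int main(void)
{
	/* Model: a task that runs ~2ms (in ns) before each wakeup it issues. */
	uint64_t avg_wakeup = 10000000;	/* pessimistic seed, as the diff seeds
					 * it with sysctl_sched_wakeup_granularity */
	uint64_t sum_exec = 0, last_wakeup = 0;

	for (int i = 0; i < 10; i++) {
		sum_exec += 2000000;				/* ran 2ms ...   */
		update_avg(&avg_wakeup, sum_exec - last_wakeup);/* ...then woke someone */
		last_wakeup = sum_exec;
		printf("avg_wakeup = %llu ns\n", (unsigned long long)avg_wakeup);
	}
	return 0;
}
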
@@ -2396,6 +2461,8 @@ static void __sched_fork(struct task_struct *p)
2396 p->se.prev_sum_exec_runtime = 0; 2461 p->se.prev_sum_exec_runtime = 0;
2397 p->se.last_wakeup = 0; 2462 p->se.last_wakeup = 0;
2398 p->se.avg_overlap = 0; 2463 p->se.avg_overlap = 0;
2464 p->se.start_runtime = 0;
2465 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
2399 2466
2400#ifdef CONFIG_SCHEDSTATS 2467#ifdef CONFIG_SCHEDSTATS
2401 p->se.wait_start = 0; 2468 p->se.wait_start = 0;
@@ -2458,6 +2525,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
2458 /* Want to start with kernel preemption disabled. */ 2525 /* Want to start with kernel preemption disabled. */
2459 task_thread_info(p)->preempt_count = 1; 2526 task_thread_info(p)->preempt_count = 1;
2460#endif 2527#endif
2528 plist_node_init(&p->pushable_tasks, MAX_PRIO);
2529
2461 put_cpu(); 2530 put_cpu();
2462} 2531}
2463 2532
@@ -2598,6 +2667,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2598{ 2667{
2599 struct mm_struct *mm = rq->prev_mm; 2668 struct mm_struct *mm = rq->prev_mm;
2600 long prev_state; 2669 long prev_state;
2670#ifdef CONFIG_SMP
2671 int post_schedule = 0;
2672
2673 if (current->sched_class->needs_post_schedule)
2674 post_schedule = current->sched_class->needs_post_schedule(rq);
2675#endif
2601 2676
2602 rq->prev_mm = NULL; 2677 rq->prev_mm = NULL;
2603 2678
@@ -2616,7 +2691,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2616 finish_arch_switch(prev); 2691 finish_arch_switch(prev);
2617 finish_lock_switch(rq, prev); 2692 finish_lock_switch(rq, prev);
2618#ifdef CONFIG_SMP 2693#ifdef CONFIG_SMP
2619 if (current->sched_class->post_schedule) 2694 if (post_schedule)
2620 current->sched_class->post_schedule(rq); 2695 current->sched_class->post_schedule(rq);
2621#endif 2696#endif
2622 2697
@@ -2997,6 +3072,16 @@ next:
2997 pulled++; 3072 pulled++;
2998 rem_load_move -= p->se.load.weight; 3073 rem_load_move -= p->se.load.weight;
2999 3074
3075#ifdef CONFIG_PREEMPT
3076 /*
3077 * NEWIDLE balancing is a source of latency, so preemptible kernels
3078 * will stop after the first task is pulled to minimize the critical
3079 * section.
3080 */
3081 if (idle == CPU_NEWLY_IDLE)
3082 goto out;
3083#endif
3084
3000 /* 3085 /*
3001 * We only want to steal up to the prescribed amount of weighted load. 3086 * We only want to steal up to the prescribed amount of weighted load.
3002 */ 3087 */
@@ -3043,9 +3128,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3043 sd, idle, all_pinned, &this_best_prio); 3128 sd, idle, all_pinned, &this_best_prio);
3044 class = class->next; 3129 class = class->next;
3045 3130
3131#ifdef CONFIG_PREEMPT
3132 /*
3133 * NEWIDLE balancing is a source of latency, so preemptible
3134 * kernels will stop after the first task is pulled to minimize
3135 * the critical section.
3136 */
3046 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) 3137 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
3047 break; 3138 break;
3048 3139#endif
3049 } while (class && max_load_move > total_load_moved); 3140 } while (class && max_load_move > total_load_moved);
3050 3141
3051 return total_load_moved > 0; 3142 return total_load_moved > 0;
@@ -3890,19 +3981,24 @@ int select_nohz_load_balancer(int stop_tick)
3890 int cpu = smp_processor_id(); 3981 int cpu = smp_processor_id();
3891 3982
3892 if (stop_tick) { 3983 if (stop_tick) {
3893 cpumask_set_cpu(cpu, nohz.cpu_mask);
3894 cpu_rq(cpu)->in_nohz_recently = 1; 3984 cpu_rq(cpu)->in_nohz_recently = 1;
3895 3985
3896 /* 3986 if (!cpu_active(cpu)) {
3897 * If we are going offline and still the leader, give up! 3987 if (atomic_read(&nohz.load_balancer) != cpu)
3898 */ 3988 return 0;
3899 if (!cpu_active(cpu) && 3989
3900 atomic_read(&nohz.load_balancer) == cpu) { 3990 /*
3991 * If we are going offline and still the leader,
3992 * give up!
3993 */
3901 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu) 3994 if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
3902 BUG(); 3995 BUG();
3996
3903 return 0; 3997 return 0;
3904 } 3998 }
3905 3999
4000 cpumask_set_cpu(cpu, nohz.cpu_mask);
4001
3906 /* time for ilb owner also to sleep */ 4002 /* time for ilb owner also to sleep */
3907 if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) { 4003 if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
3908 if (atomic_read(&nohz.load_balancer) == cpu) 4004 if (atomic_read(&nohz.load_balancer) == cpu)
@@ -8214,11 +8310,15 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
8214 __set_bit(MAX_RT_PRIO, array->bitmap); 8310 __set_bit(MAX_RT_PRIO, array->bitmap);
8215 8311
8216#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 8312#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
8217 rt_rq->highest_prio = MAX_RT_PRIO; 8313 rt_rq->highest_prio.curr = MAX_RT_PRIO;
8314#ifdef CONFIG_SMP
8315 rt_rq->highest_prio.next = MAX_RT_PRIO;
8316#endif
8218#endif 8317#endif
8219#ifdef CONFIG_SMP 8318#ifdef CONFIG_SMP
8220 rt_rq->rt_nr_migratory = 0; 8319 rt_rq->rt_nr_migratory = 0;
8221 rt_rq->overloaded = 0; 8320 rt_rq->overloaded = 0;
8321 plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
8222#endif 8322#endif
8223 8323
8224 rt_rq->rt_time = 0; 8324 rt_rq->rt_time = 0;
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 16eeba4e4169..2b1260f0e800 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -397,6 +397,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
397 PN(se.vruntime); 397 PN(se.vruntime);
398 PN(se.sum_exec_runtime); 398 PN(se.sum_exec_runtime);
399 PN(se.avg_overlap); 399 PN(se.avg_overlap);
400 PN(se.avg_wakeup);
400 401
401 nr_switches = p->nvcsw + p->nivcsw; 402 nr_switches = p->nvcsw + p->nivcsw;
402 403
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index a7e50ba185ac..bc1563e7a248 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1309,16 +1309,63 @@ out:
1309} 1309}
1310#endif /* CONFIG_SMP */ 1310#endif /* CONFIG_SMP */
1311 1311
1312static unsigned long wakeup_gran(struct sched_entity *se) 1312/*
1313 * Adaptive granularity
1314 *
1315 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
1316 * with the limit of wakeup_gran -- when it never does a wakeup.
1317 *
1318 * So the smaller avg_wakeup is the faster we want this task to preempt,
1319 * but we don't want to treat the preemptee unfairly and therefore allow it
1320 * to run for at least the amount of time we'd like to run.
1321 *
1322 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
1323 *
1324 * NOTE: we use *nr_running to scale with load, this nicely matches the
1325 * degrading latency on load.
1326 */
1327static unsigned long
1328adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
1329{
1330 u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1331 u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
1332 u64 gran = 0;
1333
1334 if (this_run < expected_wakeup)
1335 gran = expected_wakeup - this_run;
1336
1337 return min_t(s64, gran, sysctl_sched_wakeup_granularity);
1338}
1339
1340static unsigned long
1341wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1313{ 1342{
1314 unsigned long gran = sysctl_sched_wakeup_granularity; 1343 unsigned long gran = sysctl_sched_wakeup_granularity;
1315 1344
1345 if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
1346 gran = adaptive_gran(curr, se);
1347
1316 /* 1348 /*
1317 * More easily preempt - nice tasks, while not making it harder for 1349 * Since its curr running now, convert the gran from real-time
1318 * + nice tasks. 1350 * to virtual-time in his units.
1319 */ 1351 */
1320 if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD) 1352 if (sched_feat(ASYM_GRAN)) {
1321 gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se); 1353 /*
1354 * By using 'se' instead of 'curr' we penalize light tasks, so
1355 * they get preempted easier. That is, if 'se' < 'curr' then
1356 * the resulting gran will be larger, therefore penalizing the
1357 * lighter, if otoh 'se' > 'curr' then the resulting gran will
1358 * be smaller, again penalizing the lighter task.
1359 *
1360 * This is especially important for buddies when the leftmost
1361 * task is higher priority than the buddy.
1362 */
1363 if (unlikely(se->load.weight != NICE_0_LOAD))
1364 gran = calc_delta_fair(gran, se);
1365 } else {
1366 if (unlikely(curr->load.weight != NICE_0_LOAD))
1367 gran = calc_delta_fair(gran, curr);
1368 }
1322 1369
1323 return gran; 1370 return gran;
1324} 1371}
@@ -1345,7 +1392,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1345 if (vdiff <= 0) 1392 if (vdiff <= 0)
1346 return -1; 1393 return -1;
1347 1394
1348 gran = wakeup_gran(curr); 1395 gran = wakeup_gran(curr, se);
1349 if (vdiff > gran) 1396 if (vdiff > gran)
1350 return 1; 1397 return 1;
1351 1398
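
adaptive_gran() above shrinks the effective wakeup granularity for tasks that, on average, run only briefly before waking someone else, while still letting the current task run for the time it has already "earned". A standalone model of that clamp, with invented names and nanosecond inputs chosen only for illustration:

#include <stdint.h>
#include <stdio.h>

/*
 * gran = clamp(2 * avg_wakeup * nr_running - time_curr_already_ran,
 *              0, sysctl_sched_wakeup_granularity)
 * i.e. a frequently-waking task gets to preempt sooner, but the current
 * task is still guaranteed roughly the slice it has run so far.
 */
static uint64_t adaptive_gran_model(uint64_t this_run, uint64_t avg_wakeup,
				    unsigned int nr_running, uint64_t wakeup_gran)
{
	uint64_t expected_wakeup = 2 * avg_wakeup * nr_running;
	uint64_t gran = 0;

	if (this_run < expected_wakeup)
		gran = expected_wakeup - this_run;

	return gran < wakeup_gran ? gran : wakeup_gran;
}

int main(void)
{
	/* All times in nanoseconds; a 5ms wakeup granularity is assumed. */
	printf("chatty waker:   %llu\n",
	       (unsigned long long)adaptive_gran_model(3000000, 500000, 2, 5000000));
	printf("batch-ish task: %llu\n",
	       (unsigned long long)adaptive_gran_model(1000000, 5000000, 2, 5000000));
	return 0;
}
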
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index da5d93b5d2c6..76f61756e677 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,5 +1,6 @@
1SCHED_FEAT(NEW_FAIR_SLEEPERS, 1) 1SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
2SCHED_FEAT(NORMALIZED_SLEEPER, 1) 2SCHED_FEAT(NORMALIZED_SLEEPER, 0)
3SCHED_FEAT(ADAPTIVE_GRAN, 1)
3SCHED_FEAT(WAKEUP_PREEMPT, 1) 4SCHED_FEAT(WAKEUP_PREEMPT, 1)
4SCHED_FEAT(START_DEBIT, 1) 5SCHED_FEAT(START_DEBIT, 1)
5SCHED_FEAT(AFFINE_WAKEUPS, 1) 6SCHED_FEAT(AFFINE_WAKEUPS, 1)
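
For context (an assumption about the surrounding code, not part of this diff): sched_features.h is included several times by sched.c with different SCHED_FEAT() definitions, once to build an enum of feature bits and once to build the default feature mask, and with CONFIG_SCHED_DEBUG the bits can be flipped at runtime through /sys/kernel/debug/sched_features (e.g. writing NO_ADAPTIVE_GRAN). A generic sketch of that X-macro pattern, not the kernel's exact macros:

#include <stdio.h>

/* A stand-in for sched_features.h: name + default value pairs. */
#define FEATURE_LIST(F)			\
	F(NEW_FAIR_SLEEPERS, 1)		\
	F(NORMALIZED_SLEEPER, 0)	\
	F(ADAPTIVE_GRAN, 1)		\
	F(WAKEUP_PREEMPT, 1)

/* First expansion: an enum giving each feature a bit index. */
#define DEFINE_ENUM(name, enabled) FEAT_##name,
enum { FEATURE_LIST(DEFINE_ENUM) FEAT_NR };
#undef DEFINE_ENUM

/* Second expansion: the default feature bitmask. */
#define DEFINE_MASK(name, enabled) ((enabled) << FEAT_##name) |
static unsigned int features = FEATURE_LIST(DEFINE_MASK) 0;
#undef DEFINE_MASK

#define feat_enabled(name) (features & (1u << FEAT_##name))

int main(void)
{
	printf("ADAPTIVE_GRAN default:      %d\n", feat_enabled(ADAPTIVE_GRAN) ? 1 : 0);
	printf("NORMALIZED_SLEEPER default: %d\n", feat_enabled(NORMALIZED_SLEEPER) ? 1 : 0);
	return 0;
}
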
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bac1061cea2f..c79dc7844012 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,40 @@
3 * policies) 3 * policies)
4 */ 4 */
5 5
6static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
7{
8 return container_of(rt_se, struct task_struct, rt);
9}
10
11#ifdef CONFIG_RT_GROUP_SCHED
12
13static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
14{
15 return rt_rq->rq;
16}
17
18static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
19{
20 return rt_se->rt_rq;
21}
22
23#else /* CONFIG_RT_GROUP_SCHED */
24
25static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
26{
27 return container_of(rt_rq, struct rq, rt);
28}
29
30static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
31{
32 struct task_struct *p = rt_task_of(rt_se);
33 struct rq *rq = task_rq(p);
34
35 return &rq->rt;
36}
37
38#endif /* CONFIG_RT_GROUP_SCHED */
39
6#ifdef CONFIG_SMP 40#ifdef CONFIG_SMP
7 41
8static inline int rt_overloaded(struct rq *rq) 42static inline int rt_overloaded(struct rq *rq)
@@ -37,25 +71,69 @@ static inline void rt_clear_overload(struct rq *rq)
37 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); 71 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
38} 72}
39 73
40static void update_rt_migration(struct rq *rq) 74static void update_rt_migration(struct rt_rq *rt_rq)
41{ 75{
42 if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) { 76 if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
43 if (!rq->rt.overloaded) { 77 if (!rt_rq->overloaded) {
44 rt_set_overload(rq); 78 rt_set_overload(rq_of_rt_rq(rt_rq));
45 rq->rt.overloaded = 1; 79 rt_rq->overloaded = 1;
46 } 80 }
47 } else if (rq->rt.overloaded) { 81 } else if (rt_rq->overloaded) {
48 rt_clear_overload(rq); 82 rt_clear_overload(rq_of_rt_rq(rt_rq));
49 rq->rt.overloaded = 0; 83 rt_rq->overloaded = 0;
50 } 84 }
51} 85}
52#endif /* CONFIG_SMP */
53 86
54static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) 87static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
88{
89 if (rt_se->nr_cpus_allowed > 1)
90 rt_rq->rt_nr_migratory++;
91
92 update_rt_migration(rt_rq);
93}
94
95static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
96{
97 if (rt_se->nr_cpus_allowed > 1)
98 rt_rq->rt_nr_migratory--;
99
100 update_rt_migration(rt_rq);
101}
102
103static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
104{
105 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
106 plist_node_init(&p->pushable_tasks, p->prio);
107 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
108}
109
110static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
111{
112 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
113}
114
115#else
116
117static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
55{ 118{
56 return container_of(rt_se, struct task_struct, rt);
57} 119}
58 120
121static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
122{
123}
124
125static inline
126void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
127{
128}
129
130static inline
131void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
132{
133}
134
135#endif /* CONFIG_SMP */
136
59static inline int on_rt_rq(struct sched_rt_entity *rt_se) 137static inline int on_rt_rq(struct sched_rt_entity *rt_se)
60{ 138{
61 return !list_empty(&rt_se->run_list); 139 return !list_empty(&rt_se->run_list);
@@ -79,16 +157,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
79#define for_each_leaf_rt_rq(rt_rq, rq) \ 157#define for_each_leaf_rt_rq(rt_rq, rq) \
80 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) 158 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
81 159
82static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
83{
84 return rt_rq->rq;
85}
86
87static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
88{
89 return rt_se->rt_rq;
90}
91
92#define for_each_sched_rt_entity(rt_se) \ 160#define for_each_sched_rt_entity(rt_se) \
93 for (; rt_se; rt_se = rt_se->parent) 161 for (; rt_se; rt_se = rt_se->parent)
94 162
@@ -108,7 +176,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
108 if (rt_rq->rt_nr_running) { 176 if (rt_rq->rt_nr_running) {
109 if (rt_se && !on_rt_rq(rt_se)) 177 if (rt_se && !on_rt_rq(rt_se))
110 enqueue_rt_entity(rt_se); 178 enqueue_rt_entity(rt_se);
111 if (rt_rq->highest_prio < curr->prio) 179 if (rt_rq->highest_prio.curr < curr->prio)
112 resched_task(curr); 180 resched_task(curr);
113 } 181 }
114} 182}
@@ -176,19 +244,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
176#define for_each_leaf_rt_rq(rt_rq, rq) \ 244#define for_each_leaf_rt_rq(rt_rq, rq) \
177 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL) 245 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
178 246
179static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180{
181 return container_of(rt_rq, struct rq, rt);
182}
183
184static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185{
186 struct task_struct *p = rt_task_of(rt_se);
187 struct rq *rq = task_rq(p);
188
189 return &rq->rt;
190}
191
192#define for_each_sched_rt_entity(rt_se) \ 247#define for_each_sched_rt_entity(rt_se) \
193 for (; rt_se; rt_se = NULL) 248 for (; rt_se; rt_se = NULL)
194 249
@@ -473,7 +528,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
473 struct rt_rq *rt_rq = group_rt_rq(rt_se); 528 struct rt_rq *rt_rq = group_rt_rq(rt_se);
474 529
475 if (rt_rq) 530 if (rt_rq)
476 return rt_rq->highest_prio; 531 return rt_rq->highest_prio.curr;
477#endif 532#endif
478 533
479 return rt_task_of(rt_se)->prio; 534 return rt_task_of(rt_se)->prio;
@@ -547,91 +602,174 @@ static void update_curr_rt(struct rq *rq)
547 } 602 }
548} 603}
549 604
550static inline 605#if defined CONFIG_SMP
551void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 606
607static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
608
609static inline int next_prio(struct rq *rq)
552{ 610{
553 WARN_ON(!rt_prio(rt_se_prio(rt_se))); 611 struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
554 rt_rq->rt_nr_running++; 612
555#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 613 if (next && rt_prio(next->prio))
556 if (rt_se_prio(rt_se) < rt_rq->highest_prio) { 614 return next->prio;
557#ifdef CONFIG_SMP 615 else
558 struct rq *rq = rq_of_rt_rq(rt_rq); 616 return MAX_RT_PRIO;
559#endif 617}
618
619static void
620inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
621{
622 struct rq *rq = rq_of_rt_rq(rt_rq);
623
624 if (prio < prev_prio) {
625
626 /*
627 * If the new task is higher in priority than anything on the
628 * run-queue, we know that the previous high becomes our
629 * next-highest.
630 */
631 rt_rq->highest_prio.next = prev_prio;
560 632
561 rt_rq->highest_prio = rt_se_prio(rt_se);
562#ifdef CONFIG_SMP
563 if (rq->online) 633 if (rq->online)
564 cpupri_set(&rq->rd->cpupri, rq->cpu, 634 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
565 rt_se_prio(rt_se));
566#endif
567 }
568#endif
569#ifdef CONFIG_SMP
570 if (rt_se->nr_cpus_allowed > 1) {
571 struct rq *rq = rq_of_rt_rq(rt_rq);
572 635
573 rq->rt.rt_nr_migratory++; 636 } else if (prio == rt_rq->highest_prio.curr)
574 } 637 /*
638 * If the next task is equal in priority to the highest on
639 * the run-queue, then we implicitly know that the next highest
640 * task cannot be any lower than current
641 */
642 rt_rq->highest_prio.next = prio;
643 else if (prio < rt_rq->highest_prio.next)
644 /*
645 * Otherwise, we need to recompute next-highest
646 */
647 rt_rq->highest_prio.next = next_prio(rq);
648}
575 649
576 update_rt_migration(rq_of_rt_rq(rt_rq)); 650static void
577#endif 651dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
578#ifdef CONFIG_RT_GROUP_SCHED 652{
579 if (rt_se_boosted(rt_se)) 653 struct rq *rq = rq_of_rt_rq(rt_rq);
580 rt_rq->rt_nr_boosted++;
581 654
582 if (rt_rq->tg) 655 if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
583 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); 656 rt_rq->highest_prio.next = next_prio(rq);
584#else 657
585 start_rt_bandwidth(&def_rt_bandwidth); 658 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
586#endif 659 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
587} 660}
588 661
662#else /* CONFIG_SMP */
663
589static inline 664static inline
590void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 665void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
591{ 666static inline
592#ifdef CONFIG_SMP 667void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
593 int highest_prio = rt_rq->highest_prio; 668
594#endif 669#endif /* CONFIG_SMP */
595 670
596 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
597 WARN_ON(!rt_rq->rt_nr_running);
598 rt_rq->rt_nr_running--;
599#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 671#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
672static void
673inc_rt_prio(struct rt_rq *rt_rq, int prio)
674{
675 int prev_prio = rt_rq->highest_prio.curr;
676
677 if (prio < prev_prio)
678 rt_rq->highest_prio.curr = prio;
679
680 inc_rt_prio_smp(rt_rq, prio, prev_prio);
681}
682
683static void
684dec_rt_prio(struct rt_rq *rt_rq, int prio)
685{
686 int prev_prio = rt_rq->highest_prio.curr;
687
600 if (rt_rq->rt_nr_running) { 688 if (rt_rq->rt_nr_running) {
601 struct rt_prio_array *array;
602 689
603 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio); 690 WARN_ON(prio < prev_prio);
604 if (rt_se_prio(rt_se) == rt_rq->highest_prio) { 691
605 /* recalculate */ 692 /*
606 array = &rt_rq->active; 693 * This may have been our highest task, and therefore
607 rt_rq->highest_prio = 694 * we may have some recomputation to do
695 */
696 if (prio == prev_prio) {
697 struct rt_prio_array *array = &rt_rq->active;
698
699 rt_rq->highest_prio.curr =
608 sched_find_first_bit(array->bitmap); 700 sched_find_first_bit(array->bitmap);
609 } /* otherwise leave rq->highest prio alone */ 701 }
702
610 } else 703 } else
611 rt_rq->highest_prio = MAX_RT_PRIO; 704 rt_rq->highest_prio.curr = MAX_RT_PRIO;
612#endif
613#ifdef CONFIG_SMP
614 if (rt_se->nr_cpus_allowed > 1) {
615 struct rq *rq = rq_of_rt_rq(rt_rq);
616 rq->rt.rt_nr_migratory--;
617 }
618 705
619 if (rt_rq->highest_prio != highest_prio) { 706 dec_rt_prio_smp(rt_rq, prio, prev_prio);
620 struct rq *rq = rq_of_rt_rq(rt_rq); 707}
621 708
622 if (rq->online) 709#else
623 cpupri_set(&rq->rd->cpupri, rq->cpu, 710
624 rt_rq->highest_prio); 711static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
625 } 712static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
713
714#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
626 715
627 update_rt_migration(rq_of_rt_rq(rt_rq));
628#endif /* CONFIG_SMP */
629#ifdef CONFIG_RT_GROUP_SCHED 716#ifdef CONFIG_RT_GROUP_SCHED
717
718static void
719inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
720{
721 if (rt_se_boosted(rt_se))
722 rt_rq->rt_nr_boosted++;
723
724 if (rt_rq->tg)
725 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
726}
727
728static void
729dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
730{
630 if (rt_se_boosted(rt_se)) 731 if (rt_se_boosted(rt_se))
631 rt_rq->rt_nr_boosted--; 732 rt_rq->rt_nr_boosted--;
632 733
633 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); 734 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
634#endif 735}
736
737#else /* CONFIG_RT_GROUP_SCHED */
738
739static void
740inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
741{
742 start_rt_bandwidth(&def_rt_bandwidth);
743}
744
745static inline
746void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
747
748#endif /* CONFIG_RT_GROUP_SCHED */
749
750static inline
751void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
752{
753 int prio = rt_se_prio(rt_se);
754
755 WARN_ON(!rt_prio(prio));
756 rt_rq->rt_nr_running++;
757
758 inc_rt_prio(rt_rq, prio);
759 inc_rt_migration(rt_se, rt_rq);
760 inc_rt_group(rt_se, rt_rq);
761}
762
763static inline
764void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
765{
766 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
767 WARN_ON(!rt_rq->rt_nr_running);
768 rt_rq->rt_nr_running--;
769
770 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
771 dec_rt_migration(rt_se, rt_rq);
772 dec_rt_group(rt_se, rt_rq);
635} 773}
636 774
637static void __enqueue_rt_entity(struct sched_rt_entity *rt_se) 775static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
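
The inc_rt_prio()/dec_rt_prio() split above maintains a two-level cache: highest_prio.curr is the best queued RT priority, and on SMP highest_prio.next is the runner-up, which pull_rt_task() later uses to skip runqueues that cannot possibly help. A simplified enqueue-only model of the update rule follows (the dequeue side in the diff instead recomputes from the priority bitmap via next_prio()); the names here are invented:

#include <stdio.h>

#define MAX_RT_PRIO 100	/* lower number == higher priority, as in the kernel */

struct prio_cache {
	int curr;	/* best priority currently queued */
	int next;	/* second-best priority currently queued */
};

/* A newly enqueued task of priority 'prio' arrives. */
static void cache_enqueue(struct prio_cache *c, int prio)
{
	if (prio < c->curr) {
		/* New best: the old best becomes the runner-up. */
		c->next = c->curr;
		c->curr = prio;
	} else if (prio < c->next) {
		/* Not the best, but better than the cached runner-up. */
		c->next = prio;
	}
}

int main(void)
{
	struct prio_cache c = { MAX_RT_PRIO, MAX_RT_PRIO };

	cache_enqueue(&c, 50);
	cache_enqueue(&c, 40);
	cache_enqueue(&c, 45);
	printf("curr=%d next=%d\n", c.curr, c.next);	/* curr=40 next=45 */
	return 0;
}
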
@@ -718,6 +856,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
718 856
719 enqueue_rt_entity(rt_se); 857 enqueue_rt_entity(rt_se);
720 858
859 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
860 enqueue_pushable_task(rq, p);
861
721 inc_cpu_load(rq, p->se.load.weight); 862 inc_cpu_load(rq, p->se.load.weight);
722} 863}
723 864
@@ -728,6 +869,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
728 update_curr_rt(rq); 869 update_curr_rt(rq);
729 dequeue_rt_entity(rt_se); 870 dequeue_rt_entity(rt_se);
730 871
872 dequeue_pushable_task(rq, p);
873
731 dec_cpu_load(rq, p->se.load.weight); 874 dec_cpu_load(rq, p->se.load.weight);
732} 875}
733 876
@@ -878,7 +1021,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
878 return next; 1021 return next;
879} 1022}
880 1023
881static struct task_struct *pick_next_task_rt(struct rq *rq) 1024static struct task_struct *_pick_next_task_rt(struct rq *rq)
882{ 1025{
883 struct sched_rt_entity *rt_se; 1026 struct sched_rt_entity *rt_se;
884 struct task_struct *p; 1027 struct task_struct *p;
@@ -900,6 +1043,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
900 1043
901 p = rt_task_of(rt_se); 1044 p = rt_task_of(rt_se);
902 p->se.exec_start = rq->clock; 1045 p->se.exec_start = rq->clock;
1046
1047 return p;
1048}
1049
1050static struct task_struct *pick_next_task_rt(struct rq *rq)
1051{
1052 struct task_struct *p = _pick_next_task_rt(rq);
1053
1054 /* The running task is never eligible for pushing */
1055 if (p)
1056 dequeue_pushable_task(rq, p);
1057
903 return p; 1058 return p;
904} 1059}
905 1060
@@ -907,6 +1062,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
907{ 1062{
908 update_curr_rt(rq); 1063 update_curr_rt(rq);
909 p->se.exec_start = 0; 1064 p->se.exec_start = 0;
1065
1066 /*
1067 * The previous task needs to be made eligible for pushing
1068 * if it is still active
1069 */
1070 if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
1071 enqueue_pushable_task(rq, p);
910} 1072}
911 1073
912#ifdef CONFIG_SMP 1074#ifdef CONFIG_SMP
@@ -1072,7 +1234,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1072 } 1234 }
1073 1235
1074 /* If this rq is still suitable use it. */ 1236 /* If this rq is still suitable use it. */
1075 if (lowest_rq->rt.highest_prio > task->prio) 1237 if (lowest_rq->rt.highest_prio.curr > task->prio)
1076 break; 1238 break;
1077 1239
1078 /* try again */ 1240 /* try again */
@@ -1083,6 +1245,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1083 return lowest_rq; 1245 return lowest_rq;
1084} 1246}
1085 1247
1248static inline int has_pushable_tasks(struct rq *rq)
1249{
1250 return !plist_head_empty(&rq->rt.pushable_tasks);
1251}
1252
1253static struct task_struct *pick_next_pushable_task(struct rq *rq)
1254{
1255 struct task_struct *p;
1256
1257 if (!has_pushable_tasks(rq))
1258 return NULL;
1259
1260 p = plist_first_entry(&rq->rt.pushable_tasks,
1261 struct task_struct, pushable_tasks);
1262
1263 BUG_ON(rq->cpu != task_cpu(p));
1264 BUG_ON(task_current(rq, p));
1265 BUG_ON(p->rt.nr_cpus_allowed <= 1);
1266
1267 BUG_ON(!p->se.on_rq);
1268 BUG_ON(!rt_task(p));
1269
1270 return p;
1271}
1272
1086/* 1273/*
1087 * If the current CPU has more than one RT task, see if the non 1274 * If the current CPU has more than one RT task, see if the non
1088 * running task can migrate over to a CPU that is running a task 1275 * running task can migrate over to a CPU that is running a task
@@ -1092,13 +1279,11 @@ static int push_rt_task(struct rq *rq)
1092{ 1279{
1093 struct task_struct *next_task; 1280 struct task_struct *next_task;
1094 struct rq *lowest_rq; 1281 struct rq *lowest_rq;
1095 int ret = 0;
1096 int paranoid = RT_MAX_TRIES;
1097 1282
1098 if (!rq->rt.overloaded) 1283 if (!rq->rt.overloaded)
1099 return 0; 1284 return 0;
1100 1285
1101 next_task = pick_next_highest_task_rt(rq, -1); 1286 next_task = pick_next_pushable_task(rq);
1102 if (!next_task) 1287 if (!next_task)
1103 return 0; 1288 return 0;
1104 1289
@@ -1127,16 +1312,34 @@ static int push_rt_task(struct rq *rq)
1127 struct task_struct *task; 1312 struct task_struct *task;
1128 /* 1313 /*
1129 * find lock_lowest_rq releases rq->lock 1314 * find lock_lowest_rq releases rq->lock
1130 * so it is possible that next_task has changed. 1315 * so it is possible that next_task has migrated.
1131 * If it has, then try again. 1316 *
1317 * We need to make sure that the task is still on the same
1318 * run-queue and is also still the next task eligible for
1319 * pushing.
1132 */ 1320 */
1133 task = pick_next_highest_task_rt(rq, -1); 1321 task = pick_next_pushable_task(rq);
1134 if (unlikely(task != next_task) && task && paranoid--) { 1322 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1135 put_task_struct(next_task); 1323 /*
1136 next_task = task; 1324 * If we get here, the task hasnt moved at all, but
1137 goto retry; 1325 * it has failed to push. We will not try again,
1326 * since the other cpus will pull from us when they
1327 * are ready.
1328 */
1329 dequeue_pushable_task(rq, next_task);
1330 goto out;
1138 } 1331 }
1139 goto out; 1332
1333 if (!task)
1334 /* No more tasks, just exit */
1335 goto out;
1336
1337 /*
1338 * Something has shifted, try again.
1339 */
1340 put_task_struct(next_task);
1341 next_task = task;
1342 goto retry;
1140 } 1343 }
1141 1344
1142 deactivate_task(rq, next_task, 0); 1345 deactivate_task(rq, next_task, 0);
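
push_rt_task() now draws its candidate from rq->rt.pushable_tasks, a plist: a priority-sorted list whose head is always the highest-priority queued, non-running task that is allowed on more than one CPU (enqueue/dequeue and pick_next/put_prev keep it in sync). A userspace sketch of the "sorted on insert, peek the head" idea using a plain linked list; the kernel's plist is more elaborate, but this is the behaviour the code above relies on:

#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-in for the kernel's plist node. */
struct pnode {
	int prio;		/* lower value == more urgent, like task->prio */
	int tid;		/* which task this represents */
	struct pnode *next;
};

static void pushable_add(struct pnode **head, int prio, int tid)
{
	struct pnode *n = malloc(sizeof(*n));
	struct pnode **pp = head;

	if (!n)
		return;
	n->prio = prio;
	n->tid = tid;
	while (*pp && (*pp)->prio <= prio)	/* keep FIFO order within a prio */
		pp = &(*pp)->next;
	n->next = *pp;
	*pp = n;
}

/* The equivalent of pick_next_pushable_task(): peek at the head. */
static struct pnode *pushable_first(struct pnode *head)
{
	return head;	/* best candidate, or NULL if nothing is pushable */
}

int main(void)
{
	struct pnode *head = NULL, *best;

	pushable_add(&head, 60, 1);
	pushable_add(&head, 40, 2);
	pushable_add(&head, 50, 3);

	best = pushable_first(head);
	printf("push candidate: tid=%d prio=%d\n", best->tid, best->prio);

	while (head) {
		struct pnode *next = head->next;
		free(head);
		head = next;
	}
	return 0;
}
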
@@ -1147,23 +1350,12 @@ static int push_rt_task(struct rq *rq)
1147 1350
1148 double_unlock_balance(rq, lowest_rq); 1351 double_unlock_balance(rq, lowest_rq);
1149 1352
1150 ret = 1;
1151out: 1353out:
1152 put_task_struct(next_task); 1354 put_task_struct(next_task);
1153 1355
1154 return ret; 1356 return 1;
1155} 1357}
1156 1358
1157/*
1158 * TODO: Currently we just use the second highest prio task on
1159 * the queue, and stop when it can't migrate (or there's
1160 * no more RT tasks). There may be a case where a lower
1161 * priority RT task has a different affinity than the
1162 * higher RT task. In this case the lower RT task could
1163 * possibly be able to migrate where as the higher priority
1164 * RT task could not. We currently ignore this issue.
1165 * Enhancements are welcome!
1166 */
1167static void push_rt_tasks(struct rq *rq) 1359static void push_rt_tasks(struct rq *rq)
1168{ 1360{
1169 /* push_rt_task will return true if it moved an RT */ 1361 /* push_rt_task will return true if it moved an RT */
@@ -1174,33 +1366,35 @@ static void push_rt_tasks(struct rq *rq)
1174static int pull_rt_task(struct rq *this_rq) 1366static int pull_rt_task(struct rq *this_rq)
1175{ 1367{
1176 int this_cpu = this_rq->cpu, ret = 0, cpu; 1368 int this_cpu = this_rq->cpu, ret = 0, cpu;
1177 struct task_struct *p, *next; 1369 struct task_struct *p;
1178 struct rq *src_rq; 1370 struct rq *src_rq;
1179 1371
1180 if (likely(!rt_overloaded(this_rq))) 1372 if (likely(!rt_overloaded(this_rq)))
1181 return 0; 1373 return 0;
1182 1374
1183 next = pick_next_task_rt(this_rq);
1184
1185 for_each_cpu(cpu, this_rq->rd->rto_mask) { 1375 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1186 if (this_cpu == cpu) 1376 if (this_cpu == cpu)
1187 continue; 1377 continue;
1188 1378
1189 src_rq = cpu_rq(cpu); 1379 src_rq = cpu_rq(cpu);
1380
1381 /*
1382 * Don't bother taking the src_rq->lock if the next highest
1383 * task is known to be lower-priority than our current task.
1384 * This may look racy, but if this value is about to go
1385 * logically higher, the src_rq will push this task away.
1386 * And if its going logically lower, we do not care
1387 */
1388 if (src_rq->rt.highest_prio.next >=
1389 this_rq->rt.highest_prio.curr)
1390 continue;
1391
1190 /* 1392 /*
1191 * We can potentially drop this_rq's lock in 1393 * We can potentially drop this_rq's lock in
1192 * double_lock_balance, and another CPU could 1394 * double_lock_balance, and another CPU could
1193 * steal our next task - hence we must cause 1395 * alter this_rq
1194 * the caller to recalculate the next task
1195 * in that case:
1196 */ 1396 */
1197 if (double_lock_balance(this_rq, src_rq)) { 1397 double_lock_balance(this_rq, src_rq);
1198 struct task_struct *old_next = next;
1199
1200 next = pick_next_task_rt(this_rq);
1201 if (next != old_next)
1202 ret = 1;
1203 }
1204 1398
1205 /* 1399 /*
1206 * Are there still pullable RT tasks? 1400 * Are there still pullable RT tasks?
@@ -1214,7 +1408,7 @@ static int pull_rt_task(struct rq *this_rq)
1214 * Do we have an RT task that preempts 1408 * Do we have an RT task that preempts
1215 * the to-be-scheduled task? 1409 * the to-be-scheduled task?
1216 */ 1410 */
1217 if (p && (!next || (p->prio < next->prio))) { 1411 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1218 WARN_ON(p == src_rq->curr); 1412 WARN_ON(p == src_rq->curr);
1219 WARN_ON(!p->se.on_rq); 1413 WARN_ON(!p->se.on_rq);
1220 1414
@@ -1224,12 +1418,9 @@ static int pull_rt_task(struct rq *this_rq)
1224 * This is just that p is wakeing up and hasn't 1418 * This is just that p is wakeing up and hasn't
1225 * had a chance to schedule. We only pull 1419 * had a chance to schedule. We only pull
1226 * p if it is lower in priority than the 1420 * p if it is lower in priority than the
1227 * current task on the run queue or 1421 * current task on the run queue
1228 * this_rq next task is lower in prio than
1229 * the current task on that rq.
1230 */ 1422 */
1231 if (p->prio < src_rq->curr->prio || 1423 if (p->prio < src_rq->curr->prio)
1232 (next && next->prio < src_rq->curr->prio))
1233 goto skip; 1424 goto skip;
1234 1425
1235 ret = 1; 1426 ret = 1;
@@ -1242,13 +1433,7 @@ static int pull_rt_task(struct rq *this_rq)
1242 * case there's an even higher prio task 1433 * case there's an even higher prio task
1243 * in another runqueue. (low likelyhood 1434 * in another runqueue. (low likelyhood
1244 * but possible) 1435 * but possible)
1245 *
1246 * Update next so that we won't pick a task
1247 * on another cpu with a priority lower (or equal)
1248 * than the one we just picked.
1249 */ 1436 */
1250 next = p;
1251
1252 } 1437 }
1253 skip: 1438 skip:
1254 double_unlock_balance(this_rq, src_rq); 1439 double_unlock_balance(this_rq, src_rq);
@@ -1260,24 +1445,27 @@ static int pull_rt_task(struct rq *this_rq)
1260static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) 1445static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1261{ 1446{
1262 /* Try to pull RT tasks here if we lower this rq's prio */ 1447 /* Try to pull RT tasks here if we lower this rq's prio */
1263 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio) 1448 if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
1264 pull_rt_task(rq); 1449 pull_rt_task(rq);
1265} 1450}
1266 1451
1452/*
1453 * assumes rq->lock is held
1454 */
1455static int needs_post_schedule_rt(struct rq *rq)
1456{
1457 return has_pushable_tasks(rq);
1458}
1459
1267static void post_schedule_rt(struct rq *rq) 1460static void post_schedule_rt(struct rq *rq)
1268{ 1461{
1269 /* 1462 /*
1270 * If we have more than one rt_task queued, then 1463 * This is only called if needs_post_schedule_rt() indicates that
1271 * see if we can push the other rt_tasks off to other CPUS. 1464 * we need to push tasks away
1272 * Note we may release the rq lock, and since
1273 * the lock was owned by prev, we need to release it
1274 * first via finish_lock_switch and then reaquire it here.
1275 */ 1465 */
1276 if (unlikely(rq->rt.overloaded)) { 1466 spin_lock_irq(&rq->lock);
1277 spin_lock_irq(&rq->lock); 1467 push_rt_tasks(rq);
1278 push_rt_tasks(rq); 1468 spin_unlock_irq(&rq->lock);
1279 spin_unlock_irq(&rq->lock);
1280 }
1281} 1469}
1282 1470
1283/* 1471/*
@@ -1288,7 +1476,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1288{ 1476{
1289 if (!task_running(rq, p) && 1477 if (!task_running(rq, p) &&
1290 !test_tsk_need_resched(rq->curr) && 1478 !test_tsk_need_resched(rq->curr) &&
1291 rq->rt.overloaded) 1479 has_pushable_tasks(rq) &&
1480 p->rt.nr_cpus_allowed > 1)
1292 push_rt_tasks(rq); 1481 push_rt_tasks(rq);
1293} 1482}
1294 1483
@@ -1324,6 +1513,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1324 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) { 1513 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1325 struct rq *rq = task_rq(p); 1514 struct rq *rq = task_rq(p);
1326 1515
1516 if (!task_current(rq, p)) {
1517 /*
1518 * Make sure we dequeue this task from the pushable list
1519 * before going further. It will either remain off of
1520 * the list because we are no longer pushable, or it
1521 * will be requeued.
1522 */
1523 if (p->rt.nr_cpus_allowed > 1)
1524 dequeue_pushable_task(rq, p);
1525
1526 /*
1527 * Requeue if our weight is changing and still > 1
1528 */
1529 if (weight > 1)
1530 enqueue_pushable_task(rq, p);
1531
1532 }
1533
1327 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) { 1534 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1328 rq->rt.rt_nr_migratory++; 1535 rq->rt.rt_nr_migratory++;
1329 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) { 1536 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
@@ -1331,7 +1538,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1331 rq->rt.rt_nr_migratory--; 1538 rq->rt.rt_nr_migratory--;
1332 } 1539 }
1333 1540
1334 update_rt_migration(rq); 1541 update_rt_migration(&rq->rt);
1335 } 1542 }
1336 1543
1337 cpumask_copy(&p->cpus_allowed, new_mask); 1544 cpumask_copy(&p->cpus_allowed, new_mask);
@@ -1346,7 +1553,7 @@ static void rq_online_rt(struct rq *rq)
1346 1553
1347 __enable_runtime(rq); 1554 __enable_runtime(rq);
1348 1555
1349 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio); 1556 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1350} 1557}
1351 1558
1352/* Assumes rq->lock is held */ 1559/* Assumes rq->lock is held */
@@ -1438,7 +1645,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1438 * can release the rq lock and p could migrate. 1645 * can release the rq lock and p could migrate.
1439 * Only reschedule if p is still on the same runqueue. 1646 * Only reschedule if p is still on the same runqueue.
1440 */ 1647 */
1441 if (p->prio > rq->rt.highest_prio && rq->curr == p) 1648 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1442 resched_task(p); 1649 resched_task(p);
1443#else 1650#else
1444 /* For UP simply resched on drop of prio */ 1651 /* For UP simply resched on drop of prio */
@@ -1509,6 +1716,9 @@ static void set_curr_task_rt(struct rq *rq)
1509 struct task_struct *p = rq->curr; 1716 struct task_struct *p = rq->curr;
1510 1717
1511 p->se.exec_start = rq->clock; 1718 p->se.exec_start = rq->clock;
1719
1720 /* The running task is never eligible for pushing */
1721 dequeue_pushable_task(rq, p);
1512} 1722}
1513 1723
1514static const struct sched_class rt_sched_class = { 1724static const struct sched_class rt_sched_class = {
@@ -1531,6 +1741,7 @@ static const struct sched_class rt_sched_class = {
1531 .rq_online = rq_online_rt, 1741 .rq_online = rq_online_rt,
1532 .rq_offline = rq_offline_rt, 1742 .rq_offline = rq_offline_rt,
1533 .pre_schedule = pre_schedule_rt, 1743 .pre_schedule = pre_schedule_rt,
1744 .needs_post_schedule = needs_post_schedule_rt,
1534 .post_schedule = post_schedule_rt, 1745 .post_schedule = post_schedule_rt,
1535 .task_wake_up = task_wake_up_rt, 1746 .task_wake_up = task_wake_up_rt,
1536 .switched_from = switched_from_rt, 1747 .switched_from = switched_from_rt,
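
Taken together, the RT class now participates in three balancing hooks around a context switch: pre_schedule() may pull work in, needs_post_schedule() is a cheap check made while rq->lock is still held, and post_schedule() pushes work away only when that check said so. A rough sketch of the calling order with mock types (not the kernel's struct sched_class):

struct rq;					/* opaque in this sketch */

struct sched_class_model {
	void (*pre_schedule)(struct rq *rq);		/* may pull tasks in */
	int  (*needs_post_schedule)(struct rq *rq);	/* cheap check under rq->lock */
	void (*post_schedule)(struct rq *rq);		/* may push tasks away */
};

/* Roughly how schedule()/finish_task_switch() drive these hooks. */
void context_switch_model(struct rq *rq, const struct sched_class_model *class)
{
	int post = 0;

	if (class->pre_schedule)
		class->pre_schedule(rq);		/* before the next task is picked */

	/* ... pick_next_task(), switch_to() ... */

	if (class->needs_post_schedule)
		post = class->needs_post_schedule(rq);	/* while rq->lock is still held */

	/* ... finish_lock_switch() drops rq->lock ... */

	if (post)
		class->post_schedule(rq);		/* re-takes rq->lock only when needed */
}
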