Diffstat (limited to 'kernel')
-rw-r--r--  kernel/latencytop.c      |  83
-rw-r--r--  kernel/sched.c           | 129
-rw-r--r--  kernel/sched_debug.c     |   1
-rw-r--r--  kernel/sched_fair.c      |  59
-rw-r--r--  kernel/sched_features.h  |   3
-rw-r--r--  kernel/sched_rt.c        | 537
-rw-r--r--  kernel/user.c            |   3
7 files changed, 614 insertions(+), 201 deletions(-)
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 449db466bdbc..ca07c5c0c914 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -9,6 +9,44 @@ | |||
9 | * as published by the Free Software Foundation; version 2 | 9 | * as published by the Free Software Foundation; version 2 |
10 | * of the License. | 10 | * of the License. |
11 | */ | 11 | */ |
12 | |||
13 | /* | ||
14 | * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is | ||
15 | * used by the "latencytop" userspace tool. The latency that is tracked is not | ||
16 | * the 'traditional' interrupt latency (which is primarily caused by something | ||
17 | * else consuming CPU), but instead, it is the latency an application encounters | ||
18 | * because the kernel sleeps on its behalf for various reasons. | ||
19 | * | ||
20 | * This code tracks 2 levels of statistics: | ||
21 | * 1) System level latency | ||
22 | * 2) Per process latency | ||
23 | * | ||
24 | * The latency is stored in fixed sized data structures in an accumulated form; | ||
25 | * if the "same" latency cause is hit twice, this will be tracked as one entry | ||
26 | * in the data structure. Both the count, total accumulated latency and maximum | ||
27 | * latency are tracked in this data structure. When the fixed size structure is | ||
28 | * full, no new causes are tracked until the buffer is flushed by writing to | ||
29 | * the /proc file; the userspace tool does this on a regular basis. | ||
30 | * | ||
31 | * A latency cause is identified by a stringified backtrace at the point that | ||
32 | * the scheduler gets invoked. The userland tool will use this string to | ||
33 | * identify the cause of the latency in human readable form. | ||
34 | * | ||
35 | * The information is exported via /proc/latency_stats and /proc/<pid>/latency. | ||
36 | * These files look like this: | ||
37 | * | ||
38 | * Latency Top version : v0.1 | ||
39 | * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl | ||
40 | * | | | | | ||
41 | * | | | +----> the stringified backtrace | ||
42 | * | | +---------> The maximum latency for this entry in microseconds | ||
43 | * | +--------------> The accumulated latency for this entry (microseconds) | ||
44 | * +-------------------> The number of times this entry is hit | ||
45 | * | ||
46 | * (note: the average latency is the accumulated latency divided by the number | ||
47 | * of times) | ||
48 | */ | ||
49 | |||
12 | #include <linux/latencytop.h> | 50 | #include <linux/latencytop.h> |
13 | #include <linux/kallsyms.h> | 51 | #include <linux/kallsyms.h> |
14 | #include <linux/seq_file.h> | 52 | #include <linux/seq_file.h> |
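As a quick illustration of the /proc/latency_stats format documented in the new comment block above, a minimal user-space sketch (hypothetical, not part of this patch) can derive the average latency per entry, that is, the accumulated time divided by the hit count:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/latency_stats", "r");
	char line[4096];

	if (!f)
		return 1;

	/* skip the "Latency Top version : v0.1" header line */
	if (!fgets(line, sizeof(line), f)) {
		fclose(f);
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		unsigned long count, total, max;
		int off = 0;

		/* each entry: hit count, accumulated usecs, max usecs, backtrace */
		if (sscanf(line, "%lu %lu %lu %n", &count, &total, &max, &off) < 3)
			continue;
		if (count)
			printf("avg %lu us  max %lu us  %s",
			       total / count, max, line + off);
	}

	fclose(f);
	return 0;
}

Each output line then shows the average and maximum wait for one latency cause together with its stringified backtrace.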
@@ -72,7 +110,7 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record | |||
72 | firstnonnull = i; | 110 | firstnonnull = i; |
73 | continue; | 111 | continue; |
74 | } | 112 | } |
75 | for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { | 113 | for (q = 0; q < LT_BACKTRACEDEPTH; q++) { |
76 | unsigned long record = lat->backtrace[q]; | 114 | unsigned long record = lat->backtrace[q]; |
77 | 115 | ||
78 | if (latency_record[i].backtrace[q] != record) { | 116 | if (latency_record[i].backtrace[q] != record) { |
@@ -101,31 +139,52 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record | |||
101 | memcpy(&latency_record[i], lat, sizeof(struct latency_record)); | 139 | memcpy(&latency_record[i], lat, sizeof(struct latency_record)); |
102 | } | 140 | } |
103 | 141 | ||
104 | static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat) | 142 | /* |
143 | * Iterator to store a backtrace into a latency record entry | ||
144 | */ | ||
145 | static inline void store_stacktrace(struct task_struct *tsk, | ||
146 | struct latency_record *lat) | ||
105 | { | 147 | { |
106 | struct stack_trace trace; | 148 | struct stack_trace trace; |
107 | 149 | ||
108 | memset(&trace, 0, sizeof(trace)); | 150 | memset(&trace, 0, sizeof(trace)); |
109 | trace.max_entries = LT_BACKTRACEDEPTH; | 151 | trace.max_entries = LT_BACKTRACEDEPTH; |
110 | trace.entries = &lat->backtrace[0]; | 152 | trace.entries = &lat->backtrace[0]; |
111 | trace.skip = 0; | ||
112 | save_stack_trace_tsk(tsk, &trace); | 153 | save_stack_trace_tsk(tsk, &trace); |
113 | } | 154 | } |
114 | 155 | ||
156 | /** | ||
157 | * __account_scheduler_latency - record a latency that occurred | ||
158 | * @tsk - the task struct of the task hitting the latency | ||
159 | * @usecs - the duration of the latency in microseconds | ||
160 | * @inter - 1 if the sleep was interruptible, 0 if uninterruptible | ||
161 | * | ||
162 | * This function is the main entry point for recording latency entries | ||
163 | * as called by the scheduler. | ||
164 | * | ||
165 | * This function has a few special cases to deal with normal 'non-latency' | ||
166 | * sleeps: specifically, interruptible sleep longer than 5 msec is skipped | ||
167 | * since this usually is caused by waiting for events via select() and co. | ||
168 | * | ||
169 | * Negative latencies (caused by time going backwards) are also explicitly | ||
170 | * skipped. | ||
171 | */ | ||
115 | void __sched | 172 | void __sched |
116 | account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) | 173 | __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) |
117 | { | 174 | { |
118 | unsigned long flags; | 175 | unsigned long flags; |
119 | int i, q; | 176 | int i, q; |
120 | struct latency_record lat; | 177 | struct latency_record lat; |
121 | 178 | ||
122 | if (!latencytop_enabled) | ||
123 | return; | ||
124 | |||
125 | /* Long interruptible waits are generally user requested... */ | 179 | /* Long interruptible waits are generally user requested... */ |
126 | if (inter && usecs > 5000) | 180 | if (inter && usecs > 5000) |
127 | return; | 181 | return; |
128 | 182 | ||
183 | /* Negative sleeps are time going backwards */ | ||
184 | /* Zero-time sleeps are non-interesting */ | ||
185 | if (usecs <= 0) | ||
186 | return; | ||
187 | |||
129 | memset(&lat, 0, sizeof(lat)); | 188 | memset(&lat, 0, sizeof(lat)); |
130 | lat.count = 1; | 189 | lat.count = 1; |
131 | lat.time = usecs; | 190 | lat.time = usecs; |
@@ -143,12 +202,12 @@ account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) | |||
143 | if (tsk->latency_record_count >= LT_SAVECOUNT) | 202 | if (tsk->latency_record_count >= LT_SAVECOUNT) |
144 | goto out_unlock; | 203 | goto out_unlock; |
145 | 204 | ||
146 | for (i = 0; i < LT_SAVECOUNT ; i++) { | 205 | for (i = 0; i < LT_SAVECOUNT; i++) { |
147 | struct latency_record *mylat; | 206 | struct latency_record *mylat; |
148 | int same = 1; | 207 | int same = 1; |
149 | 208 | ||
150 | mylat = &tsk->latency_record[i]; | 209 | mylat = &tsk->latency_record[i]; |
151 | for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { | 210 | for (q = 0; q < LT_BACKTRACEDEPTH; q++) { |
152 | unsigned long record = lat.backtrace[q]; | 211 | unsigned long record = lat.backtrace[q]; |
153 | 212 | ||
154 | if (mylat->backtrace[q] != record) { | 213 | if (mylat->backtrace[q] != record) { |
@@ -186,7 +245,7 @@ static int lstats_show(struct seq_file *m, void *v) | |||
186 | for (i = 0; i < MAXLR; i++) { | 245 | for (i = 0; i < MAXLR; i++) { |
187 | if (latency_record[i].backtrace[0]) { | 246 | if (latency_record[i].backtrace[0]) { |
188 | int q; | 247 | int q; |
189 | seq_printf(m, "%i %li %li ", | 248 | seq_printf(m, "%i %lu %lu ", |
190 | latency_record[i].count, | 249 | latency_record[i].count, |
191 | latency_record[i].time, | 250 | latency_record[i].time, |
192 | latency_record[i].max); | 251 | latency_record[i].max); |
@@ -223,7 +282,7 @@ static int lstats_open(struct inode *inode, struct file *filp) | |||
223 | return single_open(filp, lstats_show, NULL); | 282 | return single_open(filp, lstats_show, NULL); |
224 | } | 283 | } |
225 | 284 | ||
226 | static struct file_operations lstats_fops = { | 285 | static const struct file_operations lstats_fops = { |
227 | .open = lstats_open, | 286 | .open = lstats_open, |
228 | .read = seq_read, | 287 | .read = seq_read, |
229 | .write = lstats_write, | 288 | .write = lstats_write, |
@@ -236,4 +295,4 @@ static int __init init_lstats_procfs(void) | |||
236 | proc_create("latency_stats", 0644, NULL, &lstats_fops); | 295 | proc_create("latency_stats", 0644, NULL, &lstats_fops); |
237 | return 0; | 296 | return 0; |
238 | } | 297 | } |
239 | __initcall(init_lstats_procfs); | 298 | device_initcall(init_lstats_procfs); |
diff --git a/kernel/sched.c b/kernel/sched.c
index 410eec404133..5faf5d482fcd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -467,11 +467,17 @@ struct rt_rq { | |||
467 | struct rt_prio_array active; | 467 | struct rt_prio_array active; |
468 | unsigned long rt_nr_running; | 468 | unsigned long rt_nr_running; |
469 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED | 469 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED |
470 | int highest_prio; /* highest queued rt task prio */ | 470 | struct { |
471 | int curr; /* highest queued rt task prio */ | ||
472 | #ifdef CONFIG_SMP | ||
473 | int next; /* next highest */ | ||
474 | #endif | ||
475 | } highest_prio; | ||
471 | #endif | 476 | #endif |
472 | #ifdef CONFIG_SMP | 477 | #ifdef CONFIG_SMP |
473 | unsigned long rt_nr_migratory; | 478 | unsigned long rt_nr_migratory; |
474 | int overloaded; | 479 | int overloaded; |
480 | struct plist_head pushable_tasks; | ||
475 | #endif | 481 | #endif |
476 | int rt_throttled; | 482 | int rt_throttled; |
477 | u64 rt_time; | 483 | u64 rt_time; |
@@ -1610,21 +1616,42 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd) | |||
1610 | 1616 | ||
1611 | #endif | 1617 | #endif |
1612 | 1618 | ||
1619 | #ifdef CONFIG_PREEMPT | ||
1620 | |||
1613 | /* | 1621 | /* |
1614 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. | 1622 | * fair double_lock_balance: Safely acquires both rq->locks in a fair |
1623 | * way at the expense of forcing extra atomic operations in all | ||
1624 | * invocations. This assures that the double_lock is acquired using the | ||
1625 | * same underlying policy as the spinlock_t on this architecture, which | ||
1626 | * reduces latency compared to the unfair variant below. However, it | ||
1627 | * also adds more overhead and therefore may reduce throughput. | ||
1615 | */ | 1628 | */ |
1616 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | 1629 | static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest) |
1630 | __releases(this_rq->lock) | ||
1631 | __acquires(busiest->lock) | ||
1632 | __acquires(this_rq->lock) | ||
1633 | { | ||
1634 | spin_unlock(&this_rq->lock); | ||
1635 | double_rq_lock(this_rq, busiest); | ||
1636 | |||
1637 | return 1; | ||
1638 | } | ||
1639 | |||
1640 | #else | ||
1641 | /* | ||
1642 | * Unfair double_lock_balance: Optimizes throughput at the expense of | ||
1643 | * latency by eliminating extra atomic operations when the locks are | ||
1644 | * already in proper order on entry. This favors lower cpu-ids and will | ||
1645 | * grant the double lock to lower cpus over higher ids under contention, | ||
1646 | * regardless of entry order into the function. | ||
1647 | */ | ||
1648 | static int _double_lock_balance(struct rq *this_rq, struct rq *busiest) | ||
1617 | __releases(this_rq->lock) | 1649 | __releases(this_rq->lock) |
1618 | __acquires(busiest->lock) | 1650 | __acquires(busiest->lock) |
1619 | __acquires(this_rq->lock) | 1651 | __acquires(this_rq->lock) |
1620 | { | 1652 | { |
1621 | int ret = 0; | 1653 | int ret = 0; |
1622 | 1654 | ||
1623 | if (unlikely(!irqs_disabled())) { | ||
1624 | /* printk() doesn't work good under rq->lock */ | ||
1625 | spin_unlock(&this_rq->lock); | ||
1626 | BUG_ON(1); | ||
1627 | } | ||
1628 | if (unlikely(!spin_trylock(&busiest->lock))) { | 1655 | if (unlikely(!spin_trylock(&busiest->lock))) { |
1629 | if (busiest < this_rq) { | 1656 | if (busiest < this_rq) { |
1630 | spin_unlock(&this_rq->lock); | 1657 | spin_unlock(&this_rq->lock); |
@@ -1637,6 +1664,22 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | |||
1637 | return ret; | 1664 | return ret; |
1638 | } | 1665 | } |
1639 | 1666 | ||
1667 | #endif /* CONFIG_PREEMPT */ | ||
1668 | |||
1669 | /* | ||
1670 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. | ||
1671 | */ | ||
1672 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | ||
1673 | { | ||
1674 | if (unlikely(!irqs_disabled())) { | ||
1675 | /* printk() doesn't work good under rq->lock */ | ||
1676 | spin_unlock(&this_rq->lock); | ||
1677 | BUG_ON(1); | ||
1678 | } | ||
1679 | |||
1680 | return _double_lock_balance(this_rq, busiest); | ||
1681 | } | ||
1682 | |||
1640 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | 1683 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) |
1641 | __releases(busiest->lock) | 1684 | __releases(busiest->lock) |
1642 | { | 1685 | { |
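The unfair variant above resolves contention by releasing this_rq and then taking the lower-addressed (equivalently, lower cpu-id) runqueue lock first, the same convention double_rq_lock() follows, so two CPUs grabbing the same pair of locks can never deadlock. A minimal user-space sketch of that ordering rule in isolation (illustrative only, not taken from this patch; in the kernel the inner acquisition additionally carries a lockdep nesting annotation):

#include <pthread.h>

/* Take two locks in a globally consistent order: the lower-addressed one
 * first.  Whichever thread arrives second simply blocks instead of
 * spinning into an ABBA deadlock. */
static void double_lock_ordered(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
	} else if (a < b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}

The CONFIG_PREEMPT variant deliberately skips the trylock fast path and always re-acquires both locks through the ordinary (fair) spinlock path, trading some throughput for bounded latency, as its comment explains.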
@@ -1705,6 +1748,9 @@ static void update_avg(u64 *avg, u64 sample) | |||
1705 | 1748 | ||
1706 | static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) | 1749 | static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) |
1707 | { | 1750 | { |
1751 | if (wakeup) | ||
1752 | p->se.start_runtime = p->se.sum_exec_runtime; | ||
1753 | |||
1708 | sched_info_queued(p); | 1754 | sched_info_queued(p); |
1709 | p->sched_class->enqueue_task(rq, p, wakeup); | 1755 | p->sched_class->enqueue_task(rq, p, wakeup); |
1710 | p->se.on_rq = 1; | 1756 | p->se.on_rq = 1; |
@@ -1712,10 +1758,15 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) | |||
1712 | 1758 | ||
1713 | static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) | 1759 | static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) |
1714 | { | 1760 | { |
1715 | if (sleep && p->se.last_wakeup) { | 1761 | if (sleep) { |
1716 | update_avg(&p->se.avg_overlap, | 1762 | if (p->se.last_wakeup) { |
1717 | p->se.sum_exec_runtime - p->se.last_wakeup); | 1763 | update_avg(&p->se.avg_overlap, |
1718 | p->se.last_wakeup = 0; | 1764 | p->se.sum_exec_runtime - p->se.last_wakeup); |
1765 | p->se.last_wakeup = 0; | ||
1766 | } else { | ||
1767 | update_avg(&p->se.avg_wakeup, | ||
1768 | sysctl_sched_wakeup_granularity); | ||
1769 | } | ||
1719 | } | 1770 | } |
1720 | 1771 | ||
1721 | sched_info_dequeued(p); | 1772 | sched_info_dequeued(p); |
@@ -2345,6 +2396,22 @@ out_activate: | |||
2345 | activate_task(rq, p, 1); | 2396 | activate_task(rq, p, 1); |
2346 | success = 1; | 2397 | success = 1; |
2347 | 2398 | ||
2399 | /* | ||
2400 | * Only attribute actual wakeups done by this task. | ||
2401 | */ | ||
2402 | if (!in_interrupt()) { | ||
2403 | struct sched_entity *se = ¤t->se; | ||
2404 | u64 sample = se->sum_exec_runtime; | ||
2405 | |||
2406 | if (se->last_wakeup) | ||
2407 | sample -= se->last_wakeup; | ||
2408 | else | ||
2409 | sample -= se->start_runtime; | ||
2410 | update_avg(&se->avg_wakeup, sample); | ||
2411 | |||
2412 | se->last_wakeup = se->sum_exec_runtime; | ||
2413 | } | ||
2414 | |||
2348 | out_running: | 2415 | out_running: |
2349 | trace_sched_wakeup(rq, p, success); | 2416 | trace_sched_wakeup(rq, p, success); |
2350 | check_preempt_curr(rq, p, sync); | 2417 | check_preempt_curr(rq, p, sync); |
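The wakeup accounting added above feeds se->avg_wakeup, which the new adaptive_gran() logic in sched_fair.c consumes. update_avg() (its body is not shown in this diff) keeps a running average that, roughly, moves 1/8 of the way toward each new sample; a toy user-space model of how avg_wakeup converges for a task that runs about 1 ms before each wakeup it performs (all numbers here are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Rough stand-in for the kernel's update_avg(): an exponential moving
 * average with about a 1/8 weight per sample. */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)sample - (int64_t)*avg;

	*avg += diff / 8;
}

int main(void)
{
	/* __sched_fork() seeds avg_wakeup from the wakeup granularity;
	 * assume 5 ms here purely for illustration. */
	uint64_t avg_wakeup = 5000000;
	int i;

	/* a task that runs ~1 ms before each wakeup it does pulls its
	 * avg_wakeup down toward 1 ms over successive wakeups */
	for (i = 0; i < 20; i++)
		update_avg(&avg_wakeup, 1000000);

	printf("avg_wakeup after 20 wakeups: %llu ns\n",
	       (unsigned long long)avg_wakeup);
	return 0;
}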
@@ -2355,8 +2422,6 @@ out_running: | |||
2355 | p->sched_class->task_wake_up(rq, p); | 2422 | p->sched_class->task_wake_up(rq, p); |
2356 | #endif | 2423 | #endif |
2357 | out: | 2424 | out: |
2358 | current->se.last_wakeup = current->se.sum_exec_runtime; | ||
2359 | |||
2360 | task_rq_unlock(rq, &flags); | 2425 | task_rq_unlock(rq, &flags); |
2361 | 2426 | ||
2362 | return success; | 2427 | return success; |
@@ -2386,6 +2451,8 @@ static void __sched_fork(struct task_struct *p) | |||
2386 | p->se.prev_sum_exec_runtime = 0; | 2451 | p->se.prev_sum_exec_runtime = 0; |
2387 | p->se.last_wakeup = 0; | 2452 | p->se.last_wakeup = 0; |
2388 | p->se.avg_overlap = 0; | 2453 | p->se.avg_overlap = 0; |
2454 | p->se.start_runtime = 0; | ||
2455 | p->se.avg_wakeup = sysctl_sched_wakeup_granularity; | ||
2389 | 2456 | ||
2390 | #ifdef CONFIG_SCHEDSTATS | 2457 | #ifdef CONFIG_SCHEDSTATS |
2391 | p->se.wait_start = 0; | 2458 | p->se.wait_start = 0; |
@@ -2448,6 +2515,8 @@ void sched_fork(struct task_struct *p, int clone_flags) | |||
2448 | /* Want to start with kernel preemption disabled. */ | 2515 | /* Want to start with kernel preemption disabled. */ |
2449 | task_thread_info(p)->preempt_count = 1; | 2516 | task_thread_info(p)->preempt_count = 1; |
2450 | #endif | 2517 | #endif |
2518 | plist_node_init(&p->pushable_tasks, MAX_PRIO); | ||
2519 | |||
2451 | put_cpu(); | 2520 | put_cpu(); |
2452 | } | 2521 | } |
2453 | 2522 | ||
@@ -2588,6 +2657,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | |||
2588 | { | 2657 | { |
2589 | struct mm_struct *mm = rq->prev_mm; | 2658 | struct mm_struct *mm = rq->prev_mm; |
2590 | long prev_state; | 2659 | long prev_state; |
2660 | #ifdef CONFIG_SMP | ||
2661 | int post_schedule = 0; | ||
2662 | |||
2663 | if (current->sched_class->needs_post_schedule) | ||
2664 | post_schedule = current->sched_class->needs_post_schedule(rq); | ||
2665 | #endif | ||
2591 | 2666 | ||
2592 | rq->prev_mm = NULL; | 2667 | rq->prev_mm = NULL; |
2593 | 2668 | ||
@@ -2606,7 +2681,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | |||
2606 | finish_arch_switch(prev); | 2681 | finish_arch_switch(prev); |
2607 | finish_lock_switch(rq, prev); | 2682 | finish_lock_switch(rq, prev); |
2608 | #ifdef CONFIG_SMP | 2683 | #ifdef CONFIG_SMP |
2609 | if (current->sched_class->post_schedule) | 2684 | if (post_schedule) |
2610 | current->sched_class->post_schedule(rq); | 2685 | current->sched_class->post_schedule(rq); |
2611 | #endif | 2686 | #endif |
2612 | 2687 | ||
@@ -2987,6 +3062,16 @@ next: | |||
2987 | pulled++; | 3062 | pulled++; |
2988 | rem_load_move -= p->se.load.weight; | 3063 | rem_load_move -= p->se.load.weight; |
2989 | 3064 | ||
3065 | #ifdef CONFIG_PREEMPT | ||
3066 | /* | ||
3067 | * NEWIDLE balancing is a source of latency, so preemptible kernels | ||
3068 | * will stop after the first task is pulled to minimize the critical | ||
3069 | * section. | ||
3070 | */ | ||
3071 | if (idle == CPU_NEWLY_IDLE) | ||
3072 | goto out; | ||
3073 | #endif | ||
3074 | |||
2990 | /* | 3075 | /* |
2991 | * We only want to steal up to the prescribed amount of weighted load. | 3076 | * We only want to steal up to the prescribed amount of weighted load. |
2992 | */ | 3077 | */ |
@@ -3033,9 +3118,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
3033 | sd, idle, all_pinned, &this_best_prio); | 3118 | sd, idle, all_pinned, &this_best_prio); |
3034 | class = class->next; | 3119 | class = class->next; |
3035 | 3120 | ||
3121 | #ifdef CONFIG_PREEMPT | ||
3122 | /* | ||
3123 | * NEWIDLE balancing is a source of latency, so preemptible | ||
3124 | * kernels will stop after the first task is pulled to minimize | ||
3125 | * the critical section. | ||
3126 | */ | ||
3036 | if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) | 3127 | if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) |
3037 | break; | 3128 | break; |
3038 | 3129 | #endif | |
3039 | } while (class && max_load_move > total_load_moved); | 3130 | } while (class && max_load_move > total_load_moved); |
3040 | 3131 | ||
3041 | return total_load_moved > 0; | 3132 | return total_load_moved > 0; |
@@ -8218,11 +8309,15 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) | |||
8218 | __set_bit(MAX_RT_PRIO, array->bitmap); | 8309 | __set_bit(MAX_RT_PRIO, array->bitmap); |
8219 | 8310 | ||
8220 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED | 8311 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED |
8221 | rt_rq->highest_prio = MAX_RT_PRIO; | 8312 | rt_rq->highest_prio.curr = MAX_RT_PRIO; |
8313 | #ifdef CONFIG_SMP | ||
8314 | rt_rq->highest_prio.next = MAX_RT_PRIO; | ||
8315 | #endif | ||
8222 | #endif | 8316 | #endif |
8223 | #ifdef CONFIG_SMP | 8317 | #ifdef CONFIG_SMP |
8224 | rt_rq->rt_nr_migratory = 0; | 8318 | rt_rq->rt_nr_migratory = 0; |
8225 | rt_rq->overloaded = 0; | 8319 | rt_rq->overloaded = 0; |
8320 | plist_head_init(&rq->rt.pushable_tasks, &rq->lock); | ||
8226 | #endif | 8321 | #endif |
8227 | 8322 | ||
8228 | rt_rq->rt_time = 0; | 8323 | rt_rq->rt_time = 0; |
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 16eeba4e4169..2b1260f0e800 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -397,6 +397,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) | |||
397 | PN(se.vruntime); | 397 | PN(se.vruntime); |
398 | PN(se.sum_exec_runtime); | 398 | PN(se.sum_exec_runtime); |
399 | PN(se.avg_overlap); | 399 | PN(se.avg_overlap); |
400 | PN(se.avg_wakeup); | ||
400 | 401 | ||
401 | nr_switches = p->nvcsw + p->nivcsw; | 402 | nr_switches = p->nvcsw + p->nivcsw; |
402 | 403 | ||
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0566f2a03c42..3816f217f119 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1314,16 +1314,63 @@ out: | |||
1314 | } | 1314 | } |
1315 | #endif /* CONFIG_SMP */ | 1315 | #endif /* CONFIG_SMP */ |
1316 | 1316 | ||
1317 | static unsigned long wakeup_gran(struct sched_entity *se) | 1317 | /* |
1318 | * Adaptive granularity | ||
1319 | * | ||
1320 | * se->avg_wakeup gives the average time a task runs until it does a wakeup, | ||
1321 | * with the limit of wakeup_gran -- when it never does a wakeup. | ||
1322 | * | ||
1323 | * So the smaller avg_wakeup is the faster we want this task to preempt, | ||
1324 | * but we don't want to treat the preemptee unfairly and therefore allow it | ||
1325 | * to run for at least the amount of time we'd like to run. | ||
1326 | * | ||
1327 | * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one | ||
1328 | * | ||
1329 | * NOTE: we use *nr_running to scale with load, this nicely matches the | ||
1330 | * degrading latency on load. | ||
1331 | */ | ||
1332 | static unsigned long | ||
1333 | adaptive_gran(struct sched_entity *curr, struct sched_entity *se) | ||
1334 | { | ||
1335 | u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; | ||
1336 | u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running; | ||
1337 | u64 gran = 0; | ||
1338 | |||
1339 | if (this_run < expected_wakeup) | ||
1340 | gran = expected_wakeup - this_run; | ||
1341 | |||
1342 | return min_t(s64, gran, sysctl_sched_wakeup_granularity); | ||
1343 | } | ||
1344 | |||
1345 | static unsigned long | ||
1346 | wakeup_gran(struct sched_entity *curr, struct sched_entity *se) | ||
1318 | { | 1347 | { |
1319 | unsigned long gran = sysctl_sched_wakeup_granularity; | 1348 | unsigned long gran = sysctl_sched_wakeup_granularity; |
1320 | 1349 | ||
1350 | if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN)) | ||
1351 | gran = adaptive_gran(curr, se); | ||
1352 | |||
1321 | /* | 1353 | /* |
1322 | * More easily preempt - nice tasks, while not making it harder for | 1354 | * Since it is curr that is running now, convert the gran from real-time |
1323 | * + nice tasks. | 1355 | * to virtual-time in its units. |
1324 | */ | 1356 | */ |
1325 | if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD) | 1357 | if (sched_feat(ASYM_GRAN)) { |
1326 | gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se); | 1358 | /* |
1359 | * By using 'se' instead of 'curr' we penalize light tasks, so | ||
1360 | * they get preempted easier. That is, if 'se' < 'curr' then | ||
1361 | * the resulting gran will be larger, therefore penalizing the | ||
1362 | * lighter, if otoh 'se' > 'curr' then the resulting gran will | ||
1363 | * be smaller, again penalizing the lighter task. | ||
1364 | * | ||
1365 | * This is especially important for buddies when the leftmost | ||
1366 | * task is higher priority than the buddy. | ||
1367 | */ | ||
1368 | if (unlikely(se->load.weight != NICE_0_LOAD)) | ||
1369 | gran = calc_delta_fair(gran, se); | ||
1370 | } else { | ||
1371 | if (unlikely(curr->load.weight != NICE_0_LOAD)) | ||
1372 | gran = calc_delta_fair(gran, curr); | ||
1373 | } | ||
1327 | 1374 | ||
1328 | return gran; | 1375 | return gran; |
1329 | } | 1376 | } |
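To make the arithmetic behind adaptive_gran() concrete, here is a stand-alone model of the same computation (the numbers are purely illustrative and the clamp value stands in for sysctl_sched_wakeup_granularity):

#include <stdint.h>
#include <stdio.h>

/* Model of adaptive_gran(): the expected wakeup horizon is 2 * avg_wakeup
 * scaled by the number of runnable tasks; the granularity is whatever is
 * left of that horizon after the time curr has already run, clamped to
 * the configured wakeup granularity. */
static uint64_t adaptive_gran_model(uint64_t this_run, uint64_t avg_wakeup,
				    uint64_t nr_running, uint64_t limit)
{
	uint64_t expected_wakeup = 2 * avg_wakeup * nr_running;
	uint64_t gran = 0;

	if (this_run < expected_wakeup)
		gran = expected_wakeup - this_run;

	return gran < limit ? gran : limit;
}

int main(void)
{
	/* avg_wakeup 1 ms, two runnable tasks, curr has run 1.5 ms this
	 * slice, limit 5 ms: expected_wakeup is 4 ms, so gran is 2.5 ms,
	 * which is below the limit and therefore used as-is. */
	printf("gran = %llu ns\n", (unsigned long long)
	       adaptive_gran_model(1500000, 1000000, 2, 5000000));
	return 0;
}

A task that wakes others frequently (small avg_wakeup) therefore ends up with a small granularity and preempts sooner, while a task that never wakes anyone keeps the full sysctl limit.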
@@ -1350,7 +1397,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) | |||
1350 | if (vdiff <= 0) | 1397 | if (vdiff <= 0) |
1351 | return -1; | 1398 | return -1; |
1352 | 1399 | ||
1353 | gran = wakeup_gran(curr); | 1400 | gran = wakeup_gran(curr, se); |
1354 | if (vdiff > gran) | 1401 | if (vdiff > gran) |
1355 | return 1; | 1402 | return 1; |
1356 | 1403 | ||
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index da5d93b5d2c6..76f61756e677 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,5 +1,6 @@ | |||
1 | SCHED_FEAT(NEW_FAIR_SLEEPERS, 1) | 1 | SCHED_FEAT(NEW_FAIR_SLEEPERS, 1) |
2 | SCHED_FEAT(NORMALIZED_SLEEPER, 1) | 2 | SCHED_FEAT(NORMALIZED_SLEEPER, 0) |
3 | SCHED_FEAT(ADAPTIVE_GRAN, 1) | ||
3 | SCHED_FEAT(WAKEUP_PREEMPT, 1) | 4 | SCHED_FEAT(WAKEUP_PREEMPT, 1) |
4 | SCHED_FEAT(START_DEBIT, 1) | 5 | SCHED_FEAT(START_DEBIT, 1) |
5 | SCHED_FEAT(AFFINE_WAKEUPS, 1) | 6 | SCHED_FEAT(AFFINE_WAKEUPS, 1) |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bac1061cea2f..c79dc7844012 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,40 @@ | |||
3 | * policies) | 3 | * policies) |
4 | */ | 4 | */ |
5 | 5 | ||
6 | static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) | ||
7 | { | ||
8 | return container_of(rt_se, struct task_struct, rt); | ||
9 | } | ||
10 | |||
11 | #ifdef CONFIG_RT_GROUP_SCHED | ||
12 | |||
13 | static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) | ||
14 | { | ||
15 | return rt_rq->rq; | ||
16 | } | ||
17 | |||
18 | static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) | ||
19 | { | ||
20 | return rt_se->rt_rq; | ||
21 | } | ||
22 | |||
23 | #else /* CONFIG_RT_GROUP_SCHED */ | ||
24 | |||
25 | static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) | ||
26 | { | ||
27 | return container_of(rt_rq, struct rq, rt); | ||
28 | } | ||
29 | |||
30 | static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) | ||
31 | { | ||
32 | struct task_struct *p = rt_task_of(rt_se); | ||
33 | struct rq *rq = task_rq(p); | ||
34 | |||
35 | return &rq->rt; | ||
36 | } | ||
37 | |||
38 | #endif /* CONFIG_RT_GROUP_SCHED */ | ||
39 | |||
6 | #ifdef CONFIG_SMP | 40 | #ifdef CONFIG_SMP |
7 | 41 | ||
8 | static inline int rt_overloaded(struct rq *rq) | 42 | static inline int rt_overloaded(struct rq *rq) |
@@ -37,25 +71,69 @@ static inline void rt_clear_overload(struct rq *rq) | |||
37 | cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); | 71 | cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); |
38 | } | 72 | } |
39 | 73 | ||
40 | static void update_rt_migration(struct rq *rq) | 74 | static void update_rt_migration(struct rt_rq *rt_rq) |
41 | { | 75 | { |
42 | if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) { | 76 | if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) { |
43 | if (!rq->rt.overloaded) { | 77 | if (!rt_rq->overloaded) { |
44 | rt_set_overload(rq); | 78 | rt_set_overload(rq_of_rt_rq(rt_rq)); |
45 | rq->rt.overloaded = 1; | 79 | rt_rq->overloaded = 1; |
46 | } | 80 | } |
47 | } else if (rq->rt.overloaded) { | 81 | } else if (rt_rq->overloaded) { |
48 | rt_clear_overload(rq); | 82 | rt_clear_overload(rq_of_rt_rq(rt_rq)); |
49 | rq->rt.overloaded = 0; | 83 | rt_rq->overloaded = 0; |
50 | } | 84 | } |
51 | } | 85 | } |
52 | #endif /* CONFIG_SMP */ | ||
53 | 86 | ||
54 | static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) | 87 | static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) |
88 | { | ||
89 | if (rt_se->nr_cpus_allowed > 1) | ||
90 | rt_rq->rt_nr_migratory++; | ||
91 | |||
92 | update_rt_migration(rt_rq); | ||
93 | } | ||
94 | |||
95 | static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | ||
96 | { | ||
97 | if (rt_se->nr_cpus_allowed > 1) | ||
98 | rt_rq->rt_nr_migratory--; | ||
99 | |||
100 | update_rt_migration(rt_rq); | ||
101 | } | ||
102 | |||
103 | static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) | ||
104 | { | ||
105 | plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); | ||
106 | plist_node_init(&p->pushable_tasks, p->prio); | ||
107 | plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); | ||
108 | } | ||
109 | |||
110 | static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) | ||
111 | { | ||
112 | plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); | ||
113 | } | ||
114 | |||
115 | #else | ||
116 | |||
117 | static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p) | ||
55 | { | 118 | { |
56 | return container_of(rt_se, struct task_struct, rt); | ||
57 | } | 119 | } |
58 | 120 | ||
121 | static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p) | ||
122 | { | ||
123 | } | ||
124 | |||
125 | static inline | ||
126 | void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | ||
127 | { | ||
128 | } | ||
129 | |||
130 | static inline | ||
131 | void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | ||
132 | { | ||
133 | } | ||
134 | |||
135 | #endif /* CONFIG_SMP */ | ||
136 | |||
59 | static inline int on_rt_rq(struct sched_rt_entity *rt_se) | 137 | static inline int on_rt_rq(struct sched_rt_entity *rt_se) |
60 | { | 138 | { |
61 | return !list_empty(&rt_se->run_list); | 139 | return !list_empty(&rt_se->run_list); |
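The pushable_tasks plist added above is kept sorted by priority, so the head of the list is always the best push candidate; pick_next_pushable_task() and push_rt_task() later in this patch rely on exactly that property. A toy user-space stand-in for the sorted insert (illustrative only; the kernel uses the generic plist implementation):

#include <stdio.h>

struct node {
	int prio;		/* lower number means higher RT priority */
	struct node *next;
};

/* Insert so the list stays ordered by ascending prio value, i.e. the
 * highest-priority entry stays at the head. */
static struct node *sorted_add(struct node *head, struct node *n)
{
	struct node **pp = &head;

	while (*pp && (*pp)->prio <= n->prio)
		pp = &(*pp)->next;
	n->next = *pp;
	*pp = n;

	return head;
}

int main(void)
{
	struct node tasks[] = { { 50 }, { 20 }, { 35 } };
	struct node *head = NULL;
	struct node *n;
	int i;

	for (i = 0; i < 3; i++)
		head = sorted_add(head, &tasks[i]);

	for (n = head; n; n = n->next)
		printf("prio %d\n", n->prio);	/* prints 20, 35, 50 */

	return 0;
}

Requeueing a task whose priority changed follows the same pattern as enqueue_pushable_task() above: delete the node, reinitialise it with the new priority, and insert it again.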
@@ -79,16 +157,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq) | |||
79 | #define for_each_leaf_rt_rq(rt_rq, rq) \ | 157 | #define for_each_leaf_rt_rq(rt_rq, rq) \ |
80 | list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) | 158 | list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) |
81 | 159 | ||
82 | static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) | ||
83 | { | ||
84 | return rt_rq->rq; | ||
85 | } | ||
86 | |||
87 | static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) | ||
88 | { | ||
89 | return rt_se->rt_rq; | ||
90 | } | ||
91 | |||
92 | #define for_each_sched_rt_entity(rt_se) \ | 160 | #define for_each_sched_rt_entity(rt_se) \ |
93 | for (; rt_se; rt_se = rt_se->parent) | 161 | for (; rt_se; rt_se = rt_se->parent) |
94 | 162 | ||
@@ -108,7 +176,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) | |||
108 | if (rt_rq->rt_nr_running) { | 176 | if (rt_rq->rt_nr_running) { |
109 | if (rt_se && !on_rt_rq(rt_se)) | 177 | if (rt_se && !on_rt_rq(rt_se)) |
110 | enqueue_rt_entity(rt_se); | 178 | enqueue_rt_entity(rt_se); |
111 | if (rt_rq->highest_prio < curr->prio) | 179 | if (rt_rq->highest_prio.curr < curr->prio) |
112 | resched_task(curr); | 180 | resched_task(curr); |
113 | } | 181 | } |
114 | } | 182 | } |
@@ -176,19 +244,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq) | |||
176 | #define for_each_leaf_rt_rq(rt_rq, rq) \ | 244 | #define for_each_leaf_rt_rq(rt_rq, rq) \ |
177 | for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL) | 245 | for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL) |
178 | 246 | ||
179 | static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) | ||
180 | { | ||
181 | return container_of(rt_rq, struct rq, rt); | ||
182 | } | ||
183 | |||
184 | static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) | ||
185 | { | ||
186 | struct task_struct *p = rt_task_of(rt_se); | ||
187 | struct rq *rq = task_rq(p); | ||
188 | |||
189 | return &rq->rt; | ||
190 | } | ||
191 | |||
192 | #define for_each_sched_rt_entity(rt_se) \ | 247 | #define for_each_sched_rt_entity(rt_se) \ |
193 | for (; rt_se; rt_se = NULL) | 248 | for (; rt_se; rt_se = NULL) |
194 | 249 | ||
@@ -473,7 +528,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se) | |||
473 | struct rt_rq *rt_rq = group_rt_rq(rt_se); | 528 | struct rt_rq *rt_rq = group_rt_rq(rt_se); |
474 | 529 | ||
475 | if (rt_rq) | 530 | if (rt_rq) |
476 | return rt_rq->highest_prio; | 531 | return rt_rq->highest_prio.curr; |
477 | #endif | 532 | #endif |
478 | 533 | ||
479 | return rt_task_of(rt_se)->prio; | 534 | return rt_task_of(rt_se)->prio; |
@@ -547,91 +602,174 @@ static void update_curr_rt(struct rq *rq) | |||
547 | } | 602 | } |
548 | } | 603 | } |
549 | 604 | ||
550 | static inline | 605 | #if defined CONFIG_SMP |
551 | void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | 606 | |
607 | static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu); | ||
608 | |||
609 | static inline int next_prio(struct rq *rq) | ||
552 | { | 610 | { |
553 | WARN_ON(!rt_prio(rt_se_prio(rt_se))); | 611 | struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu); |
554 | rt_rq->rt_nr_running++; | 612 | |
555 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED | 613 | if (next && rt_prio(next->prio)) |
556 | if (rt_se_prio(rt_se) < rt_rq->highest_prio) { | 614 | return next->prio; |
557 | #ifdef CONFIG_SMP | 615 | else |
558 | struct rq *rq = rq_of_rt_rq(rt_rq); | 616 | return MAX_RT_PRIO; |
559 | #endif | 617 | } |
618 | |||
619 | static void | ||
620 | inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) | ||
621 | { | ||
622 | struct rq *rq = rq_of_rt_rq(rt_rq); | ||
623 | |||
624 | if (prio < prev_prio) { | ||
625 | |||
626 | /* | ||
627 | * If the new task is higher in priority than anything on the | ||
628 | * run-queue, we know that the previous high becomes our | ||
629 | * next-highest. | ||
630 | */ | ||
631 | rt_rq->highest_prio.next = prev_prio; | ||
560 | 632 | ||
561 | rt_rq->highest_prio = rt_se_prio(rt_se); | ||
562 | #ifdef CONFIG_SMP | ||
563 | if (rq->online) | 633 | if (rq->online) |
564 | cpupri_set(&rq->rd->cpupri, rq->cpu, | 634 | cpupri_set(&rq->rd->cpupri, rq->cpu, prio); |
565 | rt_se_prio(rt_se)); | ||
566 | #endif | ||
567 | } | ||
568 | #endif | ||
569 | #ifdef CONFIG_SMP | ||
570 | if (rt_se->nr_cpus_allowed > 1) { | ||
571 | struct rq *rq = rq_of_rt_rq(rt_rq); | ||
572 | 635 | ||
573 | rq->rt.rt_nr_migratory++; | 636 | } else if (prio == rt_rq->highest_prio.curr) |
574 | } | 637 | /* |
638 | * If the next task is equal in priority to the highest on | ||
639 | * the run-queue, then we implicitly know that the next highest | ||
640 | * task cannot be any lower than current | ||
641 | */ | ||
642 | rt_rq->highest_prio.next = prio; | ||
643 | else if (prio < rt_rq->highest_prio.next) | ||
644 | /* | ||
645 | * Otherwise, we need to recompute next-highest | ||
646 | */ | ||
647 | rt_rq->highest_prio.next = next_prio(rq); | ||
648 | } | ||
575 | 649 | ||
576 | update_rt_migration(rq_of_rt_rq(rt_rq)); | 650 | static void |
577 | #endif | 651 | dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) |
578 | #ifdef CONFIG_RT_GROUP_SCHED | 652 | { |
579 | if (rt_se_boosted(rt_se)) | 653 | struct rq *rq = rq_of_rt_rq(rt_rq); |
580 | rt_rq->rt_nr_boosted++; | ||
581 | 654 | ||
582 | if (rt_rq->tg) | 655 | if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next)) |
583 | start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); | 656 | rt_rq->highest_prio.next = next_prio(rq); |
584 | #else | 657 | |
585 | start_rt_bandwidth(&def_rt_bandwidth); | 658 | if (rq->online && rt_rq->highest_prio.curr != prev_prio) |
586 | #endif | 659 | cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); |
587 | } | 660 | } |
588 | 661 | ||
662 | #else /* CONFIG_SMP */ | ||
663 | |||
589 | static inline | 664 | static inline |
590 | void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | 665 | void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} |
591 | { | 666 | static inline |
592 | #ifdef CONFIG_SMP | 667 | void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} |
593 | int highest_prio = rt_rq->highest_prio; | 668 | |
594 | #endif | 669 | #endif /* CONFIG_SMP */ |
595 | 670 | ||
596 | WARN_ON(!rt_prio(rt_se_prio(rt_se))); | ||
597 | WARN_ON(!rt_rq->rt_nr_running); | ||
598 | rt_rq->rt_nr_running--; | ||
599 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED | 671 | #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED |
672 | static void | ||
673 | inc_rt_prio(struct rt_rq *rt_rq, int prio) | ||
674 | { | ||
675 | int prev_prio = rt_rq->highest_prio.curr; | ||
676 | |||
677 | if (prio < prev_prio) | ||
678 | rt_rq->highest_prio.curr = prio; | ||
679 | |||
680 | inc_rt_prio_smp(rt_rq, prio, prev_prio); | ||
681 | } | ||
682 | |||
683 | static void | ||
684 | dec_rt_prio(struct rt_rq *rt_rq, int prio) | ||
685 | { | ||
686 | int prev_prio = rt_rq->highest_prio.curr; | ||
687 | |||
600 | if (rt_rq->rt_nr_running) { | 688 | if (rt_rq->rt_nr_running) { |
601 | struct rt_prio_array *array; | ||
602 | 689 | ||
603 | WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio); | 690 | WARN_ON(prio < prev_prio); |
604 | if (rt_se_prio(rt_se) == rt_rq->highest_prio) { | 691 | |
605 | /* recalculate */ | 692 | /* |
606 | array = &rt_rq->active; | 693 | * This may have been our highest task, and therefore |
607 | rt_rq->highest_prio = | 694 | * we may have some recomputation to do |
695 | */ | ||
696 | if (prio == prev_prio) { | ||
697 | struct rt_prio_array *array = &rt_rq->active; | ||
698 | |||
699 | rt_rq->highest_prio.curr = | ||
608 | sched_find_first_bit(array->bitmap); | 700 | sched_find_first_bit(array->bitmap); |
609 | } /* otherwise leave rq->highest prio alone */ | 701 | } |
702 | |||
610 | } else | 703 | } else |
611 | rt_rq->highest_prio = MAX_RT_PRIO; | 704 | rt_rq->highest_prio.curr = MAX_RT_PRIO; |
612 | #endif | ||
613 | #ifdef CONFIG_SMP | ||
614 | if (rt_se->nr_cpus_allowed > 1) { | ||
615 | struct rq *rq = rq_of_rt_rq(rt_rq); | ||
616 | rq->rt.rt_nr_migratory--; | ||
617 | } | ||
618 | 705 | ||
619 | if (rt_rq->highest_prio != highest_prio) { | 706 | dec_rt_prio_smp(rt_rq, prio, prev_prio); |
620 | struct rq *rq = rq_of_rt_rq(rt_rq); | 707 | } |
621 | 708 | ||
622 | if (rq->online) | 709 | #else |
623 | cpupri_set(&rq->rd->cpupri, rq->cpu, | 710 | |
624 | rt_rq->highest_prio); | 711 | static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} |
625 | } | 712 | static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} |
713 | |||
714 | #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */ | ||
626 | 715 | ||
627 | update_rt_migration(rq_of_rt_rq(rt_rq)); | ||
628 | #endif /* CONFIG_SMP */ | ||
629 | #ifdef CONFIG_RT_GROUP_SCHED | 716 | #ifdef CONFIG_RT_GROUP_SCHED |
717 | |||
718 | static void | ||
719 | inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | ||
720 | { | ||
721 | if (rt_se_boosted(rt_se)) | ||
722 | rt_rq->rt_nr_boosted++; | ||
723 | |||
724 | if (rt_rq->tg) | ||
725 | start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); | ||
726 | } | ||
727 | |||
728 | static void | ||
729 | dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | ||
730 | { | ||
630 | if (rt_se_boosted(rt_se)) | 731 | if (rt_se_boosted(rt_se)) |
631 | rt_rq->rt_nr_boosted--; | 732 | rt_rq->rt_nr_boosted--; |
632 | 733 | ||
633 | WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); | 734 | WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); |
634 | #endif | 735 | } |
736 | |||
737 | #else /* CONFIG_RT_GROUP_SCHED */ | ||
738 | |||
739 | static void | ||
740 | inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | ||
741 | { | ||
742 | start_rt_bandwidth(&def_rt_bandwidth); | ||
743 | } | ||
744 | |||
745 | static inline | ||
746 | void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} | ||
747 | |||
748 | #endif /* CONFIG_RT_GROUP_SCHED */ | ||
749 | |||
750 | static inline | ||
751 | void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | ||
752 | { | ||
753 | int prio = rt_se_prio(rt_se); | ||
754 | |||
755 | WARN_ON(!rt_prio(prio)); | ||
756 | rt_rq->rt_nr_running++; | ||
757 | |||
758 | inc_rt_prio(rt_rq, prio); | ||
759 | inc_rt_migration(rt_se, rt_rq); | ||
760 | inc_rt_group(rt_se, rt_rq); | ||
761 | } | ||
762 | |||
763 | static inline | ||
764 | void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) | ||
765 | { | ||
766 | WARN_ON(!rt_prio(rt_se_prio(rt_se))); | ||
767 | WARN_ON(!rt_rq->rt_nr_running); | ||
768 | rt_rq->rt_nr_running--; | ||
769 | |||
770 | dec_rt_prio(rt_rq, rt_se_prio(rt_se)); | ||
771 | dec_rt_migration(rt_se, rt_rq); | ||
772 | dec_rt_group(rt_se, rt_rq); | ||
635 | } | 773 | } |
636 | 774 | ||
637 | static void __enqueue_rt_entity(struct sched_rt_entity *rt_se) | 775 | static void __enqueue_rt_entity(struct sched_rt_entity *rt_se) |
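The new highest_prio.curr/next pair is what both cpupri and the pull-side filtering later in this patch key off. A toy user-space model of the enqueue-side bookkeeping performed by inc_rt_prio() and inc_rt_prio_smp() (dequeue, which may need a full recomputation via next_prio(), is left out; all values are illustrative):

#include <stdio.h>

#define MAX_RT_PRIO 100

/* lower number means higher RT priority, as in the kernel */
struct prio_track {
	int curr;	/* highest queued priority */
	int next;	/* next-highest queued priority */
};

static void track_enqueue(struct prio_track *t, int prio)
{
	if (prio < t->curr) {
		/* the new task beats everything queued, so the old best
		 * implicitly becomes the next-highest */
		t->next = t->curr;
		t->curr = prio;
	} else if (prio <= t->next) {
		/* ties with or beats the previous runner-up */
		t->next = prio;
	}
}

int main(void)
{
	struct prio_track t = { MAX_RT_PRIO, MAX_RT_PRIO };
	static const int prios[] = { 50, 20, 35 };
	unsigned int i;

	for (i = 0; i < sizeof(prios) / sizeof(prios[0]); i++) {
		track_enqueue(&t, prios[i]);
		printf("enqueue prio %d: curr=%d next=%d\n",
		       prios[i], t.curr, t.next);
	}
	/* prints curr=50 next=100, then curr=20 next=50, then curr=20 next=35 */

	return 0;
}

Keeping next cheap to maintain is what lets pull_rt_task() skip a source runqueue without taking its lock when that runqueue's next-highest task cannot beat what this runqueue already has queued.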
@@ -718,6 +856,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup) | |||
718 | 856 | ||
719 | enqueue_rt_entity(rt_se); | 857 | enqueue_rt_entity(rt_se); |
720 | 858 | ||
859 | if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1) | ||
860 | enqueue_pushable_task(rq, p); | ||
861 | |||
721 | inc_cpu_load(rq, p->se.load.weight); | 862 | inc_cpu_load(rq, p->se.load.weight); |
722 | } | 863 | } |
723 | 864 | ||
@@ -728,6 +869,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) | |||
728 | update_curr_rt(rq); | 869 | update_curr_rt(rq); |
729 | dequeue_rt_entity(rt_se); | 870 | dequeue_rt_entity(rt_se); |
730 | 871 | ||
872 | dequeue_pushable_task(rq, p); | ||
873 | |||
731 | dec_cpu_load(rq, p->se.load.weight); | 874 | dec_cpu_load(rq, p->se.load.weight); |
732 | } | 875 | } |
733 | 876 | ||
@@ -878,7 +1021,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, | |||
878 | return next; | 1021 | return next; |
879 | } | 1022 | } |
880 | 1023 | ||
881 | static struct task_struct *pick_next_task_rt(struct rq *rq) | 1024 | static struct task_struct *_pick_next_task_rt(struct rq *rq) |
882 | { | 1025 | { |
883 | struct sched_rt_entity *rt_se; | 1026 | struct sched_rt_entity *rt_se; |
884 | struct task_struct *p; | 1027 | struct task_struct *p; |
@@ -900,6 +1043,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq) | |||
900 | 1043 | ||
901 | p = rt_task_of(rt_se); | 1044 | p = rt_task_of(rt_se); |
902 | p->se.exec_start = rq->clock; | 1045 | p->se.exec_start = rq->clock; |
1046 | |||
1047 | return p; | ||
1048 | } | ||
1049 | |||
1050 | static struct task_struct *pick_next_task_rt(struct rq *rq) | ||
1051 | { | ||
1052 | struct task_struct *p = _pick_next_task_rt(rq); | ||
1053 | |||
1054 | /* The running task is never eligible for pushing */ | ||
1055 | if (p) | ||
1056 | dequeue_pushable_task(rq, p); | ||
1057 | |||
903 | return p; | 1058 | return p; |
904 | } | 1059 | } |
905 | 1060 | ||
@@ -907,6 +1062,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) | |||
907 | { | 1062 | { |
908 | update_curr_rt(rq); | 1063 | update_curr_rt(rq); |
909 | p->se.exec_start = 0; | 1064 | p->se.exec_start = 0; |
1065 | |||
1066 | /* | ||
1067 | * The previous task needs to be made eligible for pushing | ||
1068 | * if it is still active | ||
1069 | */ | ||
1070 | if (p->se.on_rq && p->rt.nr_cpus_allowed > 1) | ||
1071 | enqueue_pushable_task(rq, p); | ||
910 | } | 1072 | } |
911 | 1073 | ||
912 | #ifdef CONFIG_SMP | 1074 | #ifdef CONFIG_SMP |
@@ -1072,7 +1234,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) | |||
1072 | } | 1234 | } |
1073 | 1235 | ||
1074 | /* If this rq is still suitable use it. */ | 1236 | /* If this rq is still suitable use it. */ |
1075 | if (lowest_rq->rt.highest_prio > task->prio) | 1237 | if (lowest_rq->rt.highest_prio.curr > task->prio) |
1076 | break; | 1238 | break; |
1077 | 1239 | ||
1078 | /* try again */ | 1240 | /* try again */ |
@@ -1083,6 +1245,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) | |||
1083 | return lowest_rq; | 1245 | return lowest_rq; |
1084 | } | 1246 | } |
1085 | 1247 | ||
1248 | static inline int has_pushable_tasks(struct rq *rq) | ||
1249 | { | ||
1250 | return !plist_head_empty(&rq->rt.pushable_tasks); | ||
1251 | } | ||
1252 | |||
1253 | static struct task_struct *pick_next_pushable_task(struct rq *rq) | ||
1254 | { | ||
1255 | struct task_struct *p; | ||
1256 | |||
1257 | if (!has_pushable_tasks(rq)) | ||
1258 | return NULL; | ||
1259 | |||
1260 | p = plist_first_entry(&rq->rt.pushable_tasks, | ||
1261 | struct task_struct, pushable_tasks); | ||
1262 | |||
1263 | BUG_ON(rq->cpu != task_cpu(p)); | ||
1264 | BUG_ON(task_current(rq, p)); | ||
1265 | BUG_ON(p->rt.nr_cpus_allowed <= 1); | ||
1266 | |||
1267 | BUG_ON(!p->se.on_rq); | ||
1268 | BUG_ON(!rt_task(p)); | ||
1269 | |||
1270 | return p; | ||
1271 | } | ||
1272 | |||
1086 | /* | 1273 | /* |
1087 | * If the current CPU has more than one RT task, see if the non | 1274 | * If the current CPU has more than one RT task, see if the non |
1088 | * running task can migrate over to a CPU that is running a task | 1275 | * running task can migrate over to a CPU that is running a task |
@@ -1092,13 +1279,11 @@ static int push_rt_task(struct rq *rq) | |||
1092 | { | 1279 | { |
1093 | struct task_struct *next_task; | 1280 | struct task_struct *next_task; |
1094 | struct rq *lowest_rq; | 1281 | struct rq *lowest_rq; |
1095 | int ret = 0; | ||
1096 | int paranoid = RT_MAX_TRIES; | ||
1097 | 1282 | ||
1098 | if (!rq->rt.overloaded) | 1283 | if (!rq->rt.overloaded) |
1099 | return 0; | 1284 | return 0; |
1100 | 1285 | ||
1101 | next_task = pick_next_highest_task_rt(rq, -1); | 1286 | next_task = pick_next_pushable_task(rq); |
1102 | if (!next_task) | 1287 | if (!next_task) |
1103 | return 0; | 1288 | return 0; |
1104 | 1289 | ||
@@ -1127,16 +1312,34 @@ static int push_rt_task(struct rq *rq) | |||
1127 | struct task_struct *task; | 1312 | struct task_struct *task; |
1128 | /* | 1313 | /* |
1129 | * find lock_lowest_rq releases rq->lock | 1314 | * find lock_lowest_rq releases rq->lock |
1130 | * so it is possible that next_task has changed. | 1315 | * so it is possible that next_task has migrated. |
1131 | * If it has, then try again. | 1316 | * |
1317 | * We need to make sure that the task is still on the same | ||
1318 | * run-queue and is also still the next task eligible for | ||
1319 | * pushing. | ||
1132 | */ | 1320 | */ |
1133 | task = pick_next_highest_task_rt(rq, -1); | 1321 | task = pick_next_pushable_task(rq); |
1134 | if (unlikely(task != next_task) && task && paranoid--) { | 1322 | if (task_cpu(next_task) == rq->cpu && task == next_task) { |
1135 | put_task_struct(next_task); | 1323 | /* |
1136 | next_task = task; | 1324 | * If we get here, the task hasn't moved at all, but |
1137 | goto retry; | 1325 | * it has failed to push. We will not try again, |
1326 | * since the other cpus will pull from us when they | ||
1327 | * are ready. | ||
1328 | */ | ||
1329 | dequeue_pushable_task(rq, next_task); | ||
1330 | goto out; | ||
1138 | } | 1331 | } |
1139 | goto out; | 1332 | |
1333 | if (!task) | ||
1334 | /* No more tasks, just exit */ | ||
1335 | goto out; | ||
1336 | |||
1337 | /* | ||
1338 | * Something has shifted, try again. | ||
1339 | */ | ||
1340 | put_task_struct(next_task); | ||
1341 | next_task = task; | ||
1342 | goto retry; | ||
1140 | } | 1343 | } |
1141 | 1344 | ||
1142 | deactivate_task(rq, next_task, 0); | 1345 | deactivate_task(rq, next_task, 0); |
@@ -1147,23 +1350,12 @@ static int push_rt_task(struct rq *rq) | |||
1147 | 1350 | ||
1148 | double_unlock_balance(rq, lowest_rq); | 1351 | double_unlock_balance(rq, lowest_rq); |
1149 | 1352 | ||
1150 | ret = 1; | ||
1151 | out: | 1353 | out: |
1152 | put_task_struct(next_task); | 1354 | put_task_struct(next_task); |
1153 | 1355 | ||
1154 | return ret; | 1356 | return 1; |
1155 | } | 1357 | } |
1156 | 1358 | ||
1157 | /* | ||
1158 | * TODO: Currently we just use the second highest prio task on | ||
1159 | * the queue, and stop when it can't migrate (or there's | ||
1160 | * no more RT tasks). There may be a case where a lower | ||
1161 | * priority RT task has a different affinity than the | ||
1162 | * higher RT task. In this case the lower RT task could | ||
1163 | * possibly be able to migrate where as the higher priority | ||
1164 | * RT task could not. We currently ignore this issue. | ||
1165 | * Enhancements are welcome! | ||
1166 | */ | ||
1167 | static void push_rt_tasks(struct rq *rq) | 1359 | static void push_rt_tasks(struct rq *rq) |
1168 | { | 1360 | { |
1169 | /* push_rt_task will return true if it moved an RT */ | 1361 | /* push_rt_task will return true if it moved an RT */ |
@@ -1174,33 +1366,35 @@ static void push_rt_tasks(struct rq *rq) | |||
1174 | static int pull_rt_task(struct rq *this_rq) | 1366 | static int pull_rt_task(struct rq *this_rq) |
1175 | { | 1367 | { |
1176 | int this_cpu = this_rq->cpu, ret = 0, cpu; | 1368 | int this_cpu = this_rq->cpu, ret = 0, cpu; |
1177 | struct task_struct *p, *next; | 1369 | struct task_struct *p; |
1178 | struct rq *src_rq; | 1370 | struct rq *src_rq; |
1179 | 1371 | ||
1180 | if (likely(!rt_overloaded(this_rq))) | 1372 | if (likely(!rt_overloaded(this_rq))) |
1181 | return 0; | 1373 | return 0; |
1182 | 1374 | ||
1183 | next = pick_next_task_rt(this_rq); | ||
1184 | |||
1185 | for_each_cpu(cpu, this_rq->rd->rto_mask) { | 1375 | for_each_cpu(cpu, this_rq->rd->rto_mask) { |
1186 | if (this_cpu == cpu) | 1376 | if (this_cpu == cpu) |
1187 | continue; | 1377 | continue; |
1188 | 1378 | ||
1189 | src_rq = cpu_rq(cpu); | 1379 | src_rq = cpu_rq(cpu); |
1380 | |||
1381 | /* | ||
1382 | * Don't bother taking the src_rq->lock if the next highest | ||
1383 | * task is known to be lower-priority than our current task. | ||
1384 | * This may look racy, but if this value is about to go | ||
1385 | * logically higher, the src_rq will push this task away. | ||
1386 | * And if its going logically lower, we do not care | ||
1387 | */ | ||
1388 | if (src_rq->rt.highest_prio.next >= | ||
1389 | this_rq->rt.highest_prio.curr) | ||
1390 | continue; | ||
1391 | |||
1190 | /* | 1392 | /* |
1191 | * We can potentially drop this_rq's lock in | 1393 | * We can potentially drop this_rq's lock in |
1192 | * double_lock_balance, and another CPU could | 1394 | * double_lock_balance, and another CPU could |
1193 | * steal our next task - hence we must cause | 1395 | * alter this_rq |
1194 | * the caller to recalculate the next task | ||
1195 | * in that case: | ||
1196 | */ | 1396 | */ |
1197 | if (double_lock_balance(this_rq, src_rq)) { | 1397 | double_lock_balance(this_rq, src_rq); |
1198 | struct task_struct *old_next = next; | ||
1199 | |||
1200 | next = pick_next_task_rt(this_rq); | ||
1201 | if (next != old_next) | ||
1202 | ret = 1; | ||
1203 | } | ||
1204 | 1398 | ||
1205 | /* | 1399 | /* |
1206 | * Are there still pullable RT tasks? | 1400 | * Are there still pullable RT tasks? |
@@ -1214,7 +1408,7 @@ static int pull_rt_task(struct rq *this_rq) | |||
1214 | * Do we have an RT task that preempts | 1408 | * Do we have an RT task that preempts |
1215 | * the to-be-scheduled task? | 1409 | * the to-be-scheduled task? |
1216 | */ | 1410 | */ |
1217 | if (p && (!next || (p->prio < next->prio))) { | 1411 | if (p && (p->prio < this_rq->rt.highest_prio.curr)) { |
1218 | WARN_ON(p == src_rq->curr); | 1412 | WARN_ON(p == src_rq->curr); |
1219 | WARN_ON(!p->se.on_rq); | 1413 | WARN_ON(!p->se.on_rq); |
1220 | 1414 | ||
@@ -1224,12 +1418,9 @@ static int pull_rt_task(struct rq *this_rq) | |||
1224 | * This is just that p is wakeing up and hasn't | 1418 | * This is just that p is wakeing up and hasn't |
1225 | * had a chance to schedule. We only pull | 1419 | * had a chance to schedule. We only pull |
1226 | * p if it is lower in priority than the | 1420 | * p if it is lower in priority than the |
1227 | * current task on the run queue or | 1421 | * current task on the run queue |
1228 | * this_rq next task is lower in prio than | ||
1229 | * the current task on that rq. | ||
1230 | */ | 1422 | */ |
1231 | if (p->prio < src_rq->curr->prio || | 1423 | if (p->prio < src_rq->curr->prio) |
1232 | (next && next->prio < src_rq->curr->prio)) | ||
1233 | goto skip; | 1424 | goto skip; |
1234 | 1425 | ||
1235 | ret = 1; | 1426 | ret = 1; |
@@ -1242,13 +1433,7 @@ static int pull_rt_task(struct rq *this_rq) | |||
1242 | * case there's an even higher prio task | 1433 | * case there's an even higher prio task |
1243 | * in another runqueue. (low likelyhood | 1434 | * in another runqueue. (low likelyhood |
1244 | * but possible) | 1435 | * but possible) |
1245 | * | ||
1246 | * Update next so that we won't pick a task | ||
1247 | * on another cpu with a priority lower (or equal) | ||
1248 | * than the one we just picked. | ||
1249 | */ | 1436 | */ |
1250 | next = p; | ||
1251 | |||
1252 | } | 1437 | } |
1253 | skip: | 1438 | skip: |
1254 | double_unlock_balance(this_rq, src_rq); | 1439 | double_unlock_balance(this_rq, src_rq); |
@@ -1260,24 +1445,27 @@ static int pull_rt_task(struct rq *this_rq) | |||
1260 | static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) | 1445 | static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) |
1261 | { | 1446 | { |
1262 | /* Try to pull RT tasks here if we lower this rq's prio */ | 1447 | /* Try to pull RT tasks here if we lower this rq's prio */ |
1263 | if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio) | 1448 | if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio) |
1264 | pull_rt_task(rq); | 1449 | pull_rt_task(rq); |
1265 | } | 1450 | } |
1266 | 1451 | ||
1452 | /* | ||
1453 | * assumes rq->lock is held | ||
1454 | */ | ||
1455 | static int needs_post_schedule_rt(struct rq *rq) | ||
1456 | { | ||
1457 | return has_pushable_tasks(rq); | ||
1458 | } | ||
1459 | |||
1267 | static void post_schedule_rt(struct rq *rq) | 1460 | static void post_schedule_rt(struct rq *rq) |
1268 | { | 1461 | { |
1269 | /* | 1462 | /* |
1270 | * If we have more than one rt_task queued, then | 1463 | * This is only called if needs_post_schedule_rt() indicates that |
1271 | * see if we can push the other rt_tasks off to other CPUS. | 1464 | * we need to push tasks away |
1272 | * Note we may release the rq lock, and since | ||
1273 | * the lock was owned by prev, we need to release it | ||
1274 | * first via finish_lock_switch and then reaquire it here. | ||
1275 | */ | 1465 | */ |
1276 | if (unlikely(rq->rt.overloaded)) { | 1466 | spin_lock_irq(&rq->lock); |
1277 | spin_lock_irq(&rq->lock); | 1467 | push_rt_tasks(rq); |
1278 | push_rt_tasks(rq); | 1468 | spin_unlock_irq(&rq->lock); |
1279 | spin_unlock_irq(&rq->lock); | ||
1280 | } | ||
1281 | } | 1469 | } |
1282 | 1470 | ||
1283 | /* | 1471 | /* |
@@ -1288,7 +1476,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p) | |||
1288 | { | 1476 | { |
1289 | if (!task_running(rq, p) && | 1477 | if (!task_running(rq, p) && |
1290 | !test_tsk_need_resched(rq->curr) && | 1478 | !test_tsk_need_resched(rq->curr) && |
1291 | rq->rt.overloaded) | 1479 | has_pushable_tasks(rq) && |
1480 | p->rt.nr_cpus_allowed > 1) | ||
1292 | push_rt_tasks(rq); | 1481 | push_rt_tasks(rq); |
1293 | } | 1482 | } |
1294 | 1483 | ||
@@ -1324,6 +1513,24 @@ static void set_cpus_allowed_rt(struct task_struct *p, | |||
1324 | if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) { | 1513 | if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) { |
1325 | struct rq *rq = task_rq(p); | 1514 | struct rq *rq = task_rq(p); |
1326 | 1515 | ||
1516 | if (!task_current(rq, p)) { | ||
1517 | /* | ||
1518 | * Make sure we dequeue this task from the pushable list | ||
1519 | * before going further. It will either remain off of | ||
1520 | * the list because we are no longer pushable, or it | ||
1521 | * will be requeued. | ||
1522 | */ | ||
1523 | if (p->rt.nr_cpus_allowed > 1) | ||
1524 | dequeue_pushable_task(rq, p); | ||
1525 | |||
1526 | /* | ||
1527 | * Requeue if our weight is changing and still > 1 | ||
1528 | */ | ||
1529 | if (weight > 1) | ||
1530 | enqueue_pushable_task(rq, p); | ||
1531 | |||
1532 | } | ||
1533 | |||
1327 | if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) { | 1534 | if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) { |
1328 | rq->rt.rt_nr_migratory++; | 1535 | rq->rt.rt_nr_migratory++; |
1329 | } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) { | 1536 | } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) { |
@@ -1331,7 +1538,7 @@ static void set_cpus_allowed_rt(struct task_struct *p, | |||
1331 | rq->rt.rt_nr_migratory--; | 1538 | rq->rt.rt_nr_migratory--; |
1332 | } | 1539 | } |
1333 | 1540 | ||
1334 | update_rt_migration(rq); | 1541 | update_rt_migration(&rq->rt); |
1335 | } | 1542 | } |
1336 | 1543 | ||
1337 | cpumask_copy(&p->cpus_allowed, new_mask); | 1544 | cpumask_copy(&p->cpus_allowed, new_mask); |
@@ -1346,7 +1553,7 @@ static void rq_online_rt(struct rq *rq) | |||
1346 | 1553 | ||
1347 | __enable_runtime(rq); | 1554 | __enable_runtime(rq); |
1348 | 1555 | ||
1349 | cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio); | 1556 | cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); |
1350 | } | 1557 | } |
1351 | 1558 | ||
1352 | /* Assumes rq->lock is held */ | 1559 | /* Assumes rq->lock is held */ |
@@ -1438,7 +1645,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p, | |||
1438 | * can release the rq lock and p could migrate. | 1645 | * can release the rq lock and p could migrate. |
1439 | * Only reschedule if p is still on the same runqueue. | 1646 | * Only reschedule if p is still on the same runqueue. |
1440 | */ | 1647 | */ |
1441 | if (p->prio > rq->rt.highest_prio && rq->curr == p) | 1648 | if (p->prio > rq->rt.highest_prio.curr && rq->curr == p) |
1442 | resched_task(p); | 1649 | resched_task(p); |
1443 | #else | 1650 | #else |
1444 | /* For UP simply resched on drop of prio */ | 1651 | /* For UP simply resched on drop of prio */ |
@@ -1509,6 +1716,9 @@ static void set_curr_task_rt(struct rq *rq) | |||
1509 | struct task_struct *p = rq->curr; | 1716 | struct task_struct *p = rq->curr; |
1510 | 1717 | ||
1511 | p->se.exec_start = rq->clock; | 1718 | p->se.exec_start = rq->clock; |
1719 | |||
1720 | /* The running task is never eligible for pushing */ | ||
1721 | dequeue_pushable_task(rq, p); | ||
1512 | } | 1722 | } |
1513 | 1723 | ||
1514 | static const struct sched_class rt_sched_class = { | 1724 | static const struct sched_class rt_sched_class = { |
@@ -1531,6 +1741,7 @@ static const struct sched_class rt_sched_class = { | |||
1531 | .rq_online = rq_online_rt, | 1741 | .rq_online = rq_online_rt, |
1532 | .rq_offline = rq_offline_rt, | 1742 | .rq_offline = rq_offline_rt, |
1533 | .pre_schedule = pre_schedule_rt, | 1743 | .pre_schedule = pre_schedule_rt, |
1744 | .needs_post_schedule = needs_post_schedule_rt, | ||
1534 | .post_schedule = post_schedule_rt, | 1745 | .post_schedule = post_schedule_rt, |
1535 | .task_wake_up = task_wake_up_rt, | 1746 | .task_wake_up = task_wake_up_rt, |
1536 | .switched_from = switched_from_rt, | 1747 | .switched_from = switched_from_rt, |
diff --git a/kernel/user.c b/kernel/user.c
index 477b6660f447..3551ac742395 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -72,6 +72,7 @@ static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent) | |||
72 | static void uid_hash_remove(struct user_struct *up) | 72 | static void uid_hash_remove(struct user_struct *up) |
73 | { | 73 | { |
74 | hlist_del_init(&up->uidhash_node); | 74 | hlist_del_init(&up->uidhash_node); |
75 | put_user_ns(up->user_ns); | ||
75 | } | 76 | } |
76 | 77 | ||
77 | static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) | 78 | static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent) |
@@ -334,7 +335,6 @@ static void free_user(struct user_struct *up, unsigned long flags) | |||
334 | atomic_inc(&up->__count); | 335 | atomic_inc(&up->__count); |
335 | spin_unlock_irqrestore(&uidhash_lock, flags); | 336 | spin_unlock_irqrestore(&uidhash_lock, flags); |
336 | 337 | ||
337 | put_user_ns(up->user_ns); | ||
338 | INIT_WORK(&up->work, remove_user_sysfs_dir); | 338 | INIT_WORK(&up->work, remove_user_sysfs_dir); |
339 | schedule_work(&up->work); | 339 | schedule_work(&up->work); |
340 | } | 340 | } |
@@ -357,7 +357,6 @@ static void free_user(struct user_struct *up, unsigned long flags) | |||
357 | sched_destroy_user(up); | 357 | sched_destroy_user(up); |
358 | key_put(up->uid_keyring); | 358 | key_put(up->uid_keyring); |
359 | key_put(up->session_keyring); | 359 | key_put(up->session_keyring); |
360 | put_user_ns(up->user_ns); | ||
361 | kmem_cache_free(uid_cachep, up); | 360 | kmem_cache_free(uid_cachep, up); |
362 | } | 361 | } |
363 | 362 | ||