-rw-r--r--   Documentation/scheduler/00-INDEX           |    2
-rw-r--r--   Documentation/scheduler/sched-coding.txt   |  126
-rw-r--r--   arch/x86/kernel/cpu/intel.c                |    8
-rw-r--r--   arch/x86/kernel/tsc.c                      |    9
-rw-r--r--   include/linux/init_task.h                  |    1
-rw-r--r--   include/linux/latencytop.h                 |   10
-rw-r--r--   include/linux/plist.h                      |    9
-rw-r--r--   include/linux/sched.h                      |   17
-rw-r--r--   init/Kconfig                               |    1
-rw-r--r--   kernel/latencytop.c                        |   83
-rw-r--r--   kernel/sched.c                             |  982
-rw-r--r--   kernel/sched_clock.c                       |   30
-rw-r--r--   kernel/sched_debug.c                       |    8
-rw-r--r--   kernel/sched_fair.c                        |   59
-rw-r--r--   kernel/sched_features.h                    |    3
-rw-r--r--   kernel/sched_rt.c                          |  537
-rw-r--r--   kernel/sched_stats.h                       |    7
-rw-r--r--   lib/Kconfig                                |    6
-rw-r--r--   lib/Makefile                               |    4
-rw-r--r--   lib/kernel_lock.c                          |    2
20 files changed, 1262 insertions, 642 deletions
diff --git a/Documentation/scheduler/00-INDEX b/Documentation/scheduler/00-INDEX
index aabcc3a089b..3c00c9c3219 100644
--- a/Documentation/scheduler/00-INDEX
+++ b/Documentation/scheduler/00-INDEX
@@ -2,8 +2,6 @@
2 - this file. 2 - this file.
3sched-arch.txt 3sched-arch.txt
4 - CPU Scheduler implementation hints for architecture specific code. 4 - CPU Scheduler implementation hints for architecture specific code.
5sched-coding.txt
6 - reference for various scheduler-related methods in the O(1) scheduler.
7sched-design-CFS.txt 5sched-design-CFS.txt
8 - goals, design and implementation of the Complete Fair Scheduler. 6 - goals, design and implementation of the Complete Fair Scheduler.
9sched-domains.txt 7sched-domains.txt
diff --git a/Documentation/scheduler/sched-coding.txt b/Documentation/scheduler/sched-coding.txt
deleted file mode 100644
index cbd8db752ac..00000000000
--- a/Documentation/scheduler/sched-coding.txt
+++ /dev/null
@@ -1,126 +0,0 @@
1 Reference for various scheduler-related methods in the O(1) scheduler
2 Robert Love <rml@tech9.net>, MontaVista Software
3
4
5Note most of these methods are local to kernel/sched.c - this is by design.
6The scheduler is meant to be self-contained and abstracted away. This document
7is primarily for understanding the scheduler, not interfacing to it. Some of
8the discussed interfaces, however, are general process/scheduling methods.
9They are typically defined in include/linux/sched.h.
10
11
12Main Scheduling Methods
13-----------------------
14
15void load_balance(runqueue_t *this_rq, int idle)
16 Attempts to pull tasks from one cpu to another to balance cpu usage,
17 if needed. This method is called explicitly if the runqueues are
18 imbalanced or periodically by the timer tick. Prior to calling,
19 the current runqueue must be locked and interrupts disabled.
20
21void schedule()
22 The main scheduling function. Upon return, the highest priority
23 process will be active.
24
25
26Locking
27-------
28
29Each runqueue has its own lock, rq->lock. When multiple runqueues need
30to be locked, lock acquires must be ordered by ascending &runqueue value.
31
32A specific runqueue is locked via
33
34 task_rq_lock(task_t pid, unsigned long *flags)
35
36which disables preemption, disables interrupts, and locks the runqueue pid is
37running on. Likewise,
38
39 task_rq_unlock(task_t pid, unsigned long *flags)
40
41unlocks the runqueue pid is running on, restores interrupts to their previous
42state, and reenables preemption.
43
44The routines
45
46 double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
47
48and
49
50 double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
51
52safely lock and unlock, respectively, the two specified runqueues. They do
53not, however, disable and restore interrupts. Users are required to do so
54manually before and after calls.
55
56
57Values
58------
59
60MAX_PRIO
61 The maximum priority of the system, stored in the task as task->prio.
62 Lower priorities are higher. Normal (non-RT) priorities range from
63 MAX_RT_PRIO to (MAX_PRIO - 1).
64MAX_RT_PRIO
65 The maximum real-time priority of the system. Valid RT priorities
66 range from 0 to (MAX_RT_PRIO - 1).
67MAX_USER_RT_PRIO
68 The maximum real-time priority that is exported to user-space. Should
69 always be equal to or less than MAX_RT_PRIO. Setting it less allows
70 kernel threads to have higher priorities than any user-space task.
71MIN_TIMESLICE
72MAX_TIMESLICE
73 Respectively, the minimum and maximum timeslices (quanta) of a process.
74
75Data
76----
77
78struct runqueue
79 The main per-CPU runqueue data structure.
80struct task_struct
81 The main per-process data structure.
82
83
84General Methods
85---------------
86
87cpu_rq(cpu)
88 Returns the runqueue of the specified cpu.
89this_rq()
90 Returns the runqueue of the current cpu.
91task_rq(pid)
92 Returns the runqueue which holds the specified pid.
93cpu_curr(cpu)
94 Returns the task currently running on the given cpu.
95rt_task(pid)
96 Returns true if pid is real-time, false if not.
97
98
99Process Control Methods
100-----------------------
101
102void set_user_nice(task_t *p, long nice)
103 Sets the "nice" value of task p to the given value.
104int setscheduler(pid_t pid, int policy, struct sched_param *param)
105 Sets the scheduling policy and parameters for the given pid.
106int set_cpus_allowed(task_t *p, unsigned long new_mask)
107 Sets a given task's CPU affinity and migrates it to a proper cpu.
108 Callers must have a valid reference to the task and assure the
109 task not exit prematurely. No locks can be held during the call.
110set_task_state(tsk, state_value)
111 Sets the given task's state to the given value.
112set_current_state(state_value)
113 Sets the current task's state to the given value.
114void set_tsk_need_resched(struct task_struct *tsk)
115 Sets need_resched in the given task.
116void clear_tsk_need_resched(struct task_struct *tsk)
117 Clears need_resched in the given task.
118void set_need_resched()
119 Sets need_resched in the current task.
120void clear_need_resched()
121 Clears need_resched in the current task.
122int need_resched()
123 Returns true if need_resched is set in the current task, false
124 otherwise.
125yield()
126 Place the current process at the end of the runqueue and call schedule.
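The locking rules that the removed document spells out (a per-runqueue rq->lock, double locks taken in ascending &runqueue order, interrupts handled by the caller) can be illustrated with a short sketch. This is illustrative only and uses simplified stand-in types; it is not the kernel's double_rq_lock() implementation, which also carries lockdep nesting annotations.

/*
 * Illustrative sketch of the address-ordered double-lock rule described
 * in the document removed above. "struct runqueue" and both helpers are
 * simplified stand-ins; interrupts are assumed to already be disabled by
 * the caller, as that document requires.
 */
struct runqueue {
	spinlock_t lock;
	/* ... per-CPU scheduling state ... */
};

static void double_rq_lock_sketch(struct runqueue *rq1, struct runqueue *rq2)
{
	if (rq1 == rq2) {
		spin_lock(&rq1->lock);
	} else if (rq1 < rq2) {
		/* lower address first: every path agrees, so no ABBA deadlock */
		spin_lock(&rq1->lock);
		spin_lock(&rq2->lock);
	} else {
		spin_lock(&rq2->lock);
		spin_lock(&rq1->lock);
	}
}

static void double_rq_unlock_sketch(struct runqueue *rq1, struct runqueue *rq2)
{
	spin_unlock(&rq1->lock);
	if (rq1 != rq2)
		spin_unlock(&rq2->lock);
}
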
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 24ff26a38ad..5fff00c70de 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -4,6 +4,7 @@
4#include <linux/string.h> 4#include <linux/string.h>
5#include <linux/bitops.h> 5#include <linux/bitops.h>
6#include <linux/smp.h> 6#include <linux/smp.h>
7#include <linux/sched.h>
7#include <linux/thread_info.h> 8#include <linux/thread_info.h>
8#include <linux/module.h> 9#include <linux/module.h>
9 10
@@ -56,11 +57,16 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
56 57
57 /* 58 /*
58 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate 59 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
59 * with P/T states and does not stop in deep C-states 60 * with P/T states and does not stop in deep C-states.
61 *
62 * It is also reliable across cores and sockets. (but not across
63 * cabinets - we turn it off in that case explicitly.)
60 */ 64 */
61 if (c->x86_power & (1 << 8)) { 65 if (c->x86_power & (1 << 8)) {
62 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); 66 set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
63 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); 67 set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
68 set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
69 sched_clock_stable = 1;
64 } 70 }
65 71
66} 72}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index b8e7aaf7ef7..08afa1579e6 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -17,20 +17,21 @@
17#include <asm/delay.h> 17#include <asm/delay.h>
18#include <asm/hypervisor.h> 18#include <asm/hypervisor.h>
19 19
20unsigned int cpu_khz; /* TSC clocks / usec, not used here */ 20unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
21EXPORT_SYMBOL(cpu_khz); 21EXPORT_SYMBOL(cpu_khz);
22unsigned int tsc_khz; 22
23unsigned int __read_mostly tsc_khz;
23EXPORT_SYMBOL(tsc_khz); 24EXPORT_SYMBOL(tsc_khz);
24 25
25/* 26/*
26 * TSC can be unstable due to cpufreq or due to unsynced TSCs 27 * TSC can be unstable due to cpufreq or due to unsynced TSCs
27 */ 28 */
28static int tsc_unstable; 29static int __read_mostly tsc_unstable;
29 30
30/* native_sched_clock() is called before tsc_init(), so 31/* native_sched_clock() is called before tsc_init(), so
31 we must start with the TSC soft disabled to prevent 32 we must start with the TSC soft disabled to prevent
32 erroneous rdtsc usage on !cpu_has_tsc processors */ 33 erroneous rdtsc usage on !cpu_has_tsc processors */
33static int tsc_disabled = -1; 34static int __read_mostly tsc_disabled = -1;
34 35
35static int tsc_clocksource_reliable; 36static int tsc_clocksource_reliable;
36/* 37/*
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e752d973fa2..af1de95e711 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -147,6 +147,7 @@ extern struct cred init_cred;
147 .nr_cpus_allowed = NR_CPUS, \ 147 .nr_cpus_allowed = NR_CPUS, \
148 }, \ 148 }, \
149 .tasks = LIST_HEAD_INIT(tsk.tasks), \ 149 .tasks = LIST_HEAD_INIT(tsk.tasks), \
150 .pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO), \
150 .ptraced = LIST_HEAD_INIT(tsk.ptraced), \ 151 .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
151 .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \ 152 .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
152 .real_parent = &tsk, \ 153 .real_parent = &tsk, \
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
index 901c2d6377a..b0e99898527 100644
--- a/include/linux/latencytop.h
+++ b/include/linux/latencytop.h
@@ -9,6 +9,7 @@
9#ifndef _INCLUDE_GUARD_LATENCYTOP_H_ 9#ifndef _INCLUDE_GUARD_LATENCYTOP_H_
10#define _INCLUDE_GUARD_LATENCYTOP_H_ 10#define _INCLUDE_GUARD_LATENCYTOP_H_
11 11
12#include <linux/compiler.h>
12#ifdef CONFIG_LATENCYTOP 13#ifdef CONFIG_LATENCYTOP
13 14
14#define LT_SAVECOUNT 32 15#define LT_SAVECOUNT 32
@@ -24,7 +25,14 @@ struct latency_record {
24 25
25struct task_struct; 26struct task_struct;
26 27
27void account_scheduler_latency(struct task_struct *task, int usecs, int inter); 28extern int latencytop_enabled;
29void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
30static inline void
31account_scheduler_latency(struct task_struct *task, int usecs, int inter)
32{
33 if (unlikely(latencytop_enabled))
34 __account_scheduler_latency(task, usecs, inter);
35}
28 36
29void clear_all_latency_tracing(struct task_struct *p); 37void clear_all_latency_tracing(struct task_struct *p);
30 38
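The new account_scheduler_latency() wrapper keeps the common case cheap: when latencytop is compiled in but not enabled, a caller pays only for one inlined, unlikely-annotated test of latencytop_enabled, and the out-of-line __account_scheduler_latency() is never entered. A hedged sketch of a caller follows; the helper name is made up and the nanosecond-to-microsecond conversion is only approximate.

/*
 * Hypothetical caller (not part of the patch): record how long a task
 * slept. The >> 10 is a cheap, approximate ns -> usec conversion; only
 * the flag test is paid while the latencytop tool is switched off.
 */
static void note_sleep_latency(struct task_struct *tsk, u64 delta_ns,
			       int interruptible)
{
	account_scheduler_latency(tsk, (int)(delta_ns >> 10), interruptible);
}
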
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 85de2f05587..45926d77d6a 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -96,6 +96,10 @@ struct plist_node {
96# define PLIST_HEAD_LOCK_INIT(_lock) 96# define PLIST_HEAD_LOCK_INIT(_lock)
97#endif 97#endif
98 98
99#define _PLIST_HEAD_INIT(head) \
100 .prio_list = LIST_HEAD_INIT((head).prio_list), \
101 .node_list = LIST_HEAD_INIT((head).node_list)
102
99/** 103/**
100 * PLIST_HEAD_INIT - static struct plist_head initializer 104 * PLIST_HEAD_INIT - static struct plist_head initializer
101 * @head: struct plist_head variable name 105 * @head: struct plist_head variable name
@@ -103,8 +107,7 @@ struct plist_node {
103 */ 107 */
104#define PLIST_HEAD_INIT(head, _lock) \ 108#define PLIST_HEAD_INIT(head, _lock) \
105{ \ 109{ \
106 .prio_list = LIST_HEAD_INIT((head).prio_list), \ 110 _PLIST_HEAD_INIT(head), \
107 .node_list = LIST_HEAD_INIT((head).node_list), \
108 PLIST_HEAD_LOCK_INIT(&(_lock)) \ 111 PLIST_HEAD_LOCK_INIT(&(_lock)) \
109} 112}
110 113
@@ -116,7 +119,7 @@ struct plist_node {
116#define PLIST_NODE_INIT(node, __prio) \ 119#define PLIST_NODE_INIT(node, __prio) \
117{ \ 120{ \
118 .prio = (__prio), \ 121 .prio = (__prio), \
119 .plist = PLIST_HEAD_INIT((node).plist, NULL), \ 122 .plist = { _PLIST_HEAD_INIT((node).plist) }, \
120} 123}
121 124
122/** 125/**
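Splitting out _PLIST_HEAD_INIT() lets PLIST_NODE_INIT() build a node whose embedded head takes no lock argument, which is what makes the static ->pushable_tasks initializer in init_task.h above possible. A minimal sketch of the same pattern, with an illustrative variable name:

/* Illustrative only: statically initialize a plist node at the lowest
 * priority, exactly as INIT_TASK() now does for ->pushable_tasks. */
static struct plist_node example_node = PLIST_NODE_INIT(example_node, MAX_PRIO);
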
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c36f62e754..ff904b0606d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -998,6 +998,7 @@ struct sched_class {
998 struct rq *busiest, struct sched_domain *sd, 998 struct rq *busiest, struct sched_domain *sd,
999 enum cpu_idle_type idle); 999 enum cpu_idle_type idle);
1000 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); 1000 void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
1001 int (*needs_post_schedule) (struct rq *this_rq);
1001 void (*post_schedule) (struct rq *this_rq); 1002 void (*post_schedule) (struct rq *this_rq);
1002 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); 1003 void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
1003 1004
@@ -1052,6 +1053,10 @@ struct sched_entity {
1052 u64 last_wakeup; 1053 u64 last_wakeup;
1053 u64 avg_overlap; 1054 u64 avg_overlap;
1054 1055
1056 u64 start_runtime;
1057 u64 avg_wakeup;
1058 u64 nr_migrations;
1059
1055#ifdef CONFIG_SCHEDSTATS 1060#ifdef CONFIG_SCHEDSTATS
1056 u64 wait_start; 1061 u64 wait_start;
1057 u64 wait_max; 1062 u64 wait_max;
@@ -1067,7 +1072,6 @@ struct sched_entity {
1067 u64 exec_max; 1072 u64 exec_max;
1068 u64 slice_max; 1073 u64 slice_max;
1069 1074
1070 u64 nr_migrations;
1071 u64 nr_migrations_cold; 1075 u64 nr_migrations_cold;
1072 u64 nr_failed_migrations_affine; 1076 u64 nr_failed_migrations_affine;
1073 u64 nr_failed_migrations_running; 1077 u64 nr_failed_migrations_running;
@@ -1164,6 +1168,7 @@ struct task_struct {
1164#endif 1168#endif
1165 1169
1166 struct list_head tasks; 1170 struct list_head tasks;
1171 struct plist_node pushable_tasks;
1167 1172
1168 struct mm_struct *mm, *active_mm; 1173 struct mm_struct *mm, *active_mm;
1169 1174
@@ -1675,6 +1680,16 @@ static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
1675 return set_cpus_allowed_ptr(p, &new_mask); 1680 return set_cpus_allowed_ptr(p, &new_mask);
1676} 1681}
1677 1682
1683/*
1684 * Architectures can set this to 1 if they have specified
1685 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
1686 * but then during bootup it turns out that sched_clock()
1687 * is reliable after all:
1688 */
1689#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
1690extern int sched_clock_stable;
1691#endif
1692
1678extern unsigned long long sched_clock(void); 1693extern unsigned long long sched_clock(void);
1679 1694
1680extern void sched_clock_init(void); 1695extern void sched_clock_init(void);
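task_struct gains a pushable_tasks plist_node here, and in the kernel/sched.c hunks below each rt_rq gains a matching pushable_tasks plist_head, so the RT push logic can find its best candidate without scanning. A hedged sketch of that lookup, assuming only the existing plist helpers plist_head_empty() and plist_first(); the function name is made up, and the real push/pull rework lives in kernel/sched_rt.c (see the diffstat).

/*
 * Illustrative sketch, not the patch's code: plist keeps nodes sorted by
 * ->prio (lowest value, i.e. highest RT priority, first), so the best task
 * to push away is simply the first node on the pushable_tasks list.
 */
static struct task_struct *pick_pushable_sketch(struct rt_rq *rt_rq)
{
	struct plist_node *node;

	if (plist_head_empty(&rt_rq->pushable_tasks))
		return NULL;

	node = plist_first(&rt_rq->pushable_tasks);
	return container_of(node, struct task_struct, pushable_tasks);
}
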
diff --git a/init/Kconfig b/init/Kconfig
index 6a5c5fed66c..68699137b14 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -966,7 +966,6 @@ config SLABINFO
966 966
967config RT_MUTEXES 967config RT_MUTEXES
968 boolean 968 boolean
969 select PLIST
970 969
971config BASE_SMALL 970config BASE_SMALL
972 int 971 int
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 449db466bdb..ca07c5c0c91 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -9,6 +9,44 @@
9 * as published by the Free Software Foundation; version 2 9 * as published by the Free Software Foundation; version 2
10 * of the License. 10 * of the License.
11 */ 11 */
12
13/*
14 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
15 * used by the "latencytop" userspace tool. The latency that is tracked is not
16 * the 'traditional' interrupt latency (which is primarily caused by something
17 * else consuming CPU), but instead, it is the latency an application encounters
18 * because the kernel sleeps on its behalf for various reasons.
19 *
20 * This code tracks 2 levels of statistics:
21 * 1) System level latency
22 * 2) Per process latency
23 *
24 * The latency is stored in fixed sized data structures in an accumulated form;
25 * if the "same" latency cause is hit twice, this will be tracked as one entry
26 * in the data structure. Both the count, total accumulated latency and maximum
27 * latency are tracked in this data structure. When the fixed size structure is
28 * full, no new causes are tracked until the buffer is flushed by writing to
29 * the /proc file; the userspace tool does this on a regular basis.
30 *
31 * A latency cause is identified by a stringified backtrace at the point that
32 * the scheduler gets invoked. The userland tool will use this string to
33 * identify the cause of the latency in human readable form.
34 *
35 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
36 * These files look like this:
37 *
38 * Latency Top version : v0.1
39 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
40 * | | | |
41 * | | | +----> the stringified backtrace
42 * | | +---------> The maximum latency for this entry in microseconds
43 * | +--------------> The accumulated latency for this entry (microseconds)
44 * +-------------------> The number of times this entry is hit
45 *
46 * (note: the average latency is the accumulated latency divided by the number
47 * of times)
48 */
49
12#include <linux/latencytop.h> 50#include <linux/latencytop.h>
13#include <linux/kallsyms.h> 51#include <linux/kallsyms.h>
14#include <linux/seq_file.h> 52#include <linux/seq_file.h>
@@ -72,7 +110,7 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
72 firstnonnull = i; 110 firstnonnull = i;
73 continue; 111 continue;
74 } 112 }
75 for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { 113 for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
76 unsigned long record = lat->backtrace[q]; 114 unsigned long record = lat->backtrace[q];
77 115
78 if (latency_record[i].backtrace[q] != record) { 116 if (latency_record[i].backtrace[q] != record) {
@@ -101,31 +139,52 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
101 memcpy(&latency_record[i], lat, sizeof(struct latency_record)); 139 memcpy(&latency_record[i], lat, sizeof(struct latency_record));
102} 140}
103 141
104static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat) 142/*
143 * Iterator to store a backtrace into a latency record entry
144 */
145static inline void store_stacktrace(struct task_struct *tsk,
146 struct latency_record *lat)
105{ 147{
106 struct stack_trace trace; 148 struct stack_trace trace;
107 149
108 memset(&trace, 0, sizeof(trace)); 150 memset(&trace, 0, sizeof(trace));
109 trace.max_entries = LT_BACKTRACEDEPTH; 151 trace.max_entries = LT_BACKTRACEDEPTH;
110 trace.entries = &lat->backtrace[0]; 152 trace.entries = &lat->backtrace[0];
111 trace.skip = 0;
112 save_stack_trace_tsk(tsk, &trace); 153 save_stack_trace_tsk(tsk, &trace);
113} 154}
114 155
156/**
 157 * __account_scheduler_latency - record an occurred latency
158 * @tsk - the task struct of the task hitting the latency
159 * @usecs - the duration of the latency in microseconds
160 * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
161 *
162 * This function is the main entry point for recording latency entries
163 * as called by the scheduler.
164 *
165 * This function has a few special cases to deal with normal 'non-latency'
166 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
167 * since this usually is caused by waiting for events via select() and co.
168 *
169 * Negative latencies (caused by time going backwards) are also explicitly
170 * skipped.
171 */
115void __sched 172void __sched
116account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) 173__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
117{ 174{
118 unsigned long flags; 175 unsigned long flags;
119 int i, q; 176 int i, q;
120 struct latency_record lat; 177 struct latency_record lat;
121 178
122 if (!latencytop_enabled)
123 return;
124
125 /* Long interruptible waits are generally user requested... */ 179 /* Long interruptible waits are generally user requested... */
126 if (inter && usecs > 5000) 180 if (inter && usecs > 5000)
127 return; 181 return;
128 182
183 /* Negative sleeps are time going backwards */
184 /* Zero-time sleeps are non-interesting */
185 if (usecs <= 0)
186 return;
187
129 memset(&lat, 0, sizeof(lat)); 188 memset(&lat, 0, sizeof(lat));
130 lat.count = 1; 189 lat.count = 1;
131 lat.time = usecs; 190 lat.time = usecs;
@@ -143,12 +202,12 @@ account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
143 if (tsk->latency_record_count >= LT_SAVECOUNT) 202 if (tsk->latency_record_count >= LT_SAVECOUNT)
144 goto out_unlock; 203 goto out_unlock;
145 204
146 for (i = 0; i < LT_SAVECOUNT ; i++) { 205 for (i = 0; i < LT_SAVECOUNT; i++) {
147 struct latency_record *mylat; 206 struct latency_record *mylat;
148 int same = 1; 207 int same = 1;
149 208
150 mylat = &tsk->latency_record[i]; 209 mylat = &tsk->latency_record[i];
151 for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { 210 for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
152 unsigned long record = lat.backtrace[q]; 211 unsigned long record = lat.backtrace[q];
153 212
154 if (mylat->backtrace[q] != record) { 213 if (mylat->backtrace[q] != record) {
@@ -186,7 +245,7 @@ static int lstats_show(struct seq_file *m, void *v)
186 for (i = 0; i < MAXLR; i++) { 245 for (i = 0; i < MAXLR; i++) {
187 if (latency_record[i].backtrace[0]) { 246 if (latency_record[i].backtrace[0]) {
188 int q; 247 int q;
189 seq_printf(m, "%i %li %li ", 248 seq_printf(m, "%i %lu %lu ",
190 latency_record[i].count, 249 latency_record[i].count,
191 latency_record[i].time, 250 latency_record[i].time,
192 latency_record[i].max); 251 latency_record[i].max);
@@ -223,7 +282,7 @@ static int lstats_open(struct inode *inode, struct file *filp)
223 return single_open(filp, lstats_show, NULL); 282 return single_open(filp, lstats_show, NULL);
224} 283}
225 284
226static struct file_operations lstats_fops = { 285static const struct file_operations lstats_fops = {
227 .open = lstats_open, 286 .open = lstats_open,
228 .read = seq_read, 287 .read = seq_read,
229 .write = lstats_write, 288 .write = lstats_write,
@@ -236,4 +295,4 @@ static int __init init_lstats_procfs(void)
236 proc_create("latency_stats", 0644, NULL, &lstats_fops); 295 proc_create("latency_stats", 0644, NULL, &lstats_fops);
237 return 0; 296 return 0;
238} 297}
239__initcall(init_lstats_procfs); 298device_initcall(init_lstats_procfs);
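The block comment added at the top of kernel/latencytop.c documents the /proc/latency_stats format: a version banner, then one line per accumulated cause carrying the hit count, total and maximum latency in microseconds, and the stringified backtrace. A small userspace sketch, assuming the format is exactly as documented there, that prints the per-entry average latency:

/* Userspace sketch (not part of the patch): read /proc/latency_stats as
 * documented above and print average/max latency per cause. */
#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/latency_stats", "r");

	if (!f)
		return 1;

	/* First line is the version banner, e.g. "Latency Top version : v0.1" */
	if (!fgets(line, sizeof(line), f)) {
		fclose(f);
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		unsigned long count, total, max;
		int consumed;

		/* count, accumulated usecs, max usecs, then the backtrace string */
		if (sscanf(line, "%lu %lu %lu %n", &count, &total, &max,
			   &consumed) != 3)
			continue;
		printf("avg %lu usec, max %lu usec: %s",
		       count ? total / count : 0, max, line + consumed);
	}

	fclose(f);
	return 0;
}
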
diff --git a/kernel/sched.c b/kernel/sched.c
index 8e2558c2ba6..9f8506d68fd 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -331,6 +331,13 @@ static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
331 */ 331 */
332static DEFINE_SPINLOCK(task_group_lock); 332static DEFINE_SPINLOCK(task_group_lock);
333 333
334#ifdef CONFIG_SMP
335static int root_task_group_empty(void)
336{
337 return list_empty(&root_task_group.children);
338}
339#endif
340
334#ifdef CONFIG_FAIR_GROUP_SCHED 341#ifdef CONFIG_FAIR_GROUP_SCHED
335#ifdef CONFIG_USER_SCHED 342#ifdef CONFIG_USER_SCHED
336# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) 343# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
@@ -391,6 +398,13 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
391 398
392#else 399#else
393 400
401#ifdef CONFIG_SMP
402static int root_task_group_empty(void)
403{
404 return 1;
405}
406#endif
407
394static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 408static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
395static inline struct task_group *task_group(struct task_struct *p) 409static inline struct task_group *task_group(struct task_struct *p)
396{ 410{
@@ -467,11 +481,17 @@ struct rt_rq {
467 struct rt_prio_array active; 481 struct rt_prio_array active;
468 unsigned long rt_nr_running; 482 unsigned long rt_nr_running;
469#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 483#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
470 int highest_prio; /* highest queued rt task prio */ 484 struct {
485 int curr; /* highest queued rt task prio */
486#ifdef CONFIG_SMP
487 int next; /* next highest */
488#endif
489 } highest_prio;
471#endif 490#endif
472#ifdef CONFIG_SMP 491#ifdef CONFIG_SMP
473 unsigned long rt_nr_migratory; 492 unsigned long rt_nr_migratory;
474 int overloaded; 493 int overloaded;
494 struct plist_head pushable_tasks;
475#endif 495#endif
476 int rt_throttled; 496 int rt_throttled;
477 u64 rt_time; 497 u64 rt_time;
@@ -549,7 +569,6 @@ struct rq {
549 unsigned long nr_running; 569 unsigned long nr_running;
550 #define CPU_LOAD_IDX_MAX 5 570 #define CPU_LOAD_IDX_MAX 5
551 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 571 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
552 unsigned char idle_at_tick;
553#ifdef CONFIG_NO_HZ 572#ifdef CONFIG_NO_HZ
554 unsigned long last_tick_seen; 573 unsigned long last_tick_seen;
555 unsigned char in_nohz_recently; 574 unsigned char in_nohz_recently;
@@ -590,6 +609,7 @@ struct rq {
590 struct root_domain *rd; 609 struct root_domain *rd;
591 struct sched_domain *sd; 610 struct sched_domain *sd;
592 611
612 unsigned char idle_at_tick;
593 /* For active balancing */ 613 /* For active balancing */
594 int active_balance; 614 int active_balance;
595 int push_cpu; 615 int push_cpu;
@@ -618,9 +638,6 @@ struct rq {
618 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ 638 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
619 639
620 /* sys_sched_yield() stats */ 640 /* sys_sched_yield() stats */
621 unsigned int yld_exp_empty;
622 unsigned int yld_act_empty;
623 unsigned int yld_both_empty;
624 unsigned int yld_count; 641 unsigned int yld_count;
625 642
626 /* schedule() stats */ 643 /* schedule() stats */
@@ -1183,10 +1200,10 @@ static void resched_task(struct task_struct *p)
1183 1200
1184 assert_spin_locked(&task_rq(p)->lock); 1201 assert_spin_locked(&task_rq(p)->lock);
1185 1202
1186 if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) 1203 if (test_tsk_need_resched(p))
1187 return; 1204 return;
1188 1205
1189 set_tsk_thread_flag(p, TIF_NEED_RESCHED); 1206 set_tsk_need_resched(p);
1190 1207
1191 cpu = task_cpu(p); 1208 cpu = task_cpu(p);
1192 if (cpu == smp_processor_id()) 1209 if (cpu == smp_processor_id())
@@ -1242,7 +1259,7 @@ void wake_up_idle_cpu(int cpu)
1242 * lockless. The worst case is that the other CPU runs the 1259 * lockless. The worst case is that the other CPU runs the
1243 * idle task through an additional NOOP schedule() 1260 * idle task through an additional NOOP schedule()
1244 */ 1261 */
1245 set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED); 1262 set_tsk_need_resched(rq->idle);
1246 1263
1247 /* NEED_RESCHED must be visible before we test polling */ 1264 /* NEED_RESCHED must be visible before we test polling */
1248 smp_mb(); 1265 smp_mb();
@@ -1610,21 +1627,42 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1610 1627
1611#endif 1628#endif
1612 1629
1630#ifdef CONFIG_PREEMPT
1631
1613/* 1632/*
1614 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 1633 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1634 * way at the expense of forcing extra atomic operations in all
1635 * invocations. This assures that the double_lock is acquired using the
1636 * same underlying policy as the spinlock_t on this architecture, which
1637 * reduces latency compared to the unfair variant below. However, it
1638 * also adds more overhead and therefore may reduce throughput.
1615 */ 1639 */
1616static int double_lock_balance(struct rq *this_rq, struct rq *busiest) 1640static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1641 __releases(this_rq->lock)
1642 __acquires(busiest->lock)
1643 __acquires(this_rq->lock)
1644{
1645 spin_unlock(&this_rq->lock);
1646 double_rq_lock(this_rq, busiest);
1647
1648 return 1;
1649}
1650
1651#else
1652/*
1653 * Unfair double_lock_balance: Optimizes throughput at the expense of
1654 * latency by eliminating extra atomic operations when the locks are
1655 * already in proper order on entry. This favors lower cpu-ids and will
1656 * grant the double lock to lower cpus over higher ids under contention,
1657 * regardless of entry order into the function.
1658 */
1659static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1617 __releases(this_rq->lock) 1660 __releases(this_rq->lock)
1618 __acquires(busiest->lock) 1661 __acquires(busiest->lock)
1619 __acquires(this_rq->lock) 1662 __acquires(this_rq->lock)
1620{ 1663{
1621 int ret = 0; 1664 int ret = 0;
1622 1665
1623 if (unlikely(!irqs_disabled())) {
1624 /* printk() doesn't work good under rq->lock */
1625 spin_unlock(&this_rq->lock);
1626 BUG_ON(1);
1627 }
1628 if (unlikely(!spin_trylock(&busiest->lock))) { 1666 if (unlikely(!spin_trylock(&busiest->lock))) {
1629 if (busiest < this_rq) { 1667 if (busiest < this_rq) {
1630 spin_unlock(&this_rq->lock); 1668 spin_unlock(&this_rq->lock);
@@ -1637,6 +1675,22 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1637 return ret; 1675 return ret;
1638} 1676}
1639 1677
1678#endif /* CONFIG_PREEMPT */
1679
1680/*
1681 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1682 */
1683static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1684{
1685 if (unlikely(!irqs_disabled())) {
1686 /* printk() doesn't work good under rq->lock */
1687 spin_unlock(&this_rq->lock);
1688 BUG_ON(1);
1689 }
1690
1691 return _double_lock_balance(this_rq, busiest);
1692}
1693
1640static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1694static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1641 __releases(busiest->lock) 1695 __releases(busiest->lock)
1642{ 1696{
@@ -1705,6 +1759,9 @@ static void update_avg(u64 *avg, u64 sample)
1705 1759
1706static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) 1760static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1707{ 1761{
1762 if (wakeup)
1763 p->se.start_runtime = p->se.sum_exec_runtime;
1764
1708 sched_info_queued(p); 1765 sched_info_queued(p);
1709 p->sched_class->enqueue_task(rq, p, wakeup); 1766 p->sched_class->enqueue_task(rq, p, wakeup);
1710 p->se.on_rq = 1; 1767 p->se.on_rq = 1;
@@ -1712,10 +1769,15 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1712 1769
1713static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) 1770static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
1714{ 1771{
1715 if (sleep && p->se.last_wakeup) { 1772 if (sleep) {
1716 update_avg(&p->se.avg_overlap, 1773 if (p->se.last_wakeup) {
1717 p->se.sum_exec_runtime - p->se.last_wakeup); 1774 update_avg(&p->se.avg_overlap,
1718 p->se.last_wakeup = 0; 1775 p->se.sum_exec_runtime - p->se.last_wakeup);
1776 p->se.last_wakeup = 0;
1777 } else {
1778 update_avg(&p->se.avg_wakeup,
1779 sysctl_sched_wakeup_granularity);
1780 }
1719 } 1781 }
1720 1782
1721 sched_info_dequeued(p); 1783 sched_info_dequeued(p);
@@ -2017,7 +2079,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2017 * it must be off the runqueue _entirely_, and not 2079 * it must be off the runqueue _entirely_, and not
2018 * preempted! 2080 * preempted!
2019 * 2081 *
2020 * So if it wa still runnable (but just not actively 2082 * So if it was still runnable (but just not actively
2021 * running right now), it's preempted, and we should 2083 * running right now), it's preempted, and we should
2022 * yield - it could be a while. 2084 * yield - it could be a while.
2023 */ 2085 */
@@ -2267,7 +2329,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
2267 sync = 0; 2329 sync = 0;
2268 2330
2269#ifdef CONFIG_SMP 2331#ifdef CONFIG_SMP
2270 if (sched_feat(LB_WAKEUP_UPDATE)) { 2332 if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
2271 struct sched_domain *sd; 2333 struct sched_domain *sd;
2272 2334
2273 this_cpu = raw_smp_processor_id(); 2335 this_cpu = raw_smp_processor_id();
@@ -2345,6 +2407,22 @@ out_activate:
2345 activate_task(rq, p, 1); 2407 activate_task(rq, p, 1);
2346 success = 1; 2408 success = 1;
2347 2409
2410 /*
2411 * Only attribute actual wakeups done by this task.
2412 */
2413 if (!in_interrupt()) {
2414 struct sched_entity *se = &current->se;
2415 u64 sample = se->sum_exec_runtime;
2416
2417 if (se->last_wakeup)
2418 sample -= se->last_wakeup;
2419 else
2420 sample -= se->start_runtime;
2421 update_avg(&se->avg_wakeup, sample);
2422
2423 se->last_wakeup = se->sum_exec_runtime;
2424 }
2425
2348out_running: 2426out_running:
2349 trace_sched_wakeup(rq, p, success); 2427 trace_sched_wakeup(rq, p, success);
2350 check_preempt_curr(rq, p, sync); 2428 check_preempt_curr(rq, p, sync);
@@ -2355,8 +2433,6 @@ out_running:
2355 p->sched_class->task_wake_up(rq, p); 2433 p->sched_class->task_wake_up(rq, p);
2356#endif 2434#endif
2357out: 2435out:
2358 current->se.last_wakeup = current->se.sum_exec_runtime;
2359
2360 task_rq_unlock(rq, &flags); 2436 task_rq_unlock(rq, &flags);
2361 2437
2362 return success; 2438 return success;
@@ -2386,6 +2462,8 @@ static void __sched_fork(struct task_struct *p)
2386 p->se.prev_sum_exec_runtime = 0; 2462 p->se.prev_sum_exec_runtime = 0;
2387 p->se.last_wakeup = 0; 2463 p->se.last_wakeup = 0;
2388 p->se.avg_overlap = 0; 2464 p->se.avg_overlap = 0;
2465 p->se.start_runtime = 0;
2466 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
2389 2467
2390#ifdef CONFIG_SCHEDSTATS 2468#ifdef CONFIG_SCHEDSTATS
2391 p->se.wait_start = 0; 2469 p->se.wait_start = 0;
@@ -2448,6 +2526,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
2448 /* Want to start with kernel preemption disabled. */ 2526 /* Want to start with kernel preemption disabled. */
2449 task_thread_info(p)->preempt_count = 1; 2527 task_thread_info(p)->preempt_count = 1;
2450#endif 2528#endif
2529 plist_node_init(&p->pushable_tasks, MAX_PRIO);
2530
2451 put_cpu(); 2531 put_cpu();
2452} 2532}
2453 2533
@@ -2491,7 +2571,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2491#ifdef CONFIG_PREEMPT_NOTIFIERS 2571#ifdef CONFIG_PREEMPT_NOTIFIERS
2492 2572
2493/** 2573/**
2494 * preempt_notifier_register - tell me when current is being being preempted & rescheduled 2574 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2495 * @notifier: notifier struct to register 2575 * @notifier: notifier struct to register
2496 */ 2576 */
2497void preempt_notifier_register(struct preempt_notifier *notifier) 2577void preempt_notifier_register(struct preempt_notifier *notifier)
@@ -2588,6 +2668,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2588{ 2668{
2589 struct mm_struct *mm = rq->prev_mm; 2669 struct mm_struct *mm = rq->prev_mm;
2590 long prev_state; 2670 long prev_state;
2671#ifdef CONFIG_SMP
2672 int post_schedule = 0;
2673
2674 if (current->sched_class->needs_post_schedule)
2675 post_schedule = current->sched_class->needs_post_schedule(rq);
2676#endif
2591 2677
2592 rq->prev_mm = NULL; 2678 rq->prev_mm = NULL;
2593 2679
@@ -2606,7 +2692,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2606 finish_arch_switch(prev); 2692 finish_arch_switch(prev);
2607 finish_lock_switch(rq, prev); 2693 finish_lock_switch(rq, prev);
2608#ifdef CONFIG_SMP 2694#ifdef CONFIG_SMP
2609 if (current->sched_class->post_schedule) 2695 if (post_schedule)
2610 current->sched_class->post_schedule(rq); 2696 current->sched_class->post_schedule(rq);
2611#endif 2697#endif
2612 2698
@@ -2913,6 +2999,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2913 struct sched_domain *sd, enum cpu_idle_type idle, 2999 struct sched_domain *sd, enum cpu_idle_type idle,
2914 int *all_pinned) 3000 int *all_pinned)
2915{ 3001{
3002 int tsk_cache_hot = 0;
2916 /* 3003 /*
2917 * We do not migrate tasks that are: 3004 * We do not migrate tasks that are:
2918 * 1) running (obviously), or 3005 * 1) running (obviously), or
@@ -2936,10 +3023,11 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2936 * 2) too many balance attempts have failed. 3023 * 2) too many balance attempts have failed.
2937 */ 3024 */
2938 3025
2939 if (!task_hot(p, rq->clock, sd) || 3026 tsk_cache_hot = task_hot(p, rq->clock, sd);
2940 sd->nr_balance_failed > sd->cache_nice_tries) { 3027 if (!tsk_cache_hot ||
3028 sd->nr_balance_failed > sd->cache_nice_tries) {
2941#ifdef CONFIG_SCHEDSTATS 3029#ifdef CONFIG_SCHEDSTATS
2942 if (task_hot(p, rq->clock, sd)) { 3030 if (tsk_cache_hot) {
2943 schedstat_inc(sd, lb_hot_gained[idle]); 3031 schedstat_inc(sd, lb_hot_gained[idle]);
2944 schedstat_inc(p, se.nr_forced_migrations); 3032 schedstat_inc(p, se.nr_forced_migrations);
2945 } 3033 }
@@ -2947,7 +3035,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2947 return 1; 3035 return 1;
2948 } 3036 }
2949 3037
2950 if (task_hot(p, rq->clock, sd)) { 3038 if (tsk_cache_hot) {
2951 schedstat_inc(p, se.nr_failed_migrations_hot); 3039 schedstat_inc(p, se.nr_failed_migrations_hot);
2952 return 0; 3040 return 0;
2953 } 3041 }
@@ -2987,6 +3075,16 @@ next:
2987 pulled++; 3075 pulled++;
2988 rem_load_move -= p->se.load.weight; 3076 rem_load_move -= p->se.load.weight;
2989 3077
3078#ifdef CONFIG_PREEMPT
3079 /*
3080 * NEWIDLE balancing is a source of latency, so preemptible kernels
3081 * will stop after the first task is pulled to minimize the critical
3082 * section.
3083 */
3084 if (idle == CPU_NEWLY_IDLE)
3085 goto out;
3086#endif
3087
2990 /* 3088 /*
2991 * We only want to steal up to the prescribed amount of weighted load. 3089 * We only want to steal up to the prescribed amount of weighted load.
2992 */ 3090 */
@@ -3033,9 +3131,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3033 sd, idle, all_pinned, &this_best_prio); 3131 sd, idle, all_pinned, &this_best_prio);
3034 class = class->next; 3132 class = class->next;
3035 3133
3134#ifdef CONFIG_PREEMPT
3135 /*
3136 * NEWIDLE balancing is a source of latency, so preemptible
3137 * kernels will stop after the first task is pulled to minimize
3138 * the critical section.
3139 */
3036 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) 3140 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
3037 break; 3141 break;
3038 3142#endif
3039 } while (class && max_load_move > total_load_moved); 3143 } while (class && max_load_move > total_load_moved);
3040 3144
3041 return total_load_moved > 0; 3145 return total_load_moved > 0;
@@ -3085,246 +3189,479 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3085 3189
3086 return 0; 3190 return 0;
3087} 3191}
3192/********** Helpers for find_busiest_group ************************/
3193/**
3194 * sd_lb_stats - Structure to store the statistics of a sched_domain
3195 * during load balancing.
3196 */
3197struct sd_lb_stats {
3198 struct sched_group *busiest; /* Busiest group in this sd */
3199 struct sched_group *this; /* Local group in this sd */
3200 unsigned long total_load; /* Total load of all groups in sd */
3201 unsigned long total_pwr; /* Total power of all groups in sd */
3202 unsigned long avg_load; /* Average load across all groups in sd */
3203
3204 /** Statistics of this group */
3205 unsigned long this_load;
3206 unsigned long this_load_per_task;
3207 unsigned long this_nr_running;
3208
3209 /* Statistics of the busiest group */
3210 unsigned long max_load;
3211 unsigned long busiest_load_per_task;
3212 unsigned long busiest_nr_running;
3213
3214 int group_imb; /* Is there imbalance in this sd */
3215#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3216 int power_savings_balance; /* Is powersave balance needed for this sd */
3217 struct sched_group *group_min; /* Least loaded group in sd */
3218 struct sched_group *group_leader; /* Group which relieves group_min */
3219 unsigned long min_load_per_task; /* load_per_task in group_min */
3220 unsigned long leader_nr_running; /* Nr running of group_leader */
3221 unsigned long min_nr_running; /* Nr running of group_min */
3222#endif
3223};
3088 3224
3089/* 3225/**
3090 * find_busiest_group finds and returns the busiest CPU group within the 3226 * sg_lb_stats - stats of a sched_group required for load_balancing
3091 * domain. It calculates and returns the amount of weighted load which 3227 */
3092 * should be moved to restore balance via the imbalance parameter. 3228struct sg_lb_stats {
3229 unsigned long avg_load; /*Avg load across the CPUs of the group */
3230 unsigned long group_load; /* Total load over the CPUs of the group */
3231 unsigned long sum_nr_running; /* Nr tasks running in the group */
3232 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
3233 unsigned long group_capacity;
3234 int group_imb; /* Is there an imbalance in the group ? */
3235};
3236
3237/**
3238 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
3239 * @group: The group whose first cpu is to be returned.
3093 */ 3240 */
3094static struct sched_group * 3241static inline unsigned int group_first_cpu(struct sched_group *group)
3095find_busiest_group(struct sched_domain *sd, int this_cpu,
3096 unsigned long *imbalance, enum cpu_idle_type idle,
3097 int *sd_idle, const struct cpumask *cpus, int *balance)
3098{ 3242{
3099 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; 3243 return cpumask_first(sched_group_cpus(group));
3100 unsigned long max_load, avg_load, total_load, this_load, total_pwr; 3244}
3101 unsigned long max_pull;
3102 unsigned long busiest_load_per_task, busiest_nr_running;
3103 unsigned long this_load_per_task, this_nr_running;
3104 int load_idx, group_imb = 0;
3105#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3106 int power_savings_balance = 1;
3107 unsigned long leader_nr_running = 0, min_load_per_task = 0;
3108 unsigned long min_nr_running = ULONG_MAX;
3109 struct sched_group *group_min = NULL, *group_leader = NULL;
3110#endif
3111 3245
3112 max_load = this_load = total_load = total_pwr = 0; 3246/**
3113 busiest_load_per_task = busiest_nr_running = 0; 3247 * get_sd_load_idx - Obtain the load index for a given sched domain.
3114 this_load_per_task = this_nr_running = 0; 3248 * @sd: The sched_domain whose load_idx is to be obtained.
3249 * @idle: The Idle status of the CPU for whose sd load_icx is obtained.
3250 */
3251static inline int get_sd_load_idx(struct sched_domain *sd,
3252 enum cpu_idle_type idle)
3253{
3254 int load_idx;
3115 3255
3116 if (idle == CPU_NOT_IDLE) 3256 switch (idle) {
3257 case CPU_NOT_IDLE:
3117 load_idx = sd->busy_idx; 3258 load_idx = sd->busy_idx;
3118 else if (idle == CPU_NEWLY_IDLE) 3259 break;
3260
3261 case CPU_NEWLY_IDLE:
3119 load_idx = sd->newidle_idx; 3262 load_idx = sd->newidle_idx;
3120 else 3263 break;
3264 default:
3121 load_idx = sd->idle_idx; 3265 load_idx = sd->idle_idx;
3266 break;
3267 }
3122 3268
3123 do { 3269 return load_idx;
3124 unsigned long load, group_capacity, max_cpu_load, min_cpu_load; 3270}
3125 int local_group;
3126 int i;
3127 int __group_imb = 0;
3128 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3129 unsigned long sum_nr_running, sum_weighted_load;
3130 unsigned long sum_avg_load_per_task;
3131 unsigned long avg_load_per_task;
3132 3271
3133 local_group = cpumask_test_cpu(this_cpu,
3134 sched_group_cpus(group));
3135 3272
3136 if (local_group) 3273#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3137 balance_cpu = cpumask_first(sched_group_cpus(group)); 3274/**
3275 * init_sd_power_savings_stats - Initialize power savings statistics for
3276 * the given sched_domain, during load balancing.
3277 *
3278 * @sd: Sched domain whose power-savings statistics are to be initialized.
3279 * @sds: Variable containing the statistics for sd.
3280 * @idle: Idle status of the CPU at which we're performing load-balancing.
3281 */
3282static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3283 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3284{
3285 /*
3286 * Busy processors will not participate in power savings
3287 * balance.
3288 */
3289 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3290 sds->power_savings_balance = 0;
3291 else {
3292 sds->power_savings_balance = 1;
3293 sds->min_nr_running = ULONG_MAX;
3294 sds->leader_nr_running = 0;
3295 }
3296}
3138 3297
3139 /* Tally up the load of all CPUs in the group */ 3298/**
3140 sum_weighted_load = sum_nr_running = avg_load = 0; 3299 * update_sd_power_savings_stats - Update the power saving stats for a
3141 sum_avg_load_per_task = avg_load_per_task = 0; 3300 * sched_domain while performing load balancing.
3301 *
3302 * @group: sched_group belonging to the sched_domain under consideration.
3303 * @sds: Variable containing the statistics of the sched_domain
3304 * @local_group: Does group contain the CPU for which we're performing
3305 * load balancing ?
3306 * @sgs: Variable containing the statistics of the group.
3307 */
3308static inline void update_sd_power_savings_stats(struct sched_group *group,
3309 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3310{
3142 3311
3143 max_cpu_load = 0; 3312 if (!sds->power_savings_balance)
3144 min_cpu_load = ~0UL; 3313 return;
3145 3314
3146 for_each_cpu_and(i, sched_group_cpus(group), cpus) { 3315 /*
3147 struct rq *rq = cpu_rq(i); 3316 * If the local group is idle or completely loaded
3317 * no need to do power savings balance at this domain
3318 */
3319 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
3320 !sds->this_nr_running))
3321 sds->power_savings_balance = 0;
3148 3322
3149 if (*sd_idle && rq->nr_running) 3323 /*
3150 *sd_idle = 0; 3324 * If a group is already running at full capacity or idle,
3325 * don't include that group in power savings calculations
3326 */
3327 if (!sds->power_savings_balance ||
3328 sgs->sum_nr_running >= sgs->group_capacity ||
3329 !sgs->sum_nr_running)
3330 return;
3151 3331
3152 /* Bias balancing toward cpus of our domain */ 3332 /*
3153 if (local_group) { 3333 * Calculate the group which has the least non-idle load.
3154 if (idle_cpu(i) && !first_idle_cpu) { 3334 * This is the group from where we need to pick up the load
3155 first_idle_cpu = 1; 3335 * for saving power
3156 balance_cpu = i; 3336 */
3157 } 3337 if ((sgs->sum_nr_running < sds->min_nr_running) ||
3338 (sgs->sum_nr_running == sds->min_nr_running &&
3339 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
3340 sds->group_min = group;
3341 sds->min_nr_running = sgs->sum_nr_running;
3342 sds->min_load_per_task = sgs->sum_weighted_load /
3343 sgs->sum_nr_running;
3344 }
3158 3345
3159 load = target_load(i, load_idx); 3346 /*
3160 } else { 3347 * Calculate the group which is almost near its
3161 load = source_load(i, load_idx); 3348 * capacity but still has some space to pick up some load
3162 if (load > max_cpu_load) 3349 * from other group and save more power
3163 max_cpu_load = load; 3350 */
3164 if (min_cpu_load > load) 3351 if (sgs->sum_nr_running > sgs->group_capacity - 1)
3165 min_cpu_load = load; 3352 return;
3166 }
3167 3353
3168 avg_load += load; 3354 if (sgs->sum_nr_running > sds->leader_nr_running ||
3169 sum_nr_running += rq->nr_running; 3355 (sgs->sum_nr_running == sds->leader_nr_running &&
3170 sum_weighted_load += weighted_cpuload(i); 3356 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
3357 sds->group_leader = group;
3358 sds->leader_nr_running = sgs->sum_nr_running;
3359 }
3360}
3171 3361
3172 sum_avg_load_per_task += cpu_avg_load_per_task(i); 3362/**
3173 } 3363 * check_power_save_busiest_group - Check if we have potential to perform
3364 * some power-savings balance. If yes, set the busiest group to be
3365 * the least loaded group in the sched_domain, so that it's CPUs can
3366 * be put to idle.
3367 *
3368 * @sds: Variable containing the statistics of the sched_domain
3369 * under consideration.
3370 * @this_cpu: Cpu at which we're currently performing load-balancing.
3371 * @imbalance: Variable to store the imbalance.
3372 *
3373 * Returns 1 if there is potential to perform power-savings balance.
3374 * Else returns 0.
3375 */
3376static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3377 int this_cpu, unsigned long *imbalance)
3378{
3379 if (!sds->power_savings_balance)
3380 return 0;
3174 3381
3175 /* 3382 if (sds->this != sds->group_leader ||
3176 * First idle cpu or the first cpu(busiest) in this sched group 3383 sds->group_leader == sds->group_min)
3177 * is eligible for doing load balancing at this and above 3384 return 0;
3178 * domains. In the newly idle case, we will allow all the cpu's
3179 * to do the newly idle load balance.
3180 */
3181 if (idle != CPU_NEWLY_IDLE && local_group &&
3182 balance_cpu != this_cpu && balance) {
3183 *balance = 0;
3184 goto ret;
3185 }
3186 3385
3187 total_load += avg_load; 3386 *imbalance = sds->min_load_per_task;
3188 total_pwr += group->__cpu_power; 3387 sds->busiest = sds->group_min;
3189 3388
3190 /* Adjust by relative CPU power of the group */ 3389 if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
3191 avg_load = sg_div_cpu_power(group, 3390 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
3192 avg_load * SCHED_LOAD_SCALE); 3391 group_first_cpu(sds->group_leader);
3392 }
3193 3393
3394 return 1;
3194 3395
3195 /* 3396}
3196 * Consider the group unbalanced when the imbalance is larger 3397#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3197 * than the average weight of two tasks. 3398static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3198 * 3399 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3199 * APZ: with cgroup the avg task weight can vary wildly and 3400{
3200 * might not be a suitable number - should we keep a 3401 return;
3201 * normalized nr_running number somewhere that negates 3402}
3202 * the hierarchy?
3203 */
3204 avg_load_per_task = sg_div_cpu_power(group,
3205 sum_avg_load_per_task * SCHED_LOAD_SCALE);
3206 3403
3207 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task) 3404static inline void update_sd_power_savings_stats(struct sched_group *group,
3208 __group_imb = 1; 3405 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3406{
3407 return;
3408}
3409
3410static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3411 int this_cpu, unsigned long *imbalance)
3412{
3413 return 0;
3414}
3415#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3209 3416
3210 group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
3211 3417
3418/**
3419 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3420 * @group: sched_group whose statistics are to be updated.
3421 * @this_cpu: Cpu for which load balance is currently performed.
3422 * @idle: Idle status of this_cpu
3423 * @load_idx: Load index of sched_domain of this_cpu for load calc.
3424 * @sd_idle: Idle status of the sched_domain containing group.
3425 * @local_group: Does group contain this_cpu.
3426 * @cpus: Set of cpus considered for load balancing.
3427 * @balance: Should we balance.
3428 * @sgs: variable to hold the statistics for this group.
3429 */
3430static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
3431 enum cpu_idle_type idle, int load_idx, int *sd_idle,
3432 int local_group, const struct cpumask *cpus,
3433 int *balance, struct sg_lb_stats *sgs)
3434{
3435 unsigned long load, max_cpu_load, min_cpu_load;
3436 int i;
3437 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3438 unsigned long sum_avg_load_per_task;
3439 unsigned long avg_load_per_task;
3440
3441 if (local_group)
3442 balance_cpu = group_first_cpu(group);
3443
3444 /* Tally up the load of all CPUs in the group */
3445 sum_avg_load_per_task = avg_load_per_task = 0;
3446 max_cpu_load = 0;
3447 min_cpu_load = ~0UL;
3448
3449 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3450 struct rq *rq = cpu_rq(i);
3451
3452 if (*sd_idle && rq->nr_running)
3453 *sd_idle = 0;
3454
3455 /* Bias balancing toward cpus of our domain */
3212 if (local_group) { 3456 if (local_group) {
3213 this_load = avg_load; 3457 if (idle_cpu(i) && !first_idle_cpu) {
3214 this = group; 3458 first_idle_cpu = 1;
3215 this_nr_running = sum_nr_running; 3459 balance_cpu = i;
3216 this_load_per_task = sum_weighted_load; 3460 }
3217 } else if (avg_load > max_load && 3461
3218 (sum_nr_running > group_capacity || __group_imb)) { 3462 load = target_load(i, load_idx);
3219 max_load = avg_load; 3463 } else {
3220 busiest = group; 3464 load = source_load(i, load_idx);
3221 busiest_nr_running = sum_nr_running; 3465 if (load > max_cpu_load)
3222 busiest_load_per_task = sum_weighted_load; 3466 max_cpu_load = load;
3223 group_imb = __group_imb; 3467 if (min_cpu_load > load)
3468 min_cpu_load = load;
3224 } 3469 }
3225 3470
3226#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 3471 sgs->group_load += load;
3227 /* 3472 sgs->sum_nr_running += rq->nr_running;
3228 * Busy processors will not participate in power savings 3473 sgs->sum_weighted_load += weighted_cpuload(i);
3229 * balance.
3230 */
3231 if (idle == CPU_NOT_IDLE ||
3232 !(sd->flags & SD_POWERSAVINGS_BALANCE))
3233 goto group_next;
3234 3474
3235 /* 3475 sum_avg_load_per_task += cpu_avg_load_per_task(i);
3236 * If the local group is idle or completely loaded 3476 }
3237 * no need to do power savings balance at this domain
3238 */
3239 if (local_group && (this_nr_running >= group_capacity ||
3240 !this_nr_running))
3241 power_savings_balance = 0;
3242 3477
3243 /* 3478 /*
3244 * If a group is already running at full capacity or idle, 3479 * First idle cpu or the first cpu(busiest) in this sched group
3245 * don't include that group in power savings calculations 3480 * is eligible for doing load balancing at this and above
3246 */ 3481 * domains. In the newly idle case, we will allow all the cpu's
3247 if (!power_savings_balance || sum_nr_running >= group_capacity 3482 * to do the newly idle load balance.
3248 || !sum_nr_running) 3483 */
3249 goto group_next; 3484 if (idle != CPU_NEWLY_IDLE && local_group &&
3485 balance_cpu != this_cpu && balance) {
3486 *balance = 0;
3487 return;
3488 }
3250 3489
3251 /* 3490 /* Adjust by relative CPU power of the group */
3252 * Calculate the group which has the least non-idle load. 3491 sgs->avg_load = sg_div_cpu_power(group,
3253 * This is the group from where we need to pick up the load 3492 sgs->group_load * SCHED_LOAD_SCALE);
3254 * for saving power
3255 */
3256 if ((sum_nr_running < min_nr_running) ||
3257 (sum_nr_running == min_nr_running &&
3258 cpumask_first(sched_group_cpus(group)) >
3259 cpumask_first(sched_group_cpus(group_min)))) {
3260 group_min = group;
3261 min_nr_running = sum_nr_running;
3262 min_load_per_task = sum_weighted_load /
3263 sum_nr_running;
3264 }
3265 3493
3266 /* 3494
3267 * Calculate the group which is almost near its 3495 /*
3268 * capacity but still has some space to pick up some load 3496 * Consider the group unbalanced when the imbalance is larger
3269 * from other group and save more power 3497 * than the average weight of two tasks.
3270 */ 3498 *
3271 if (sum_nr_running <= group_capacity - 1) { 3499 * APZ: with cgroup the avg task weight can vary wildly and
3272 if (sum_nr_running > leader_nr_running || 3500 * might not be a suitable number - should we keep a
3273 (sum_nr_running == leader_nr_running && 3501 * normalized nr_running number somewhere that negates
3274 cpumask_first(sched_group_cpus(group)) < 3502 * the hierarchy?
3275 cpumask_first(sched_group_cpus(group_leader)))) { 3503 */
3276 group_leader = group; 3504 avg_load_per_task = sg_div_cpu_power(group,
3277 leader_nr_running = sum_nr_running; 3505 sum_avg_load_per_task * SCHED_LOAD_SCALE);
3278 } 3506
3507 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3508 sgs->group_imb = 1;
3509
3510 sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
3511
3512}
3513
3514/**
3515 * update_sd_lb_stats - Update sched_group's statistics for load balancing.
3516 * @sd: sched_domain whose statistics are to be updated.
3517 * @this_cpu: Cpu for which load balance is currently performed.
3518 * @idle: Idle status of this_cpu
3519 * @sd_idle: Idle status of the sched_domain containing group.
3520 * @cpus: Set of cpus considered for load balancing.
3521 * @balance: Should we balance.
3522 * @sds: variable to hold the statistics for this sched_domain.
3523 */
3524static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3525 enum cpu_idle_type idle, int *sd_idle,
3526 const struct cpumask *cpus, int *balance,
3527 struct sd_lb_stats *sds)
3528{
3529 struct sched_group *group = sd->groups;
3530 struct sg_lb_stats sgs;
3531 int load_idx;
3532
3533 init_sd_power_savings_stats(sd, sds, idle);
3534 load_idx = get_sd_load_idx(sd, idle);
3535
3536 do {
3537 int local_group;
3538
3539 local_group = cpumask_test_cpu(this_cpu,
3540 sched_group_cpus(group));
3541 memset(&sgs, 0, sizeof(sgs));
3542 update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
3543 local_group, cpus, balance, &sgs);
3544
3545 if (local_group && balance && !(*balance))
3546 return;
3547
3548 sds->total_load += sgs.group_load;
3549 sds->total_pwr += group->__cpu_power;
3550
3551 if (local_group) {
3552 sds->this_load = sgs.avg_load;
3553 sds->this = group;
3554 sds->this_nr_running = sgs.sum_nr_running;
3555 sds->this_load_per_task = sgs.sum_weighted_load;
3556 } else if (sgs.avg_load > sds->max_load &&
3557 (sgs.sum_nr_running > sgs.group_capacity ||
3558 sgs.group_imb)) {
3559 sds->max_load = sgs.avg_load;
3560 sds->busiest = group;
3561 sds->busiest_nr_running = sgs.sum_nr_running;
3562 sds->busiest_load_per_task = sgs.sum_weighted_load;
3563 sds->group_imb = sgs.group_imb;
3279 }
3280group_next:
3281#endif
3282 group = group->next;
3283 } while (group != sd->groups);
3284
3285 if (!busiest || this_load >= max_load || busiest_nr_running == 0)
3564 }
3565
3566 update_sd_power_savings_stats(group, sds, local_group, &sgs);
3567 group = group->next;
3568 } while (group != sd->groups);
3569
3570}
3286 goto out_balanced;
3287
3288 avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
3289 3571
3290 if (this_load >= avg_load ||
3291 100*max_load <= sd->imbalance_pct*this_load)
3292 goto out_balanced;
3572/**
3573 * fix_small_imbalance - Calculate the minor imbalance that exists
3574 * amongst the groups of a sched_domain, during
3575 * load balancing.
3576 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
3577 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
3578 * @imbalance: Variable to store the imbalance.
3579 */
3580static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3581 int this_cpu, unsigned long *imbalance)
3582{
3583 unsigned long tmp, pwr_now = 0, pwr_move = 0;
3584 unsigned int imbn = 2;
3585
3586 if (sds->this_nr_running) {
3587 sds->this_load_per_task /= sds->this_nr_running;
3588 if (sds->busiest_load_per_task >
3589 sds->this_load_per_task)
3590 imbn = 1;
3591 } else
3592 sds->this_load_per_task =
3593 cpu_avg_load_per_task(this_cpu);
3293 3594
3294 busiest_load_per_task /= busiest_nr_running;
3295 if (group_imb)
3296 busiest_load_per_task = min(busiest_load_per_task, avg_load);
3595 if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
3596 sds->busiest_load_per_task * imbn) {
3597 *imbalance = sds->busiest_load_per_task;
3598 return;
3599 }
3297 3600
3298 /*
3299 * We're trying to get all the cpus to the average_load, so we don't
3300 * want to push ourselves above the average load, nor do we wish to
3301 * reduce the max loaded cpu below the average load, as either of these
3302 * actions would just result in more rebalancing later, and ping-pong
3303 * tasks around. Thus we look for the minimum possible imbalance.
3304 * Negative imbalances (*we* are more loaded than anyone else) will
3305 * be counted as no imbalance for these purposes -- we can't fix that
3306 * by pulling tasks to us. Be careful of negative numbers as they'll
3307 * appear as very large values with unsigned longs.
3308 */
3309 if (max_load <= busiest_load_per_task)
3310 goto out_balanced;
3601 /*
3602 * OK, we don't have enough imbalance to justify moving tasks,
3603 * however we may be able to increase total CPU power used by
3604 * moving them.
3605 */
3311 3606
3607 pwr_now += sds->busiest->__cpu_power *
3608 min(sds->busiest_load_per_task, sds->max_load);
3609 pwr_now += sds->this->__cpu_power *
3610 min(sds->this_load_per_task, sds->this_load);
3611 pwr_now /= SCHED_LOAD_SCALE;
3612
3613 /* Amount of load we'd subtract */
3614 tmp = sg_div_cpu_power(sds->busiest,
3615 sds->busiest_load_per_task * SCHED_LOAD_SCALE);
3616 if (sds->max_load > tmp)
3617 pwr_move += sds->busiest->__cpu_power *
3618 min(sds->busiest_load_per_task, sds->max_load - tmp);
3619
3620 /* Amount of load we'd add */
3621 if (sds->max_load * sds->busiest->__cpu_power <
3622 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
3623 tmp = sg_div_cpu_power(sds->this,
3624 sds->max_load * sds->busiest->__cpu_power);
3625 else
3626 tmp = sg_div_cpu_power(sds->this,
3627 sds->busiest_load_per_task * SCHED_LOAD_SCALE);
3628 pwr_move += sds->this->__cpu_power *
3629 min(sds->this_load_per_task, sds->this_load + tmp);
3630 pwr_move /= SCHED_LOAD_SCALE;
3631
3632 /* Move if we gain throughput */
3633 if (pwr_move > pwr_now)
3634 *imbalance = sds->busiest_load_per_task;
3635}
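fix_small_imbalance() above decides whether moving a whole task still improves total throughput even when the computed imbalance is tiny. The sketch below is not kernel code; it redoes the pwr_now/pwr_move comparison with plain numbers, assuming both groups run at the default cpu power so the SCHED_LOAD_SCALE factors cancel:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        /* Two tasks of weight 1024 on the busiest group's cpu, local cpu
         * idle; purely illustrative values. */
        unsigned long busiest_load_per_task = 1024, max_load = 2048;
        unsigned long this_load_per_task = 1024, this_load = 0;

        unsigned long pwr_now = min_ul(busiest_load_per_task, max_load) +
                                min_ul(this_load_per_task, this_load);

        /* After moving one task: busiest loses one task's load, we gain it. */
        unsigned long pwr_move =
                min_ul(busiest_load_per_task, max_load - busiest_load_per_task) +
                min_ul(this_load_per_task, this_load + busiest_load_per_task);

        /* 2048 > 1024, so moving one task increases total CPU power used. */
        printf("pwr_now=%lu pwr_move=%lu -> move=%d\n",
               pwr_now, pwr_move, pwr_move > pwr_now);
        return 0;
}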
3636
3637/**
3638 * calculate_imbalance - Calculate the amount of imbalance present within the
3639 * groups of a given sched_domain during load balance.
3640 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
3641 * @this_cpu: Cpu for which currently load balance is being performed.
3642 * @imbalance: The variable to store the imbalance.
3643 */
3644static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3645 unsigned long *imbalance)
3646{
3647 unsigned long max_pull;
3312 /*
3313 * In the presence of smp nice balancing, certain scenarios can have
3314 * max load less than avg load(as we skip the groups at or below
3315 * its cpu_power, while calculating max_load..)
3316 */
3317 if (max_load < avg_load) {
3318 *imbalance = 0;
3319 goto small_imbalance;
3320 }
3321
3322 /* Don't want to pull so many tasks that a group would go idle */
3323 max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
3648 /*
3649 * In the presence of smp nice balancing, certain scenarios can have
3650 * max load less than avg load(as we skip the groups at or below
3651 * its cpu_power, while calculating max_load..)
3652 */
3653 if (sds->max_load < sds->avg_load) {
3654 *imbalance = 0;
3655 return fix_small_imbalance(sds, this_cpu, imbalance);
3656 }
3657
3658 /* Don't want to pull so many tasks that a group would go idle */
3659 max_pull = min(sds->max_load - sds->avg_load,
3660 sds->max_load - sds->busiest_load_per_task);
3324 3661
3325 /* How much load to actually move to equalise the imbalance */
3326 *imbalance = min(max_pull * busiest->__cpu_power,
3327 (avg_load - this_load) * this->__cpu_power)
3328 / SCHED_LOAD_SCALE;
3662 /* How much load to actually move to equalise the imbalance */
3663 *imbalance = min(max_pull * sds->busiest->__cpu_power,
3664 (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
3665 / SCHED_LOAD_SCALE;
3329 3666
3330 /* 3667 /*
@@ -3333,78 +3670,110 @@ group_next:
3333 * a think about bumping its value to force at least one task to be 3670 * a think about bumping its value to force at least one task to be
3334 * moved 3671 * moved
3335 */ 3672 */
3336 if (*imbalance < busiest_load_per_task) {
3337 unsigned long tmp, pwr_now, pwr_move;
3338 unsigned int imbn;
3339
3340small_imbalance:
3341 pwr_move = pwr_now = 0;
3342 imbn = 2;
3343 if (this_nr_running) {
3344 this_load_per_task /= this_nr_running;
3345 if (busiest_load_per_task > this_load_per_task)
3346 imbn = 1;
3347 } else
3348 this_load_per_task = cpu_avg_load_per_task(this_cpu);
3673 if (*imbalance < sds->busiest_load_per_task)
3674 return fix_small_imbalance(sds, this_cpu, imbalance);
3349 3675
3350 if (max_load - this_load + busiest_load_per_task >=
3351 busiest_load_per_task * imbn) {
3352 *imbalance = busiest_load_per_task;
3353 return busiest;
3354 }
3355
3356 /*
3357 * OK, we don't have enough imbalance to justify moving tasks,
3358 * however we may be able to increase total CPU power used by
3359 * moving them.
3360 */
3676}
3677/******* find_busiest_group() helpers end here *********************/
3678
3679/**
3680 * find_busiest_group - Returns the busiest group within the sched_domain
3681 * if there is an imbalance. If there isn't an imbalance, and
3682 * the user has opted for power-savings, it returns a group whose
3683 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
3684 * such a group exists.
3685 *
3686 * Also calculates the amount of weighted load which should be moved
3687 * to restore balance.
3688 *
3689 * @sd: The sched_domain whose busiest group is to be returned.
3690 * @this_cpu: The cpu for which load balancing is currently being performed.
3691 * @imbalance: Variable which stores amount of weighted load which should
3692 * be moved to restore balance/put a group to idle.
3693 * @idle: The idle status of this_cpu.
3694 * @sd_idle: The idleness of sd
3695 * @cpus: The set of CPUs under consideration for load-balancing.
3696 * @balance: Pointer to a variable indicating if this_cpu
3697 * is the appropriate cpu to perform load balancing at this_level.
3698 *
3699 * Returns: - the busiest group if imbalance exists.
3700 * - If no imbalance and user has opted for power-savings balance,
3701 * return the least loaded group whose CPUs can be
3702 * put to idle by rebalancing its tasks onto our group.
3703 */
3704static struct sched_group *
3705find_busiest_group(struct sched_domain *sd, int this_cpu,
3706 unsigned long *imbalance, enum cpu_idle_type idle,
3707 int *sd_idle, const struct cpumask *cpus, int *balance)
3708{
3709 struct sd_lb_stats sds;
3361 3710
3362 pwr_now += busiest->__cpu_power *
3363 min(busiest_load_per_task, max_load);
3364 pwr_now += this->__cpu_power *
3365 min(this_load_per_task, this_load);
3366 pwr_now /= SCHED_LOAD_SCALE;
3367
3368 /* Amount of load we'd subtract */
3369 tmp = sg_div_cpu_power(busiest,
3370 busiest_load_per_task * SCHED_LOAD_SCALE);
3371 if (max_load > tmp)
3372 pwr_move += busiest->__cpu_power *
3373 min(busiest_load_per_task, max_load - tmp);
3374
3375 /* Amount of load we'd add */
3376 if (max_load * busiest->__cpu_power <
3377 busiest_load_per_task * SCHED_LOAD_SCALE)
3378 tmp = sg_div_cpu_power(this,
3379 max_load * busiest->__cpu_power);
3380 else
3381 tmp = sg_div_cpu_power(this,
3382 busiest_load_per_task * SCHED_LOAD_SCALE);
3383 pwr_move += this->__cpu_power *
3384 min(this_load_per_task, this_load + tmp);
3385 pwr_move /= SCHED_LOAD_SCALE;
3386
3387 /* Move if we gain throughput */
3388 if (pwr_move > pwr_now)
3389 *imbalance = busiest_load_per_task;
3390 }
3711 memset(&sds, 0, sizeof(sds));
3712
3713 /*
3714 * Compute the various statistics relevant for load balancing at
3715 * this level.
3716 */
3717 update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
3718 balance, &sds);
3719
3720 /* Cases where imbalance does not exist from POV of this_cpu */
3721 /* 1) this_cpu is not the appropriate cpu to perform load balancing
3722 * at this level.
3723 * 2) There is no busy sibling group to pull from.
3724 * 3) This group is the busiest group.
3725 * 4) This group is more busy than the avg busyness at this
3726 * sched_domain.
3727 * 5) The imbalance is within the specified limit.
3728 * 6) Any rebalance would lead to ping-pong
3729 */
3730 if (balance && !(*balance))
3731 goto ret;
3391 3732
3392 return busiest;
3393
3394out_balanced:
3395#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3396 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3397 goto ret;
3398
3399 if (this == group_leader && group_leader != group_min) {
3400 *imbalance = min_load_per_task;
3401 if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
3402 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
3403 cpumask_first(sched_group_cpus(group_leader));
3404 }
3405 return group_min;
3406 }
3407#endif
3733 if (!sds.busiest || sds.busiest_nr_running == 0)
3734 goto out_balanced;
3735
3736 if (sds.this_load >= sds.max_load)
3737 goto out_balanced;
3738
3739 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
3740
3741 if (sds.this_load >= sds.avg_load)
3742 goto out_balanced;
3743
3744 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
3745 goto out_balanced;
3746
3747 sds.busiest_load_per_task /= sds.busiest_nr_running;
3748 if (sds.group_imb)
3749 sds.busiest_load_per_task =
3750 min(sds.busiest_load_per_task, sds.avg_load);
3751
3752 /*
3753 * We're trying to get all the cpus to the average_load, so we don't
3754 * want to push ourselves above the average load, nor do we wish to
3755 * reduce the max loaded cpu below the average load, as either of these
3756 * actions would just result in more rebalancing later, and ping-pong
3757 * tasks around. Thus we look for the minimum possible imbalance.
3758 * Negative imbalances (*we* are more loaded than anyone else) will
3759 * be counted as no imbalance for these purposes -- we can't fix that
3760 * by pulling tasks to us. Be careful of negative numbers as they'll
3761 * appear as very large values with unsigned longs.
3762 */
3763 if (sds.max_load <= sds.busiest_load_per_task)
3764 goto out_balanced;
3765
3766 /* Looks like there is an imbalance. Compute it */
3767 calculate_imbalance(&sds, this_cpu, imbalance);
3768 return sds.busiest;
3769
3770out_balanced:
3771 /*
3772 * There is no obvious imbalance. But check if we can do some balancing
3773 * to save power.
3774 */
3775 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
3776 return sds.busiest;
3408ret: 3777ret:
3409 *imbalance = 0; 3778 *imbalance = 0;
3410 return NULL; 3779 return NULL;
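Several of the early-out checks in the rewritten find_busiest_group() reduce to simple arithmetic on the sd_lb_stats fields. The snippet below illustrates just the imbalance_pct slack test; the value 125 is an assumption (a typical imbalance_pct for CPU-level domains), not something taken from this patch:

#include <stdio.h>

int main(void)
{
        /* Assumed: a CPU-level domain with imbalance_pct = 125 (25% slack). */
        unsigned int imbalance_pct = 125;
        unsigned long this_load = 1000, max_load = 1200;

        /* Mirrors: if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
         *                 goto out_balanced;                                  */
        int balanced = 100 * max_load <= imbalance_pct * this_load;

        printf("%s\n", balanced ? "within slack, no balancing" : "imbalanced");
        return 0;
}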
@@ -4057,6 +4426,11 @@ static void run_rebalance_domains(struct softirq_action *h)
4057#endif 4426#endif
4058} 4427}
4059 4428
4429static inline int on_null_domain(int cpu)
4430{
4431 return !rcu_dereference(cpu_rq(cpu)->sd);
4432}
4433
4060/* 4434/*
4061 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 4435 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
4062 * 4436 *
@@ -4114,7 +4488,9 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
4114 cpumask_test_cpu(cpu, nohz.cpu_mask)) 4488 cpumask_test_cpu(cpu, nohz.cpu_mask))
4115 return; 4489 return;
4116#endif 4490#endif
4117 if (time_after_eq(jiffies, rq->next_balance))
4491 /* Don't need to rebalance while attached to NULL domain */
4492 if (time_after_eq(jiffies, rq->next_balance) &&
4493 likely(!on_null_domain(cpu)))
4118 raise_softirq(SCHED_SOFTIRQ); 4494 raise_softirq(SCHED_SOFTIRQ);
4119} 4495}
4120 4496
@@ -4508,11 +4884,33 @@ static inline void schedule_debug(struct task_struct *prev)
4508#endif 4884#endif
4509} 4885}
4510 4886
4887static void put_prev_task(struct rq *rq, struct task_struct *prev)
4888{
4889 if (prev->state == TASK_RUNNING) {
4890 u64 runtime = prev->se.sum_exec_runtime;
4891
4892 runtime -= prev->se.prev_sum_exec_runtime;
4893 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
4894
4895 /*
4896 * In order to avoid avg_overlap growing stale when we are
4897 * indeed overlapping and hence not getting put to sleep, grow
4898 * the avg_overlap on preemption.
4899 *
4900 * We use the average preemption runtime because that
4901 * correlates to the amount of cache footprint a task can
4902 * build up.
4903 */
4904 update_avg(&prev->se.avg_overlap, runtime);
4905 }
4906 prev->sched_class->put_prev_task(rq, prev);
4907}
4908
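put_prev_task() above grows avg_overlap by the time the task ran before being preempted, clamped to 2*sysctl_sched_migration_cost. update_avg() itself is not shown in this hunk; the sketch below assumes it is the usual 1/8-weight moving average used elsewhere in kernel/sched.c, so treat the helper as an approximation rather than a verbatim copy:

#include <stdio.h>

typedef unsigned long long u64;
typedef long long s64;

/* Assumed shape of update_avg(): new sample contributes 1/8 of its
 * distance from the current average. */
static void update_avg(u64 *avg, u64 sample)
{
        s64 diff = sample - *avg;
        *avg += diff >> 3;
}

int main(void)
{
        u64 avg_overlap = 0;

        /* Three preemptions after ~400us of runtime each (nanoseconds). */
        for (int i = 0; i < 3; i++)
                update_avg(&avg_overlap, 400000ULL);

        printf("avg_overlap = %llu ns\n", avg_overlap);
        return 0;
}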
4511/* 4909/*
4512 * Pick up the highest-prio task: 4910 * Pick up the highest-prio task:
4513 */ 4911 */
4514static inline struct task_struct * 4912static inline struct task_struct *
4515pick_next_task(struct rq *rq, struct task_struct *prev)
4913pick_next_task(struct rq *rq)
4516{ 4914{
4517 const struct sched_class *class; 4915 const struct sched_class *class;
4518 struct task_struct *p; 4916 struct task_struct *p;
@@ -4586,8 +4984,8 @@ need_resched_nonpreemptible:
4586 if (unlikely(!rq->nr_running)) 4984 if (unlikely(!rq->nr_running))
4587 idle_balance(cpu, rq); 4985 idle_balance(cpu, rq);
4588 4986
4589 prev->sched_class->put_prev_task(rq, prev);
4590 next = pick_next_task(rq, prev);
4987 put_prev_task(rq, prev);
4988 next = pick_next_task(rq);
4591 4989
4592 if (likely(prev != next)) { 4990 if (likely(prev != next)) {
4593 sched_info_switch(prev, next); 4991 sched_info_switch(prev, next);
@@ -4642,7 +5040,7 @@ asmlinkage void __sched preempt_schedule(void)
4642 * between schedule and now. 5040 * between schedule and now.
4643 */ 5041 */
4644 barrier(); 5042 barrier();
4645 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
5043 } while (need_resched());
4646} 5044}
4647EXPORT_SYMBOL(preempt_schedule); 5045EXPORT_SYMBOL(preempt_schedule);
4648 5046
@@ -4671,7 +5069,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
4671 * between schedule and now. 5069 * between schedule and now.
4672 */ 5070 */
4673 barrier(); 5071 barrier();
4674 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
5072 } while (need_resched());
4675} 5073}
4676 5074
4677#endif /* CONFIG_PREEMPT */ 5075#endif /* CONFIG_PREEMPT */
@@ -5145,7 +5543,7 @@ SYSCALL_DEFINE1(nice, int, increment)
5145 if (increment > 40) 5543 if (increment > 40)
5146 increment = 40; 5544 increment = 40;
5147 5545
5148 nice = PRIO_TO_NICE(current->static_prio) + increment;
5546 nice = TASK_NICE(current) + increment;
5149 if (nice < -20) 5547 if (nice < -20)
5150 nice = -20; 5548 nice = -20;
5151 if (nice > 19) 5549 if (nice > 19)
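The nice-syscall hunk above only swaps an open-coded expression for the TASK_NICE() macro. Assuming the usual definitions of that era (MAX_RT_PRIO = 100, TASK_NICE(p) expanding to PRIO_TO_NICE((p)->static_prio)), the change is purely cosmetic, as the toy program below demonstrates:

#include <stdio.h>

#define MAX_RT_PRIO        100
#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
/* TASK_NICE(p) is assumed to expand to PRIO_TO_NICE((p)->static_prio),
 * which is exactly the expression the hunk replaces. */

int main(void)
{
        int static_prio = NICE_TO_PRIO(-5);                /* a nice -5 task */
        printf("nice = %d\n", PRIO_TO_NICE(static_prio));  /* prints -5 */
        return 0;
}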
@@ -6423,7 +6821,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
6423 if (!rq->nr_running) 6821 if (!rq->nr_running)
6424 break; 6822 break;
6425 update_rq_clock(rq); 6823 update_rq_clock(rq);
6426 next = pick_next_task(rq, rq->curr);
6824 next = pick_next_task(rq);
6427 if (!next) 6825 if (!next)
6428 break; 6826 break;
6429 next->sched_class->put_prev_task(rq, next); 6827 next->sched_class->put_prev_task(rq, next);
@@ -8218,11 +8616,15 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
8218 __set_bit(MAX_RT_PRIO, array->bitmap); 8616 __set_bit(MAX_RT_PRIO, array->bitmap);
8219 8617
8220#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 8618#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
8221 rt_rq->highest_prio = MAX_RT_PRIO;
8619 rt_rq->highest_prio.curr = MAX_RT_PRIO;
8620#ifdef CONFIG_SMP
8621 rt_rq->highest_prio.next = MAX_RT_PRIO;
8622#endif
8222#endif 8623#endif
8223#ifdef CONFIG_SMP 8624#ifdef CONFIG_SMP
8224 rt_rq->rt_nr_migratory = 0; 8625 rt_rq->rt_nr_migratory = 0;
8225 rt_rq->overloaded = 0; 8626 rt_rq->overloaded = 0;
8627 plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
8226#endif 8628#endif
8227 8629
8228 rt_rq->rt_time = 0; 8630 rt_rq->rt_time = 0;
@@ -9598,7 +10000,7 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9598 struct cpuacct *ca; 10000 struct cpuacct *ca;
9599 int cpu; 10001 int cpu;
9600 10002
9601 if (!cpuacct_subsys.active)
10003 if (unlikely(!cpuacct_subsys.active))
9602 return; 10004 return;
9603 10005
9604 cpu = task_cpu(tsk); 10006 cpu = task_cpu(tsk);
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index a0b0852414c..390f33234bd 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -24,11 +24,11 @@
24 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat 24 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
25 * consistent between cpus (never more than 2 jiffies difference). 25 * consistent between cpus (never more than 2 jiffies difference).
26 */ 26 */
27#include <linux/sched.h>
28#include <linux/percpu.h>
29#include <linux/spinlock.h> 27#include <linux/spinlock.h>
30#include <linux/ktime.h>
31#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/percpu.h>
30#include <linux/ktime.h>
31#include <linux/sched.h>
32 32
33/* 33/*
34 * Scheduler clock - returns current time in nanosec units. 34 * Scheduler clock - returns current time in nanosec units.
@@ -43,6 +43,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
43static __read_mostly int sched_clock_running; 43static __read_mostly int sched_clock_running;
44 44
45#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 45#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
46__read_mostly int sched_clock_stable;
46 47
47struct sched_clock_data { 48struct sched_clock_data {
48 /* 49 /*
@@ -87,7 +88,7 @@ void sched_clock_init(void)
87} 88}
88 89
89/* 90/*
90 * min,max except they take wrapping into account 91 * min, max except they take wrapping into account
91 */ 92 */
92 93
93static inline u64 wrap_min(u64 x, u64 y) 94static inline u64 wrap_min(u64 x, u64 y)
@@ -111,15 +112,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
111 s64 delta = now - scd->tick_raw; 112 s64 delta = now - scd->tick_raw;
112 u64 clock, min_clock, max_clock; 113 u64 clock, min_clock, max_clock;
113 114
114 WARN_ON_ONCE(!irqs_disabled());
115
116 if (unlikely(delta < 0)) 115 if (unlikely(delta < 0))
117 delta = 0; 116 delta = 0;
118 117
119 /* 118 /*
120 * scd->clock = clamp(scd->tick_gtod + delta, 119 * scd->clock = clamp(scd->tick_gtod + delta,
121 * max(scd->tick_gtod, scd->clock), 120 * max(scd->tick_gtod, scd->clock),
122 * scd->tick_gtod + TICK_NSEC); 121 * scd->tick_gtod + TICK_NSEC);
123 */ 122 */
124 123
125 clock = scd->tick_gtod + delta; 124 clock = scd->tick_gtod + delta;
@@ -148,12 +147,13 @@ static void lock_double_clock(struct sched_clock_data *data1,
148 147
149u64 sched_clock_cpu(int cpu) 148u64 sched_clock_cpu(int cpu)
150{ 149{
151 struct sched_clock_data *scd = cpu_sdc(cpu);
152 u64 now, clock, this_clock, remote_clock; 150 u64 now, clock, this_clock, remote_clock;
151 struct sched_clock_data *scd;
153 152
154 if (unlikely(!sched_clock_running))
155 return 0ull;
153 if (sched_clock_stable)
154 return sched_clock();
156 155
156 scd = cpu_sdc(cpu);
157 WARN_ON_ONCE(!irqs_disabled()); 157 WARN_ON_ONCE(!irqs_disabled());
158 now = sched_clock(); 158 now = sched_clock();
159 159
@@ -195,14 +195,18 @@ u64 sched_clock_cpu(int cpu)
195 195
196void sched_clock_tick(void) 196void sched_clock_tick(void)
197{ 197{
198 struct sched_clock_data *scd = this_scd(); 198 struct sched_clock_data *scd;
199 u64 now, now_gtod; 199 u64 now, now_gtod;
200 200
201 if (sched_clock_stable)
202 return;
203
201 if (unlikely(!sched_clock_running)) 204 if (unlikely(!sched_clock_running))
202 return; 205 return;
203 206
204 WARN_ON_ONCE(!irqs_disabled()); 207 WARN_ON_ONCE(!irqs_disabled());
205 208
209 scd = this_scd();
206 now_gtod = ktime_to_ns(ktime_get()); 210 now_gtod = ktime_to_ns(ktime_get());
207 now = sched_clock(); 211 now = sched_clock();
208 212
@@ -250,7 +254,7 @@ u64 sched_clock_cpu(int cpu)
250 return sched_clock(); 254 return sched_clock();
251} 255}
252 256
253#endif 257#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
254 258
255unsigned long long cpu_clock(int cpu) 259unsigned long long cpu_clock(int cpu)
256{ 260{
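The sched_clock_stable flag introduced above lets sched_clock_cpu() and sched_clock_tick() skip the per-cpu filtering entirely when the underlying clock is trusted; the flag is presumably set by architecture code elsewhere in this series (an assumption, since that part is not shown here). A userspace sketch of the same fast-path/slow-path shape, with stand-in functions:

#include <stdio.h>
#include <time.h>

static int clock_stable;   /* stands in for sched_clock_stable */

static unsigned long long raw_clock_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static unsigned long long filtered_clock_ns(void)
{
        /* placeholder for the clamp/merge work __update_sched_clock() does */
        return raw_clock_ns();
}

static unsigned long long my_sched_clock_cpu(void)
{
        if (clock_stable)
                return raw_clock_ns();     /* fast path, no per-cpu state */
        return filtered_clock_ns();        /* slow path with filtering */
}

int main(void)
{
        clock_stable = 1;
        printf("%llu\n", my_sched_clock_cpu());
        return 0;
}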
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 16eeba4e416..467ca72f165 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -272,7 +272,6 @@ static void print_cpu(struct seq_file *m, int cpu)
272 P(nr_switches); 272 P(nr_switches);
273 P(nr_load_updates); 273 P(nr_load_updates);
274 P(nr_uninterruptible); 274 P(nr_uninterruptible);
275 SEQ_printf(m, " .%-30s: %lu\n", "jiffies", jiffies);
276 PN(next_balance); 275 PN(next_balance);
277 P(curr->pid); 276 P(curr->pid);
278 PN(clock); 277 PN(clock);
@@ -287,9 +286,6 @@ static void print_cpu(struct seq_file *m, int cpu)
287#ifdef CONFIG_SCHEDSTATS 286#ifdef CONFIG_SCHEDSTATS
288#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); 287#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
289 288
290 P(yld_exp_empty);
291 P(yld_act_empty);
292 P(yld_both_empty);
293 P(yld_count); 289 P(yld_count);
294 290
295 P(sched_switch); 291 P(sched_switch);
@@ -314,7 +310,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
314 u64 now = ktime_to_ns(ktime_get()); 310 u64 now = ktime_to_ns(ktime_get());
315 int cpu; 311 int cpu;
316 312
317 SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n",
313 SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
318 init_utsname()->release, 314 init_utsname()->release,
319 (int)strcspn(init_utsname()->version, " "), 315 (int)strcspn(init_utsname()->version, " "),
320 init_utsname()->version); 316 init_utsname()->version);
@@ -325,6 +321,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
325 SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x)) 321 SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
326#define PN(x) \ 322#define PN(x) \
327 SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x)) 323 SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
324 P(jiffies);
328 PN(sysctl_sched_latency); 325 PN(sysctl_sched_latency);
329 PN(sysctl_sched_min_granularity); 326 PN(sysctl_sched_min_granularity);
330 PN(sysctl_sched_wakeup_granularity); 327 PN(sysctl_sched_wakeup_granularity);
@@ -397,6 +394,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
397 PN(se.vruntime); 394 PN(se.vruntime);
398 PN(se.sum_exec_runtime); 395 PN(se.sum_exec_runtime);
399 PN(se.avg_overlap); 396 PN(se.avg_overlap);
397 PN(se.avg_wakeup);
400 398
401 nr_switches = p->nvcsw + p->nivcsw; 399 nr_switches = p->nvcsw + p->nivcsw;
402 400
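The sched_debug.c changes shuffle which counters are printed, but the P()/PN() macros keep the same pattern: stringify the expression for the field name, then print its value. A minimal userspace rendition (field width and values are illustrative only):

#include <stdio.h>

/* Userspace rendition of the SEQ_printf()-based P() macro pattern:
 * #x stringifies the expression, producing the field names visible in
 * /proc/sched_debug. */
#define P(x)  printf("  .%-30s: %lld\n", #x, (long long)(x))

int main(void)
{
        long long jiffies = 4294937296LL;   /* illustrative value only */
        long long nr_switches = 12345;

        P(jiffies);        /* the patch now prints jiffies once, globally */
        P(nr_switches);
        return 0;
}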
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0566f2a03c4..3816f217f11 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1314,16 +1314,63 @@ out:
1314} 1314}
1315#endif /* CONFIG_SMP */ 1315#endif /* CONFIG_SMP */
1316 1316
1317static unsigned long wakeup_gran(struct sched_entity *se)
1317/*
1318 * Adaptive granularity
1319 *
1320 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
1321 * with the limit of wakeup_gran -- when it never does a wakeup.
1322 *
1323 * So the smaller avg_wakeup is the faster we want this task to preempt,
1324 * but we don't want to treat the preemptee unfairly and therefore allow it
1325 * to run for at least the amount of time we'd like to run.
1326 *
1327 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
1328 *
1329 * NOTE: we use *nr_running to scale with load, this nicely matches the
1330 * degrading latency on load.
1331 */
1332static unsigned long
1333adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
1334{
1335 u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1336 u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
1337 u64 gran = 0;
1338
1339 if (this_run < expected_wakeup)
1340 gran = expected_wakeup - this_run;
1341
1342 return min_t(s64, gran, sysctl_sched_wakeup_granularity);
1343}
1344
1345static unsigned long
1346wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1318{ 1347{
1319 unsigned long gran = sysctl_sched_wakeup_granularity; 1348 unsigned long gran = sysctl_sched_wakeup_granularity;
1320 1349
1350 if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
1351 gran = adaptive_gran(curr, se);
1352
1321 /*
1322 * More easily preempt - nice tasks, while not making it harder for
1323 * + nice tasks.
1324 */
1325 if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD)
1326 gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
1353 /*
1354 * Since it's curr running now, convert the gran from real-time
1355 * to virtual-time in its units.
1356 */
1357 if (sched_feat(ASYM_GRAN)) {
1358 /*
1359 * By using 'se' instead of 'curr' we penalize light tasks, so
1360 * they get preempted easier. That is, if 'se' < 'curr' then
1361 * the resulting gran will be larger, therefore penalizing the
1362 * lighter, if otoh 'se' > 'curr' then the resulting gran will
1363 * be smaller, again penalizing the lighter task.
1364 *
1365 * This is especially important for buddies when the leftmost
1366 * task is higher priority than the buddy.
1367 */
1368 if (unlikely(se->load.weight != NICE_0_LOAD))
1369 gran = calc_delta_fair(gran, se);
1370 } else {
1371 if (unlikely(curr->load.weight != NICE_0_LOAD))
1372 gran = calc_delta_fair(gran, curr);
1373 }
1327 1374
1328 return gran; 1375 return gran;
1329} 1376}
@@ -1350,7 +1397,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1350 if (vdiff <= 0) 1397 if (vdiff <= 0)
1351 return -1; 1398 return -1;
1352 1399
1353 gran = wakeup_gran(curr);
1400 gran = wakeup_gran(curr, se);
1354 if (vdiff > gran) 1401 if (vdiff > gran)
1355 return 1; 1402 return 1;
1356 1403
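Plugging numbers into the adaptive_gran() logic added above makes its effect concrete. The sketch below restates the function over plain integers; the 5 ms ceiling stands in for sysctl_sched_wakeup_granularity and is an assumed default, not a value taken from this diff:

#include <stdio.h>

typedef unsigned long long u64;

/* Standalone rework of adaptive_gran() with the scheduler entities
 * replaced by plain numbers (all times in nanoseconds). */
static u64 adaptive_gran(u64 this_run, u64 avg_wakeup,
                         unsigned int nr_running, u64 wakeup_gran_limit)
{
        u64 expected_wakeup = 2 * avg_wakeup * nr_running;
        u64 gran = 0;

        if (this_run < expected_wakeup)
                gran = expected_wakeup - this_run;

        return gran < wakeup_gran_limit ? gran : wakeup_gran_limit;
}

int main(void)
{
        /* The newly woken task typically runs ~1 ms before waking someone
         * (avg_wakeup), two tasks are queued, and the current task has run
         * 0.5 ms since it last started (this_run). */
        printf("gran = %llu ns\n",
               adaptive_gran(500000ULL, 1000000ULL, 2, 5000000ULL));
        return 0;
}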
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index da5d93b5d2c..76f61756e67 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,5 +1,6 @@
1SCHED_FEAT(NEW_FAIR_SLEEPERS, 1) 1SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
2SCHED_FEAT(NORMALIZED_SLEEPER, 1) 2SCHED_FEAT(NORMALIZED_SLEEPER, 0)
3SCHED_FEAT(ADAPTIVE_GRAN, 1)
3SCHED_FEAT(WAKEUP_PREEMPT, 1) 4SCHED_FEAT(WAKEUP_PREEMPT, 1)
4SCHED_FEAT(START_DEBIT, 1) 5SCHED_FEAT(START_DEBIT, 1)
5SCHED_FEAT(AFFINE_WAKEUPS, 1) 6SCHED_FEAT(AFFINE_WAKEUPS, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bac1061cea2..c79dc784401 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,40 @@
3 * policies) 3 * policies)
4 */ 4 */
5 5
6static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
7{
8 return container_of(rt_se, struct task_struct, rt);
9}
10
11#ifdef CONFIG_RT_GROUP_SCHED
12
13static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
14{
15 return rt_rq->rq;
16}
17
18static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
19{
20 return rt_se->rt_rq;
21}
22
23#else /* CONFIG_RT_GROUP_SCHED */
24
25static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
26{
27 return container_of(rt_rq, struct rq, rt);
28}
29
30static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
31{
32 struct task_struct *p = rt_task_of(rt_se);
33 struct rq *rq = task_rq(p);
34
35 return &rq->rt;
36}
37
38#endif /* CONFIG_RT_GROUP_SCHED */
39
6#ifdef CONFIG_SMP 40#ifdef CONFIG_SMP
7 41
8static inline int rt_overloaded(struct rq *rq) 42static inline int rt_overloaded(struct rq *rq)
@@ -37,25 +71,69 @@ static inline void rt_clear_overload(struct rq *rq)
37 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); 71 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
38} 72}
39 73
40static void update_rt_migration(struct rq *rq)
41{
42 if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
43 if (!rq->rt.overloaded) {
44 rt_set_overload(rq);
45 rq->rt.overloaded = 1;
46 }
47 } else if (rq->rt.overloaded) {
48 rt_clear_overload(rq);
49 rq->rt.overloaded = 0;
50 }
51}
74static void update_rt_migration(struct rt_rq *rt_rq)
75{
76 if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
77 if (!rt_rq->overloaded) {
78 rt_set_overload(rq_of_rt_rq(rt_rq));
79 rt_rq->overloaded = 1;
80 }
81 } else if (rt_rq->overloaded) {
82 rt_clear_overload(rq_of_rt_rq(rt_rq));
83 rt_rq->overloaded = 0;
84 }
85}
52#endif /* CONFIG_SMP */
53 86
54static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) 87static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
88{
89 if (rt_se->nr_cpus_allowed > 1)
90 rt_rq->rt_nr_migratory++;
91
92 update_rt_migration(rt_rq);
93}
94
95static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
96{
97 if (rt_se->nr_cpus_allowed > 1)
98 rt_rq->rt_nr_migratory--;
99
100 update_rt_migration(rt_rq);
101}
102
103static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
104{
105 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
106 plist_node_init(&p->pushable_tasks, p->prio);
107 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
108}
109
110static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
111{
112 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
113}
114
115#else
116
117static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
55{ 118{
56 return container_of(rt_se, struct task_struct, rt);
57} 119}
58 120
121static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
122{
123}
124
125static inline
126void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
127{
128}
129
130static inline
131void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
132{
133}
134
135#endif /* CONFIG_SMP */
136
59static inline int on_rt_rq(struct sched_rt_entity *rt_se) 137static inline int on_rt_rq(struct sched_rt_entity *rt_se)
60{ 138{
61 return !list_empty(&rt_se->run_list); 139 return !list_empty(&rt_se->run_list);
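The pushable_tasks list added above is a plist: kept sorted by priority so pick_next_pushable_task() can always take the head. The standalone sketch below imitates that ordering with an ordinary singly linked list (remember that in the kernel a lower prio value means a higher RT priority); it is an illustration, not the plist implementation:

#include <stdio.h>

/* Minimal stand-in for a priority-sorted list: the "next task to push"
 * is always the head, just like plist_first_entry() on rt.pushable_tasks. */
struct node {
        int prio;
        struct node *next;
};

static void plist_add_sorted(struct node **head, struct node *n)
{
        while (*head && (*head)->prio <= n->prio)
                head = &(*head)->next;
        n->next = *head;
        *head = n;
}

int main(void)
{
        struct node a = { .prio = 50 }, b = { .prio = 10 }, c = { .prio = 30 };
        struct node *head = NULL;

        plist_add_sorted(&head, &a);
        plist_add_sorted(&head, &b);
        plist_add_sorted(&head, &c);

        /* head is now the prio-10 node: the highest-priority pushable task */
        for (struct node *p = head; p; p = p->next)
                printf("prio %d\n", p->prio);
        return 0;
}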
@@ -79,16 +157,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
79#define for_each_leaf_rt_rq(rt_rq, rq) \ 157#define for_each_leaf_rt_rq(rt_rq, rq) \
80 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) 158 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
81 159
82static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
83{
84 return rt_rq->rq;
85}
86
87static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
88{
89 return rt_se->rt_rq;
90}
91
92#define for_each_sched_rt_entity(rt_se) \ 160#define for_each_sched_rt_entity(rt_se) \
93 for (; rt_se; rt_se = rt_se->parent) 161 for (; rt_se; rt_se = rt_se->parent)
94 162
@@ -108,7 +176,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
108 if (rt_rq->rt_nr_running) { 176 if (rt_rq->rt_nr_running) {
109 if (rt_se && !on_rt_rq(rt_se)) 177 if (rt_se && !on_rt_rq(rt_se))
110 enqueue_rt_entity(rt_se); 178 enqueue_rt_entity(rt_se);
111 if (rt_rq->highest_prio < curr->prio) 179 if (rt_rq->highest_prio.curr < curr->prio)
112 resched_task(curr); 180 resched_task(curr);
113 } 181 }
114} 182}
@@ -176,19 +244,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
176#define for_each_leaf_rt_rq(rt_rq, rq) \ 244#define for_each_leaf_rt_rq(rt_rq, rq) \
177 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL) 245 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
178 246
179static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180{
181 return container_of(rt_rq, struct rq, rt);
182}
183
184static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185{
186 struct task_struct *p = rt_task_of(rt_se);
187 struct rq *rq = task_rq(p);
188
189 return &rq->rt;
190}
191
192#define for_each_sched_rt_entity(rt_se) \ 247#define for_each_sched_rt_entity(rt_se) \
193 for (; rt_se; rt_se = NULL) 248 for (; rt_se; rt_se = NULL)
194 249
@@ -473,7 +528,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
473 struct rt_rq *rt_rq = group_rt_rq(rt_se); 528 struct rt_rq *rt_rq = group_rt_rq(rt_se);
474 529
475 if (rt_rq) 530 if (rt_rq)
476 return rt_rq->highest_prio; 531 return rt_rq->highest_prio.curr;
477#endif 532#endif
478 533
479 return rt_task_of(rt_se)->prio; 534 return rt_task_of(rt_se)->prio;
@@ -547,91 +602,174 @@ static void update_curr_rt(struct rq *rq)
547 } 602 }
548} 603}
549 604
550static inline
551void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
552{
553 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
554 rt_rq->rt_nr_running++;
555#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
556 if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
557#ifdef CONFIG_SMP
558 struct rq *rq = rq_of_rt_rq(rt_rq);
559#endif
605#if defined CONFIG_SMP
606
607static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
608
609static inline int next_prio(struct rq *rq)
610{
611 struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
612
613 if (next && rt_prio(next->prio))
614 return next->prio;
615 else
616 return MAX_RT_PRIO;
617}
618
619static void
620inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
621{
622 struct rq *rq = rq_of_rt_rq(rt_rq);
623
624 if (prio < prev_prio) {
625
626 /*
627 * If the new task is higher in priority than anything on the
628 * run-queue, we know that the previous high becomes our
629 * next-highest.
630 */
631 rt_rq->highest_prio.next = prev_prio;
560 632
561 rt_rq->highest_prio = rt_se_prio(rt_se);
562#ifdef CONFIG_SMP
563 if (rq->online) 633 if (rq->online)
564 cpupri_set(&rq->rd->cpupri, rq->cpu, 634 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
565 rt_se_prio(rt_se));
566#endif
567 }
568#endif
569#ifdef CONFIG_SMP
570 if (rt_se->nr_cpus_allowed > 1) {
571 struct rq *rq = rq_of_rt_rq(rt_rq);
572 635
573 rq->rt.rt_nr_migratory++; 636 } else if (prio == rt_rq->highest_prio.curr)
574 } 637 /*
638 * If the next task is equal in priority to the highest on
639 * the run-queue, then we implicitly know that the next highest
640 * task cannot be any lower than current
641 */
642 rt_rq->highest_prio.next = prio;
643 else if (prio < rt_rq->highest_prio.next)
644 /*
645 * Otherwise, we need to recompute next-highest
646 */
647 rt_rq->highest_prio.next = next_prio(rq);
648}
575 649
576 update_rt_migration(rq_of_rt_rq(rt_rq)); 650static void
577#endif 651dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
578#ifdef CONFIG_RT_GROUP_SCHED 652{
579 if (rt_se_boosted(rt_se)) 653 struct rq *rq = rq_of_rt_rq(rt_rq);
580 rt_rq->rt_nr_boosted++;
581 654
582 if (rt_rq->tg) 655 if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
583 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); 656 rt_rq->highest_prio.next = next_prio(rq);
584#else 657
585 start_rt_bandwidth(&def_rt_bandwidth); 658 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
586#endif 659 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
587} 660}
588 661
662#else /* CONFIG_SMP */
663
589static inline 664static inline
590void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 665void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
591{ 666static inline
592#ifdef CONFIG_SMP 667void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
593 int highest_prio = rt_rq->highest_prio; 668
594#endif 669#endif /* CONFIG_SMP */
595 670
596 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
597 WARN_ON(!rt_rq->rt_nr_running);
598 rt_rq->rt_nr_running--;
599#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 671#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
672static void
673inc_rt_prio(struct rt_rq *rt_rq, int prio)
674{
675 int prev_prio = rt_rq->highest_prio.curr;
676
677 if (prio < prev_prio)
678 rt_rq->highest_prio.curr = prio;
679
680 inc_rt_prio_smp(rt_rq, prio, prev_prio);
681}
682
683static void
684dec_rt_prio(struct rt_rq *rt_rq, int prio)
685{
686 int prev_prio = rt_rq->highest_prio.curr;
687
600 if (rt_rq->rt_nr_running) { 688 if (rt_rq->rt_nr_running) {
601 struct rt_prio_array *array;
602 689
603 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio); 690 WARN_ON(prio < prev_prio);
604 if (rt_se_prio(rt_se) == rt_rq->highest_prio) { 691
605 /* recalculate */ 692 /*
606 array = &rt_rq->active; 693 * This may have been our highest task, and therefore
607 rt_rq->highest_prio = 694 * we may have some recomputation to do
695 */
696 if (prio == prev_prio) {
697 struct rt_prio_array *array = &rt_rq->active;
698
699 rt_rq->highest_prio.curr =
608 sched_find_first_bit(array->bitmap); 700 sched_find_first_bit(array->bitmap);
609 } /* otherwise leave rq->highest prio alone */ 701 }
702
610 } else 703 } else
611 rt_rq->highest_prio = MAX_RT_PRIO; 704 rt_rq->highest_prio.curr = MAX_RT_PRIO;
612#endif
613#ifdef CONFIG_SMP
614 if (rt_se->nr_cpus_allowed > 1) {
615 struct rq *rq = rq_of_rt_rq(rt_rq);
616 rq->rt.rt_nr_migratory--;
617 }
618 705
619 if (rt_rq->highest_prio != highest_prio) { 706 dec_rt_prio_smp(rt_rq, prio, prev_prio);
620 struct rq *rq = rq_of_rt_rq(rt_rq); 707}
621 708
622 if (rq->online) 709#else
623 cpupri_set(&rq->rd->cpupri, rq->cpu, 710
624 rt_rq->highest_prio); 711static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
625 } 712static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
713
714#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
626 715
627 update_rt_migration(rq_of_rt_rq(rt_rq));
628#endif /* CONFIG_SMP */
629#ifdef CONFIG_RT_GROUP_SCHED 716#ifdef CONFIG_RT_GROUP_SCHED
717
718static void
719inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
720{
721 if (rt_se_boosted(rt_se))
722 rt_rq->rt_nr_boosted++;
723
724 if (rt_rq->tg)
725 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
726}
727
728static void
729dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
730{
630 if (rt_se_boosted(rt_se)) 731 if (rt_se_boosted(rt_se))
631 rt_rq->rt_nr_boosted--; 732 rt_rq->rt_nr_boosted--;
632 733
633 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); 734 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
634#endif 735}
736
737#else /* CONFIG_RT_GROUP_SCHED */
738
739static void
740inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
741{
742 start_rt_bandwidth(&def_rt_bandwidth);
743}
744
745static inline
746void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
747
748#endif /* CONFIG_RT_GROUP_SCHED */
749
750static inline
751void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
752{
753 int prio = rt_se_prio(rt_se);
754
755 WARN_ON(!rt_prio(prio));
756 rt_rq->rt_nr_running++;
757
758 inc_rt_prio(rt_rq, prio);
759 inc_rt_migration(rt_se, rt_rq);
760 inc_rt_group(rt_se, rt_rq);
761}
762
763static inline
764void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
765{
766 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
767 WARN_ON(!rt_rq->rt_nr_running);
768 rt_rq->rt_nr_running--;
769
770 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
771 dec_rt_migration(rt_se, rt_rq);
772 dec_rt_group(rt_se, rt_rq);
635} 773}
636 774
637static void __enqueue_rt_entity(struct sched_rt_entity *rt_se) 775static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
@@ -718,6 +856,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
718 856
719 enqueue_rt_entity(rt_se); 857 enqueue_rt_entity(rt_se);
720 858
859 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
860 enqueue_pushable_task(rq, p);
861
721 inc_cpu_load(rq, p->se.load.weight); 862 inc_cpu_load(rq, p->se.load.weight);
722} 863}
723 864
@@ -728,6 +869,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
728 update_curr_rt(rq); 869 update_curr_rt(rq);
729 dequeue_rt_entity(rt_se); 870 dequeue_rt_entity(rt_se);
730 871
872 dequeue_pushable_task(rq, p);
873
731 dec_cpu_load(rq, p->se.load.weight); 874 dec_cpu_load(rq, p->se.load.weight);
732} 875}
733 876
@@ -878,7 +1021,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
878 return next; 1021 return next;
879} 1022}
880 1023
881static struct task_struct *pick_next_task_rt(struct rq *rq) 1024static struct task_struct *_pick_next_task_rt(struct rq *rq)
882{ 1025{
883 struct sched_rt_entity *rt_se; 1026 struct sched_rt_entity *rt_se;
884 struct task_struct *p; 1027 struct task_struct *p;
@@ -900,6 +1043,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
900 1043
901 p = rt_task_of(rt_se); 1044 p = rt_task_of(rt_se);
902 p->se.exec_start = rq->clock; 1045 p->se.exec_start = rq->clock;
1046
1047 return p;
1048}
1049
1050static struct task_struct *pick_next_task_rt(struct rq *rq)
1051{
1052 struct task_struct *p = _pick_next_task_rt(rq);
1053
1054 /* The running task is never eligible for pushing */
1055 if (p)
1056 dequeue_pushable_task(rq, p);
1057
903 return p; 1058 return p;
904} 1059}
905 1060
@@ -907,6 +1062,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
907{ 1062{
908 update_curr_rt(rq); 1063 update_curr_rt(rq);
909 p->se.exec_start = 0; 1064 p->se.exec_start = 0;
1065
1066 /*
1067 * The previous task needs to be made eligible for pushing
1068 * if it is still active
1069 */
1070 if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
1071 enqueue_pushable_task(rq, p);
910} 1072}
911 1073
912#ifdef CONFIG_SMP 1074#ifdef CONFIG_SMP
@@ -1072,7 +1234,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1072 } 1234 }
1073 1235
1074 /* If this rq is still suitable use it. */ 1236 /* If this rq is still suitable use it. */
1075 if (lowest_rq->rt.highest_prio > task->prio) 1237 if (lowest_rq->rt.highest_prio.curr > task->prio)
1076 break; 1238 break;
1077 1239
1078 /* try again */ 1240 /* try again */
@@ -1083,6 +1245,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1083 return lowest_rq; 1245 return lowest_rq;
1084} 1246}
1085 1247
1248static inline int has_pushable_tasks(struct rq *rq)
1249{
1250 return !plist_head_empty(&rq->rt.pushable_tasks);
1251}
1252
1253static struct task_struct *pick_next_pushable_task(struct rq *rq)
1254{
1255 struct task_struct *p;
1256
1257 if (!has_pushable_tasks(rq))
1258 return NULL;
1259
1260 p = plist_first_entry(&rq->rt.pushable_tasks,
1261 struct task_struct, pushable_tasks);
1262
1263 BUG_ON(rq->cpu != task_cpu(p));
1264 BUG_ON(task_current(rq, p));
1265 BUG_ON(p->rt.nr_cpus_allowed <= 1);
1266
1267 BUG_ON(!p->se.on_rq);
1268 BUG_ON(!rt_task(p));
1269
1270 return p;
1271}
1272
1086/* 1273/*
1087 * If the current CPU has more than one RT task, see if the non 1274 * If the current CPU has more than one RT task, see if the non
1088 * running task can migrate over to a CPU that is running a task 1275 * running task can migrate over to a CPU that is running a task
@@ -1092,13 +1279,11 @@ static int push_rt_task(struct rq *rq)
1092{ 1279{
1093 struct task_struct *next_task; 1280 struct task_struct *next_task;
1094 struct rq *lowest_rq; 1281 struct rq *lowest_rq;
1095 int ret = 0;
1096 int paranoid = RT_MAX_TRIES;
1097 1282
1098 if (!rq->rt.overloaded) 1283 if (!rq->rt.overloaded)
1099 return 0; 1284 return 0;
1100 1285
1101 next_task = pick_next_highest_task_rt(rq, -1); 1286 next_task = pick_next_pushable_task(rq);
1102 if (!next_task) 1287 if (!next_task)
1103 return 0; 1288 return 0;
1104 1289
@@ -1127,16 +1312,34 @@ static int push_rt_task(struct rq *rq)
1127 struct task_struct *task; 1312 struct task_struct *task;
1128 /* 1313 /*
1129 * find lock_lowest_rq releases rq->lock 1314 * find lock_lowest_rq releases rq->lock
1130 * so it is possible that next_task has changed.
1131 * If it has, then try again.
1132 */
1133 task = pick_next_highest_task_rt(rq, -1);
1134 if (unlikely(task != next_task) && task && paranoid--) {
1135 put_task_struct(next_task);
1136 next_task = task;
1137 goto retry;
1138 }
1139 goto out;
1315 * so it is possible that next_task has migrated.
1316 *
1317 * We need to make sure that the task is still on the same
1318 * run-queue and is also still the next task eligible for
1319 * pushing.
1320 */
1321 task = pick_next_pushable_task(rq);
1322 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1323 /*
1324 * If we get here, the task hasn't moved at all, but
1325 * it has failed to push. We will not try again,
1326 * since the other cpus will pull from us when they
1327 * are ready.
1328 */
1329 dequeue_pushable_task(rq, next_task);
1330 goto out;
1331 }
1332
1333 if (!task)
1334 /* No more tasks, just exit */
1335 goto out;
1336
1337 /*
1338 * Something has shifted, try again.
1339 */
1340 put_task_struct(next_task);
1341 next_task = task;
1342 goto retry;
1140 } 1343 }
1141 1344
1142 deactivate_task(rq, next_task, 0); 1345 deactivate_task(rq, next_task, 0);
@@ -1147,23 +1350,12 @@ static int push_rt_task(struct rq *rq)
1147 1350
1148 double_unlock_balance(rq, lowest_rq); 1351 double_unlock_balance(rq, lowest_rq);
1149 1352
1150 ret = 1;
1151out: 1353out:
1152 put_task_struct(next_task); 1354 put_task_struct(next_task);
1153 1355
1154 return ret; 1356 return 1;
1155} 1357}
1156 1358
1157/*
1158 * TODO: Currently we just use the second highest prio task on
1159 * the queue, and stop when it can't migrate (or there's
1160 * no more RT tasks). There may be a case where a lower
1161 * priority RT task has a different affinity than the
1162 * higher RT task. In this case the lower RT task could
1163 * possibly be able to migrate where as the higher priority
1164 * RT task could not. We currently ignore this issue.
1165 * Enhancements are welcome!
1166 */
1167static void push_rt_tasks(struct rq *rq) 1359static void push_rt_tasks(struct rq *rq)
1168{ 1360{
1169 /* push_rt_task will return true if it moved an RT */ 1361 /* push_rt_task will return true if it moved an RT */
@@ -1174,33 +1366,35 @@ static void push_rt_tasks(struct rq *rq)
1174static int pull_rt_task(struct rq *this_rq) 1366static int pull_rt_task(struct rq *this_rq)
1175{ 1367{
1176 int this_cpu = this_rq->cpu, ret = 0, cpu; 1368 int this_cpu = this_rq->cpu, ret = 0, cpu;
1177 struct task_struct *p, *next;
1369 struct task_struct *p;
1178 struct rq *src_rq; 1370 struct rq *src_rq;
1179 1371
1180 if (likely(!rt_overloaded(this_rq))) 1372 if (likely(!rt_overloaded(this_rq)))
1181 return 0; 1373 return 0;
1182 1374
1183 next = pick_next_task_rt(this_rq);
1184
1185 for_each_cpu(cpu, this_rq->rd->rto_mask) { 1375 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1186 if (this_cpu == cpu) 1376 if (this_cpu == cpu)
1187 continue; 1377 continue;
1188 1378
1189 src_rq = cpu_rq(cpu); 1379 src_rq = cpu_rq(cpu);
1380
1381 /*
1382 * Don't bother taking the src_rq->lock if the next highest
1383 * task is known to be lower-priority than our current task.
1384 * This may look racy, but if this value is about to go
1385 * logically higher, the src_rq will push this task away.
1386 * And if its going logically lower, we do not care
1387 */
1388 if (src_rq->rt.highest_prio.next >=
1389 this_rq->rt.highest_prio.curr)
1390 continue;
1391
1190 /* 1392 /*
1191 * We can potentially drop this_rq's lock in 1393 * We can potentially drop this_rq's lock in
1192 * double_lock_balance, and another CPU could 1394 * double_lock_balance, and another CPU could
1193 * steal our next task - hence we must cause 1395 * alter this_rq
1194 * the caller to recalculate the next task
1195 * in that case:
1196 */ 1396 */
1197 if (double_lock_balance(this_rq, src_rq)) {
1397 double_lock_balance(this_rq, src_rq);
1198 struct task_struct *old_next = next;
1199
1200 next = pick_next_task_rt(this_rq);
1201 if (next != old_next)
1202 ret = 1;
1203 }
1204 1398
1205 /* 1399 /*
1206 * Are there still pullable RT tasks? 1400 * Are there still pullable RT tasks?
@@ -1214,7 +1408,7 @@ static int pull_rt_task(struct rq *this_rq)
1214 * Do we have an RT task that preempts 1408 * Do we have an RT task that preempts
1215 * the to-be-scheduled task? 1409 * the to-be-scheduled task?
1216 */ 1410 */
1217 if (p && (!next || (p->prio < next->prio))) {
1411 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1218 WARN_ON(p == src_rq->curr); 1412 WARN_ON(p == src_rq->curr);
1219 WARN_ON(!p->se.on_rq); 1413 WARN_ON(!p->se.on_rq);
1220 1414
@@ -1224,12 +1418,9 @@ static int pull_rt_task(struct rq *this_rq)
1224 * This is just that p is wakeing up and hasn't 1418 * This is just that p is wakeing up and hasn't
1225 * had a chance to schedule. We only pull 1419 * had a chance to schedule. We only pull
1226 * p if it is lower in priority than the 1420 * p if it is lower in priority than the
1227 * current task on the run queue or
1228 * this_rq next task is lower in prio than
1229 * the current task on that rq.
1230 */
1231 if (p->prio < src_rq->curr->prio ||
1232 (next && next->prio < src_rq->curr->prio))
1421 * current task on the run queue
1422 */
1423 if (p->prio < src_rq->curr->prio)
1233 goto skip; 1424 goto skip;
1234 1425
1235 ret = 1; 1426 ret = 1;
@@ -1242,13 +1433,7 @@ static int pull_rt_task(struct rq *this_rq)
1242 * case there's an even higher prio task 1433 * case there's an even higher prio task
1243 * in another runqueue. (low likelyhood 1434 * in another runqueue. (low likelyhood
1244 * but possible) 1435 * but possible)
1245 *
1246 * Update next so that we won't pick a task
1247 * on another cpu with a priority lower (or equal)
1248 * than the one we just picked.
1249 */ 1436 */
1250 next = p;
1251
1252 } 1437 }
1253 skip: 1438 skip:
1254 double_unlock_balance(this_rq, src_rq); 1439 double_unlock_balance(this_rq, src_rq);
@@ -1260,24 +1445,27 @@ static int pull_rt_task(struct rq *this_rq)
1260static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) 1445static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1261{ 1446{
1262 /* Try to pull RT tasks here if we lower this rq's prio */ 1447 /* Try to pull RT tasks here if we lower this rq's prio */
1263 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio) 1448 if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
1264 pull_rt_task(rq); 1449 pull_rt_task(rq);
1265} 1450}
1266 1451
1452/*
1453 * assumes rq->lock is held
1454 */
1455static int needs_post_schedule_rt(struct rq *rq)
1456{
1457 return has_pushable_tasks(rq);
1458}
1459
1267static void post_schedule_rt(struct rq *rq) 1460static void post_schedule_rt(struct rq *rq)
1268{ 1461{
1269 /* 1462 /*
1270 * If we have more than one rt_task queued, then
1271 * see if we can push the other rt_tasks off to other CPUS.
1272 * Note we may release the rq lock, and since
1273 * the lock was owned by prev, we need to release it
1274 * first via finish_lock_switch and then reaquire it here.
1275 */
1276 if (unlikely(rq->rt.overloaded)) {
1277 spin_lock_irq(&rq->lock);
1278 push_rt_tasks(rq);
1279 spin_unlock_irq(&rq->lock);
1280 }
1463 * This is only called if needs_post_schedule_rt() indicates that
1464 * we need to push tasks away
1465 */
1466 spin_lock_irq(&rq->lock);
1467 push_rt_tasks(rq);
1468 spin_unlock_irq(&rq->lock);
1281} 1469}
1282 1470
1283/* 1471/*
@@ -1288,7 +1476,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1288{ 1476{
1289 if (!task_running(rq, p) && 1477 if (!task_running(rq, p) &&
1290 !test_tsk_need_resched(rq->curr) && 1478 !test_tsk_need_resched(rq->curr) &&
1291 rq->rt.overloaded)
1479 has_pushable_tasks(rq) &&
1480 p->rt.nr_cpus_allowed > 1)
1292 push_rt_tasks(rq); 1481 push_rt_tasks(rq);
1293} 1482}
1294 1483
@@ -1324,6 +1513,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1324 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) { 1513 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1325 struct rq *rq = task_rq(p); 1514 struct rq *rq = task_rq(p);
1326 1515
1516 if (!task_current(rq, p)) {
1517 /*
1518 * Make sure we dequeue this task from the pushable list
1519 * before going further. It will either remain off of
1520 * the list because we are no longer pushable, or it
1521 * will be requeued.
1522 */
1523 if (p->rt.nr_cpus_allowed > 1)
1524 dequeue_pushable_task(rq, p);
1525
1526 /*
1527 * Requeue if our weight is changing and still > 1
1528 */
1529 if (weight > 1)
1530 enqueue_pushable_task(rq, p);
1531
1532 }
1533
1327 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) { 1534 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1328 rq->rt.rt_nr_migratory++; 1535 rq->rt.rt_nr_migratory++;
1329 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) { 1536 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
@@ -1331,7 +1538,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1331 rq->rt.rt_nr_migratory--; 1538 rq->rt.rt_nr_migratory--;
1332 } 1539 }
1333 1540
1334 update_rt_migration(rq); 1541 update_rt_migration(&rq->rt);
1335 } 1542 }
1336 1543
1337 cpumask_copy(&p->cpus_allowed, new_mask); 1544 cpumask_copy(&p->cpus_allowed, new_mask);
@@ -1346,7 +1553,7 @@ static void rq_online_rt(struct rq *rq)
1346 1553
1347 __enable_runtime(rq); 1554 __enable_runtime(rq);
1348 1555
1349 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio); 1556 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1350} 1557}
1351 1558
1352/* Assumes rq->lock is held */ 1559/* Assumes rq->lock is held */
@@ -1438,7 +1645,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1438 * can release the rq lock and p could migrate. 1645 * can release the rq lock and p could migrate.
1439 * Only reschedule if p is still on the same runqueue. 1646 * Only reschedule if p is still on the same runqueue.
1440 */ 1647 */
1441 if (p->prio > rq->rt.highest_prio && rq->curr == p) 1648 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1442 resched_task(p); 1649 resched_task(p);
1443#else 1650#else
1444 /* For UP simply resched on drop of prio */ 1651 /* For UP simply resched on drop of prio */
@@ -1509,6 +1716,9 @@ static void set_curr_task_rt(struct rq *rq)
1509 struct task_struct *p = rq->curr; 1716 struct task_struct *p = rq->curr;
1510 1717
1511 p->se.exec_start = rq->clock; 1718 p->se.exec_start = rq->clock;
1719
1720 /* The running task is never eligible for pushing */
1721 dequeue_pushable_task(rq, p);
1512} 1722}
1513 1723
1514static const struct sched_class rt_sched_class = { 1724static const struct sched_class rt_sched_class = {
@@ -1531,6 +1741,7 @@ static const struct sched_class rt_sched_class = {
1531 .rq_online = rq_online_rt, 1741 .rq_online = rq_online_rt,
1532 .rq_offline = rq_offline_rt, 1742 .rq_offline = rq_offline_rt,
1533 .pre_schedule = pre_schedule_rt, 1743 .pre_schedule = pre_schedule_rt,
1744 .needs_post_schedule = needs_post_schedule_rt,
1534 .post_schedule = post_schedule_rt, 1745 .post_schedule = post_schedule_rt,
1535 .task_wake_up = task_wake_up_rt, 1746 .task_wake_up = task_wake_up_rt,
1536 .switched_from = switched_from_rt, 1747 .switched_from = switched_from_rt,
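The new highest_prio.curr/.next pair is what lets pull_rt_task() skip remote runqueues without taking their locks: if the source's second-highest task cannot preempt what we would run locally, the pull cannot help. A condensed userspace sketch of that filter (struct and function names are invented for the example):

#include <stdio.h>

#define MAX_RT_PRIO 100   /* kernel convention: 0..99 are RT, lower = higher */

struct rt_prio_cache {
        int curr;   /* highest RT prio queued on this runqueue */
        int next;   /* second highest, i.e. the best candidate to give away */
};

/* Mirrors the lockless check added to pull_rt_task(): only take the
 * remote lock when the remote spare task could preempt us. */
static int worth_pulling_from(const struct rt_prio_cache *src,
                              const struct rt_prio_cache *dst)
{
        return src->next < dst->curr;
}

int main(void)
{
        struct rt_prio_cache src = { .curr = 10, .next = 40 };
        struct rt_prio_cache dst = { .curr = 30, .next = MAX_RT_PRIO };

        /* src's spare task (prio 40) is lower priority than what dst is
         * already running (prio 30), so skip the expensive double lock. */
        printf("pull? %d\n", worth_pulling_from(&src, &dst));
        return 0;
}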
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index a8f93dd374e..32d2bd4061b 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -4,7 +4,7 @@
4 * bump this up when changing the output format or the meaning of an existing 4 * bump this up when changing the output format or the meaning of an existing
5 * format, so that tools can adapt (or abort) 5 * format, so that tools can adapt (or abort)
6 */ 6 */
7#define SCHEDSTAT_VERSION 14 7#define SCHEDSTAT_VERSION 15
8 8
9static int show_schedstat(struct seq_file *seq, void *v) 9static int show_schedstat(struct seq_file *seq, void *v)
10{ 10{
@@ -26,9 +26,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
26 26
27 /* runqueue-specific stats */ 27 /* runqueue-specific stats */
28 seq_printf(seq, 28 seq_printf(seq,
29 "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
30 cpu, rq->yld_both_empty,
31 rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
29 "cpu%d %u %u %u %u %u %u %llu %llu %lu",
30 cpu, rq->yld_count,
32 rq->sched_switch, rq->sched_count, rq->sched_goidle, 31 rq->sched_switch, rq->sched_count, rq->sched_goidle,
33 rq->ttwu_count, rq->ttwu_local, 32 rq->ttwu_count, rq->ttwu_local,
34 rq->rq_cpu_time, 33 rq->rq_cpu_time,
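With SCHEDSTAT_VERSION 15 the three yld_*_empty counters disappear from the per-cpu line, so the format above shrinks to ten fields. A small parser sketch using exactly that format string (the names of the two trailing sched_info fields are assumptions, since they fall outside the shown context):

#include <stdio.h>

int main(void)
{
        /* A fabricated version-15 per-cpu line matching
         * "cpu%d %u %u %u %u %u %u %llu %llu %lu" */
        const char *line = "cpu0 11 22 33 44 55 66 777 888 999";
        unsigned int cpu, yld_count, sched_switch, sched_count, sched_goidle;
        unsigned int ttwu_count, ttwu_local;
        unsigned long long rq_cpu_time, run_delay;   /* trailing field names */
        unsigned long pcount;                        /* are assumptions      */

        if (sscanf(line, "cpu%u %u %u %u %u %u %u %llu %llu %lu",
                   &cpu, &yld_count, &sched_switch, &sched_count,
                   &sched_goidle, &ttwu_count, &ttwu_local,
                   &rq_cpu_time, &run_delay, &pcount) == 10)
                printf("cpu%u: %u yields, %u switches\n",
                       cpu, yld_count, sched_switch);
        return 0;
}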
diff --git a/lib/Kconfig b/lib/Kconfig
index cea9e30a88f..54aaf4feaf6 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -136,12 +136,6 @@ config TEXTSEARCH_BM
136config TEXTSEARCH_FSM 136config TEXTSEARCH_FSM
137 tristate 137 tristate
138 138
139#
140# plist support is select#ed if needed
141#
142config PLIST
143 boolean
144
145config HAS_IOMEM 139config HAS_IOMEM
146 boolean 140 boolean
147 depends on !NO_IOMEM 141 depends on !NO_IOMEM
diff --git a/lib/Makefile b/lib/Makefile
index 0dd9229fab7..8bdc647e6d6 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -11,7 +11,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
11 rbtree.o radix-tree.o dump_stack.o \ 11 rbtree.o radix-tree.o dump_stack.o \
12 idr.o int_sqrt.o extable.o prio_tree.o \ 12 idr.o int_sqrt.o extable.o prio_tree.o \
13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o irq_regs.o reciprocal_div.o argv_split.o \
14 proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o 14 proportions.o prio_heap.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o
15 16
16lib-$(CONFIG_MMU) += ioremap.o 17lib-$(CONFIG_MMU) += ioremap.o
17lib-$(CONFIG_SMP) += cpumask.o 18lib-$(CONFIG_SMP) += cpumask.o
@@ -40,7 +41,6 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
40lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o 41lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
41obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o 42obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
42obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o 43obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
43obj-$(CONFIG_PLIST) += plist.o
44obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o 44obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
45obj-$(CONFIG_DEBUG_LIST) += list_debug.o 45obj-$(CONFIG_DEBUG_LIST) += list_debug.o
46obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o 46obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 01a3c22c1b5..39f1029e352 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -39,7 +39,7 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
39int __lockfunc __reacquire_kernel_lock(void) 39int __lockfunc __reacquire_kernel_lock(void)
40{ 40{
41 while (!_raw_spin_trylock(&kernel_flag)) { 41 while (!_raw_spin_trylock(&kernel_flag)) {
42 if (test_thread_flag(TIF_NEED_RESCHED))
42 if (need_resched())
43 return -EAGAIN; 43 return -EAGAIN;
44 cpu_relax(); 44 cpu_relax();
45 } 45 }
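The kernel_lock.c and preempt_schedule() hunks replace the open-coded TIF_NEED_RESCHED test with need_resched(), which wraps the same flag check plus a branch hint. The sketch below approximates that helper with userspace stand-ins; the TIF bit value and the stub test_thread_flag() are fabricated for the example:

#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

/* Stand-ins so the sketch compiles outside the kernel. */
#define TIF_NEED_RESCHED 3
static unsigned long thread_flags;
static int test_thread_flag(int flag)
{
        return (thread_flags >> flag) & 1;
}

/* Approximate shape of the need_resched() helper the patch switches to. */
static inline int need_resched(void)
{
        return unlikely(test_thread_flag(TIF_NEED_RESCHED));
}

int main(void)
{
        thread_flags |= 1UL << TIF_NEED_RESCHED;
        printf("need_resched() = %d\n", need_resched());
        return 0;
}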