path: root/kernel/sched
author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2011-11-15 11:14:39 -0500
committer  Ingo Molnar <mingo@elte.hu>               2011-11-17 06:20:22 -0500
commit     391e43da797a96aeb65410281891f6d0b0e9611c (patch)
tree       0ce6784525a5a8f75b377170cf1a7d60abccea29 /kernel/sched
parent     029632fbb7b7c9d85063cc9eb470de6c54873df3 (diff)
sched: Move all scheduler bits into kernel/sched/
There's too many sched*.[ch] files in kernel/, give them their own
directory. (No code changed, other than Makefile glue added.)

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched')
-rw-r--r--   kernel/sched/Makefile       20
-rw-r--r--   kernel/sched/auto_group.c  258
-rw-r--r--   kernel/sched/auto_group.h   64
-rw-r--r--   kernel/sched/clock.c       350
-rw-r--r--   kernel/sched/core.c       8101
-rw-r--r--   kernel/sched/cpupri.c      241
-rw-r--r--   kernel/sched/cpupri.h       34
-rw-r--r--   kernel/sched/debug.c       510
-rw-r--r--   kernel/sched/fair.c       5601
-rw-r--r--   kernel/sched/features.h     70
-rw-r--r--   kernel/sched/idle_task.c    99
-rw-r--r--   kernel/sched/rt.c         2045
-rw-r--r--   kernel/sched/sched.h      1064
-rw-r--r--   kernel/sched/stats.c       111
-rw-r--r--   kernel/sched/stats.h       233
-rw-r--r--   kernel/sched/stop_task.c   108
16 files changed, 18909 insertions, 0 deletions
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
new file mode 100644
index 000000000000..9a7dd35102a3
--- /dev/null
+++ b/kernel/sched/Makefile
@@ -0,0 +1,20 @@
1ifdef CONFIG_FUNCTION_TRACER
2CFLAGS_REMOVE_clock.o = -pg
3endif
4
5ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
6# According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
7# needed for x86 only. Why this used to be enabled for all architectures is beyond
8# me. I suspect most platforms don't need this, but until we know that for sure
9# I turn this off for IA-64 only. Andreas Schwab says it's also needed on m68k
10# to get a correct value for the wait-channel (WCHAN in ps). --davidm
11CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
12endif
13
14obj-y += core.o clock.o idle_task.o fair.o rt.o stop_task.o
15obj-$(CONFIG_SMP) += cpupri.o
16obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
17obj-$(CONFIG_SCHEDSTATS) += stats.o
18obj-$(CONFIG_SCHED_DEBUG) += debug.o
19
20
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
new file mode 100644
index 000000000000..e8a1f83ee0e7
--- /dev/null
+++ b/kernel/sched/auto_group.c
@@ -0,0 +1,258 @@
1#ifdef CONFIG_SCHED_AUTOGROUP
2
3#include "sched.h"
4
5#include <linux/proc_fs.h>
6#include <linux/seq_file.h>
7#include <linux/kallsyms.h>
8#include <linux/utsname.h>
9#include <linux/security.h>
10#include <linux/export.h>
11
12unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
13static struct autogroup autogroup_default;
14static atomic_t autogroup_seq_nr;
15
16void __init autogroup_init(struct task_struct *init_task)
17{
18 autogroup_default.tg = &root_task_group;
19 kref_init(&autogroup_default.kref);
20 init_rwsem(&autogroup_default.lock);
21 init_task->signal->autogroup = &autogroup_default;
22}
23
24void autogroup_free(struct task_group *tg)
25{
26 kfree(tg->autogroup);
27}
28
29static inline void autogroup_destroy(struct kref *kref)
30{
31 struct autogroup *ag = container_of(kref, struct autogroup, kref);
32
33#ifdef CONFIG_RT_GROUP_SCHED
34 /* We've redirected RT tasks to the root task group... */
35 ag->tg->rt_se = NULL;
36 ag->tg->rt_rq = NULL;
37#endif
38 sched_destroy_group(ag->tg);
39}
40
41static inline void autogroup_kref_put(struct autogroup *ag)
42{
43 kref_put(&ag->kref, autogroup_destroy);
44}
45
46static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
47{
48 kref_get(&ag->kref);
49 return ag;
50}
51
52static inline struct autogroup *autogroup_task_get(struct task_struct *p)
53{
54 struct autogroup *ag;
55 unsigned long flags;
56
57 if (!lock_task_sighand(p, &flags))
58 return autogroup_kref_get(&autogroup_default);
59
60 ag = autogroup_kref_get(p->signal->autogroup);
61 unlock_task_sighand(p, &flags);
62
63 return ag;
64}
65
66static inline struct autogroup *autogroup_create(void)
67{
68 struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
69 struct task_group *tg;
70
71 if (!ag)
72 goto out_fail;
73
74 tg = sched_create_group(&root_task_group);
75
76 if (IS_ERR(tg))
77 goto out_free;
78
79 kref_init(&ag->kref);
80 init_rwsem(&ag->lock);
81 ag->id = atomic_inc_return(&autogroup_seq_nr);
82 ag->tg = tg;
83#ifdef CONFIG_RT_GROUP_SCHED
84 /*
85 * Autogroup RT tasks are redirected to the root task group
86 * so we don't have to move tasks around upon policy change,
87 * or flail around trying to allocate bandwidth on the fly.
88 * A bandwidth exception in __sched_setscheduler() allows
89 * the policy change to proceed. Thereafter, task_group()
90 * returns &root_task_group, so zero bandwidth is required.
91 */
92 free_rt_sched_group(tg);
93 tg->rt_se = root_task_group.rt_se;
94 tg->rt_rq = root_task_group.rt_rq;
95#endif
96 tg->autogroup = ag;
97
98 return ag;
99
100out_free:
101 kfree(ag);
102out_fail:
103 if (printk_ratelimit()) {
104 printk(KERN_WARNING "autogroup_create: %s failure.\n",
105 ag ? "sched_create_group()" : "kmalloc()");
106 }
107
108 return autogroup_kref_get(&autogroup_default);
109}
110
111bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
112{
113 if (tg != &root_task_group)
114 return false;
115
116 if (p->sched_class != &fair_sched_class)
117 return false;
118
119 /*
120 * We can only assume the task group can't go away on us if
121 * autogroup_move_group() can see us on ->thread_group list.
122 */
123 if (p->flags & PF_EXITING)
124 return false;
125
126 return true;
127}
128
129static void
130autogroup_move_group(struct task_struct *p, struct autogroup *ag)
131{
132 struct autogroup *prev;
133 struct task_struct *t;
134 unsigned long flags;
135
136 BUG_ON(!lock_task_sighand(p, &flags));
137
138 prev = p->signal->autogroup;
139 if (prev == ag) {
140 unlock_task_sighand(p, &flags);
141 return;
142 }
143
144 p->signal->autogroup = autogroup_kref_get(ag);
145
146 if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
147 goto out;
148
149 t = p;
150 do {
151 sched_move_task(t);
152 } while_each_thread(p, t);
153
154out:
155 unlock_task_sighand(p, &flags);
156 autogroup_kref_put(prev);
157}
158
159/* Allocates GFP_KERNEL, cannot be called under any spinlock */
160void sched_autogroup_create_attach(struct task_struct *p)
161{
162 struct autogroup *ag = autogroup_create();
163
164 autogroup_move_group(p, ag);
165 /* drop extra reference added by autogroup_create() */
166 autogroup_kref_put(ag);
167}
168EXPORT_SYMBOL(sched_autogroup_create_attach);
169
170/* Cannot be called under siglock. Currently has no users */
171void sched_autogroup_detach(struct task_struct *p)
172{
173 autogroup_move_group(p, &autogroup_default);
174}
175EXPORT_SYMBOL(sched_autogroup_detach);
176
177void sched_autogroup_fork(struct signal_struct *sig)
178{
179 sig->autogroup = autogroup_task_get(current);
180}
181
182void sched_autogroup_exit(struct signal_struct *sig)
183{
184 autogroup_kref_put(sig->autogroup);
185}
186
187static int __init setup_autogroup(char *str)
188{
189 sysctl_sched_autogroup_enabled = 0;
190
191 return 1;
192}
193
194__setup("noautogroup", setup_autogroup);
195
196#ifdef CONFIG_PROC_FS
197
198int proc_sched_autogroup_set_nice(struct task_struct *p, int *nice)
199{
200 static unsigned long next = INITIAL_JIFFIES;
201 struct autogroup *ag;
202 int err;
203
204 if (*nice < -20 || *nice > 19)
205 return -EINVAL;
206
207 err = security_task_setnice(current, *nice);
208 if (err)
209 return err;
210
211 if (*nice < 0 && !can_nice(current, *nice))
212 return -EPERM;
213
214 /* this is a heavy operation taking global locks.. */
215 if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
216 return -EAGAIN;
217
218 next = HZ / 10 + jiffies;
219 ag = autogroup_task_get(p);
220
221 down_write(&ag->lock);
222 err = sched_group_set_shares(ag->tg, prio_to_weight[*nice + 20]);
223 if (!err)
224 ag->nice = *nice;
225 up_write(&ag->lock);
226
227 autogroup_kref_put(ag);
228
229 return err;
230}
231
232void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
233{
234 struct autogroup *ag = autogroup_task_get(p);
235
236 if (!task_group_is_autogroup(ag->tg))
237 goto out;
238
239 down_read(&ag->lock);
240 seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
241 up_read(&ag->lock);
242
243out:
244 autogroup_kref_put(ag);
245}
246#endif /* CONFIG_PROC_FS */
247
248#ifdef CONFIG_SCHED_DEBUG
249int autogroup_path(struct task_group *tg, char *buf, int buflen)
250{
251 if (!task_group_is_autogroup(tg))
252 return 0;
253
254 return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
255}
256#endif /* CONFIG_SCHED_DEBUG */
257
258#endif /* CONFIG_SCHED_AUTOGROUP */
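The proc interface above converts a per-group nice value into a CFS share by indexing the standard 40-entry weight table with nice + 20, so nice 0 selects the default weight of 1024 and each nice step changes the weight by roughly a factor of 1.25. Below is a freestanding sketch of that mapping; the three table values spelled out are the well-known endpoints and default, and everything else (names, the approximation comment) is illustrative rather than part of this patch.

#include <stdio.h>

/* Sketch of the nice -> weight lookup behind prio_to_weight[*nice + 20] above.
 * Only three representative entries of the 40-entry table (nice -20..19) are
 * written out; the full table follows roughly weight(nice) ~= 1024 / 1.25^nice. */
static unsigned long nice_to_weight(int nice)
{
	if (nice < -20 || nice > 19)
		return 0;	/* out of range; proc_sched_autogroup_set_nice() rejects this */
	if (nice == -20)
		return 88761;	/* heaviest weight */
	if (nice == 0)
		return 1024;	/* default (nice 0) weight */
	if (nice == 19)
		return 15;	/* lightest weight */
	return 0;		/* remaining entries omitted in this sketch */
}

int main(void)
{
	printf("nice %3d -> weight %lu\n", -20, nice_to_weight(-20));
	printf("nice %3d -> weight %lu\n", 0, nice_to_weight(0));
	printf("nice %3d -> weight %lu\n", 19, nice_to_weight(19));
	return 0;
}

In practice the two hooks back /proc/<pid>/autogroup: reading the file prints the "/autogroup-%ld nice %d" line shown above, and writing a nice value lands in proc_sched_autogroup_set_nice().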
diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h
new file mode 100644
index 000000000000..8bd047142816
--- /dev/null
+++ b/kernel/sched/auto_group.h
@@ -0,0 +1,64 @@
1#ifdef CONFIG_SCHED_AUTOGROUP
2
3#include <linux/kref.h>
4#include <linux/rwsem.h>
5
6struct autogroup {
7 /*
8 * The reference count doesn't mean how many threads are attached to
9 * this autogroup right now; it is the number of tasks that could
10 * use this autogroup.
11 */
12 struct kref kref;
13 struct task_group *tg;
14 struct rw_semaphore lock;
15 unsigned long id;
16 int nice;
17};
18
19extern void autogroup_init(struct task_struct *init_task);
20extern void autogroup_free(struct task_group *tg);
21
22static inline bool task_group_is_autogroup(struct task_group *tg)
23{
24 return !!tg->autogroup;
25}
26
27extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
28
29static inline struct task_group *
30autogroup_task_group(struct task_struct *p, struct task_group *tg)
31{
32 int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
33
34 if (enabled && task_wants_autogroup(p, tg))
35 return p->signal->autogroup->tg;
36
37 return tg;
38}
39
40extern int autogroup_path(struct task_group *tg, char *buf, int buflen);
41
42#else /* !CONFIG_SCHED_AUTOGROUP */
43
44static inline void autogroup_init(struct task_struct *init_task) { }
45static inline void autogroup_free(struct task_group *tg) { }
46static inline bool task_group_is_autogroup(struct task_group *tg)
47{
48 return 0;
49}
50
51static inline struct task_group *
52autogroup_task_group(struct task_struct *p, struct task_group *tg)
53{
54 return tg;
55}
56
57#ifdef CONFIG_SCHED_DEBUG
58static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
59{
60 return 0;
61}
62#endif
63
64#endif /* CONFIG_SCHED_AUTOGROUP */
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
new file mode 100644
index 000000000000..c685e31492df
--- /dev/null
+++ b/kernel/sched/clock.c
@@ -0,0 +1,350 @@
1/*
2 * sched_clock for unstable cpu clocks
3 *
4 * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
5 *
6 * Updates and enhancements:
7 * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
8 *
9 * Based on code by:
10 * Ingo Molnar <mingo@redhat.com>
11 * Guillaume Chazarain <guichaz@gmail.com>
12 *
13 *
14 * What:
15 *
16 * cpu_clock(i) provides a fast (execution time) high resolution
17 * clock with bounded drift between CPUs. The value of cpu_clock(i)
18 * is monotonic for constant i. The timestamp returned is in nanoseconds.
19 *
20 * ######################### BIG FAT WARNING ##########################
21 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
22 * # go backwards !! #
23 * ####################################################################
24 *
25 * There is no strict promise about the base, although it tends to start
26 * at 0 on boot (but people really shouldn't rely on that).
27 *
28 * cpu_clock(i) -- can be used from any context, including NMI.
29 * sched_clock_cpu(i) -- must be used with local IRQs disabled (implied by NMI)
30 * local_clock() -- is cpu_clock() on the current cpu.
31 *
32 * How:
33 *
34 * The implementation uses sched_clock() directly when
35 * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, in which case
36 * sched_clock() is assumed to provide these properties (mostly it means
37 * the architecture provides a globally synchronized highres time source).
38 *
39 * Otherwise it tries to create a semi stable clock from a mixture of other
40 * clocks, including:
41 *
42 * - GTOD (clock monotonic)
43 * - sched_clock()
44 * - explicit idle events
45 *
46 * We use GTOD as base and use sched_clock() deltas to improve resolution. The
47 * deltas are filtered to provide monotonicity and to keep it within an
48 * expected window.
49 *
50 * Furthermore, explicit sleep and wakeup hooks allow us to account for time
51 * that is otherwise invisible (TSC gets stopped).
52 *
53 *
54 * Notes:
55 *
56 * The !IRQ-safety of sched_clock() and sched_clock_cpu() comes from things
57 * like cpufreq interrupts that can change the base clock (TSC) multiplier
58 * and cause funny jumps in time -- although the filtering provided by
59 * sched_clock_cpu() should mitigate serious artifacts, we cannot rely on it
60 * in general, since for !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK we fully rely on
61 * sched_clock().
62 */
63#include <linux/spinlock.h>
64#include <linux/hardirq.h>
65#include <linux/export.h>
66#include <linux/percpu.h>
67#include <linux/ktime.h>
68#include <linux/sched.h>
69
70/*
71 * Scheduler clock - returns current time in nanosec units.
72 * This is the default implementation.
73 * Architectures and sub-architectures can override this.
74 */
75unsigned long long __attribute__((weak)) sched_clock(void)
76{
77 return (unsigned long long)(jiffies - INITIAL_JIFFIES)
78 * (NSEC_PER_SEC / HZ);
79}
80EXPORT_SYMBOL_GPL(sched_clock);
81
82__read_mostly int sched_clock_running;
83
84#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
85__read_mostly int sched_clock_stable;
86
87struct sched_clock_data {
88 u64 tick_raw;
89 u64 tick_gtod;
90 u64 clock;
91};
92
93static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
94
95static inline struct sched_clock_data *this_scd(void)
96{
97 return &__get_cpu_var(sched_clock_data);
98}
99
100static inline struct sched_clock_data *cpu_sdc(int cpu)
101{
102 return &per_cpu(sched_clock_data, cpu);
103}
104
105void sched_clock_init(void)
106{
107 u64 ktime_now = ktime_to_ns(ktime_get());
108 int cpu;
109
110 for_each_possible_cpu(cpu) {
111 struct sched_clock_data *scd = cpu_sdc(cpu);
112
113 scd->tick_raw = 0;
114 scd->tick_gtod = ktime_now;
115 scd->clock = ktime_now;
116 }
117
118 sched_clock_running = 1;
119}
120
121/*
122 * min, max except they take wrapping into account
123 */
124
125static inline u64 wrap_min(u64 x, u64 y)
126{
127 return (s64)(x - y) < 0 ? x : y;
128}
129
130static inline u64 wrap_max(u64 x, u64 y)
131{
132 return (s64)(x - y) > 0 ? x : y;
133}
134
135/*
136 * update the percpu scd from the raw @now value
137 *
138 * - filter out backward motion
139 * - use the GTOD tick value to create a window to filter crazy TSC values
140 */
141static u64 sched_clock_local(struct sched_clock_data *scd)
142{
143 u64 now, clock, old_clock, min_clock, max_clock;
144 s64 delta;
145
146again:
147 now = sched_clock();
148 delta = now - scd->tick_raw;
149 if (unlikely(delta < 0))
150 delta = 0;
151
152 old_clock = scd->clock;
153
154 /*
155 * scd->clock = clamp(scd->tick_gtod + delta,
156 * max(scd->tick_gtod, scd->clock),
157 * scd->tick_gtod + TICK_NSEC);
158 */
159
160 clock = scd->tick_gtod + delta;
161 min_clock = wrap_max(scd->tick_gtod, old_clock);
162 max_clock = wrap_max(old_clock, scd->tick_gtod + TICK_NSEC);
163
164 clock = wrap_max(clock, min_clock);
165 clock = wrap_min(clock, max_clock);
166
167 if (cmpxchg64(&scd->clock, old_clock, clock) != old_clock)
168 goto again;
169
170 return clock;
171}
172
173static u64 sched_clock_remote(struct sched_clock_data *scd)
174{
175 struct sched_clock_data *my_scd = this_scd();
176 u64 this_clock, remote_clock;
177 u64 *ptr, old_val, val;
178
179 sched_clock_local(my_scd);
180again:
181 this_clock = my_scd->clock;
182 remote_clock = scd->clock;
183
184 /*
185 * Use the opportunity that we have both locks
186 * taken to couple the two clocks: we take the
187 * larger time as the latest time for both
188 * runqueues. (this creates monotonic movement)
189 */
190 if (likely((s64)(remote_clock - this_clock) < 0)) {
191 ptr = &scd->clock;
192 old_val = remote_clock;
193 val = this_clock;
194 } else {
195 /*
196 * Should be rare, but possible:
197 */
198 ptr = &my_scd->clock;
199 old_val = this_clock;
200 val = remote_clock;
201 }
202
203 if (cmpxchg64(ptr, old_val, val) != old_val)
204 goto again;
205
206 return val;
207}
208
209/*
210 * Similar to cpu_clock(), but requires local IRQs to be disabled.
211 *
212 * See cpu_clock().
213 */
214u64 sched_clock_cpu(int cpu)
215{
216 struct sched_clock_data *scd;
217 u64 clock;
218
219 WARN_ON_ONCE(!irqs_disabled());
220
221 if (sched_clock_stable)
222 return sched_clock();
223
224 if (unlikely(!sched_clock_running))
225 return 0ull;
226
227 scd = cpu_sdc(cpu);
228
229 if (cpu != smp_processor_id())
230 clock = sched_clock_remote(scd);
231 else
232 clock = sched_clock_local(scd);
233
234 return clock;
235}
236
237void sched_clock_tick(void)
238{
239 struct sched_clock_data *scd;
240 u64 now, now_gtod;
241
242 if (sched_clock_stable)
243 return;
244
245 if (unlikely(!sched_clock_running))
246 return;
247
248 WARN_ON_ONCE(!irqs_disabled());
249
250 scd = this_scd();
251 now_gtod = ktime_to_ns(ktime_get());
252 now = sched_clock();
253
254 scd->tick_raw = now;
255 scd->tick_gtod = now_gtod;
256 sched_clock_local(scd);
257}
258
259/*
260 * We are going deep-idle (irqs are disabled):
261 */
262void sched_clock_idle_sleep_event(void)
263{
264 sched_clock_cpu(smp_processor_id());
265}
266EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
267
268/*
269 * We just idled delta nanoseconds (called with irqs disabled):
270 */
271void sched_clock_idle_wakeup_event(u64 delta_ns)
272{
273 if (timekeeping_suspended)
274 return;
275
276 sched_clock_tick();
277 touch_softlockup_watchdog();
278}
279EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
280
281/*
282 * As outlined at the top, provides a fast, high resolution, nanosecond
283 * time source that is monotonic per cpu argument and has bounded drift
284 * between cpus.
285 *
286 * ######################### BIG FAT WARNING ##########################
287 * # when comparing cpu_clock(i) to cpu_clock(j) for i != j, time can #
288 * # go backwards !! #
289 * ####################################################################
290 */
291u64 cpu_clock(int cpu)
292{
293 u64 clock;
294 unsigned long flags;
295
296 local_irq_save(flags);
297 clock = sched_clock_cpu(cpu);
298 local_irq_restore(flags);
299
300 return clock;
301}
302
303/*
304 * Similar to cpu_clock() for the current cpu. Time will only be observed
305 * to be monotonic if care is taken to only compare timestamps taken on the
306 * same CPU.
307 *
308 * See cpu_clock().
309 */
310u64 local_clock(void)
311{
312 u64 clock;
313 unsigned long flags;
314
315 local_irq_save(flags);
316 clock = sched_clock_cpu(smp_processor_id());
317 local_irq_restore(flags);
318
319 return clock;
320}
321
322#else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
323
324void sched_clock_init(void)
325{
326 sched_clock_running = 1;
327}
328
329u64 sched_clock_cpu(int cpu)
330{
331 if (unlikely(!sched_clock_running))
332 return 0;
333
334 return sched_clock();
335}
336
337u64 cpu_clock(int cpu)
338{
339 return sched_clock_cpu(cpu);
340}
341
342u64 local_clock(void)
343{
344 return sched_clock_cpu(0);
345}
346
347#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
348
349EXPORT_SYMBOL_GPL(cpu_clock);
350EXPORT_SYMBOL_GPL(local_clock);
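The window filter in sched_clock_local() above clamps the extrapolated value tick_gtod + delta to the range [max(tick_gtod, old_clock), tick_gtod + TICK_NSEC], using wrap-safe comparisons so the ordering stays correct across u64 wrap-around. Below is a freestanding sketch of that clamp; the tick length and function names are illustrative stand-ins, not the kernel constants.

#include <stdint.h>

#define EXAMPLE_TICK_NSEC 1000000ULL	/* illustrative stand-in for TICK_NSEC: 1 ms */

/* Compare via a signed difference so the result stays correct if the
 * u64 counters wrap, exactly as wrap_min()/wrap_max() do above. */
static inline uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

static inline uint64_t wrap_max(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) > 0 ? x : y;
}

/* Mirror of the clamp in sched_clock_local(): never go backwards past the
 * previous value or the GTOD base, and cap the result at one tick past the
 * GTOD base (or at the previous value, whichever is later). */
static uint64_t filter_clock(uint64_t tick_gtod, uint64_t old_clock, int64_t delta)
{
	uint64_t clock, min_clock, max_clock;

	if (delta < 0)
		delta = 0;

	clock = tick_gtod + (uint64_t)delta;
	min_clock = wrap_max(tick_gtod, old_clock);
	max_clock = wrap_max(old_clock, tick_gtod + EXAMPLE_TICK_NSEC);

	return wrap_min(wrap_max(clock, min_clock), max_clock);
}

With a well-behaved sched_clock() the raw extrapolation falls inside the window and is returned unchanged; a wild jump is pinned to the window edge instead of propagating into rq->clock.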
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
new file mode 100644
index 000000000000..ca8fd44145ac
--- /dev/null
+++ b/kernel/sched/core.c
@@ -0,0 +1,8101 @@
1/*
2 * kernel/sched/core.c
3 *
4 * Kernel scheduler and related syscalls
5 *
6 * Copyright (C) 1991-2002 Linus Torvalds
7 *
8 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
9 * make semaphores SMP safe
10 * 1998-11-19 Implemented schedule_timeout() and related stuff
11 * by Andrea Arcangeli
12 * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
13 * hybrid priority-list and round-robin design with
14 * an array-switch method of distributing timeslices
15 * and per-CPU runqueues. Cleanups and useful suggestions
16 * by Davide Libenzi, preemptible kernel bits by Robert Love.
17 * 2003-09-03 Interactivity tuning by Con Kolivas.
18 * 2004-04-02 Scheduler domains code by Nick Piggin
19 * 2007-04-15 Work begun on replacing all interactivity tuning with a
20 * fair scheduling design by Con Kolivas.
21 * 2007-05-05 Load balancing (smp-nice) and other improvements
22 * by Peter Williams
23 * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith
24 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
25 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
26 * Thomas Gleixner, Mike Kravetz
27 */
28
29#include <linux/mm.h>
30#include <linux/module.h>
31#include <linux/nmi.h>
32#include <linux/init.h>
33#include <linux/uaccess.h>
34#include <linux/highmem.h>
35#include <asm/mmu_context.h>
36#include <linux/interrupt.h>
37#include <linux/capability.h>
38#include <linux/completion.h>
39#include <linux/kernel_stat.h>
40#include <linux/debug_locks.h>
41#include <linux/perf_event.h>
42#include <linux/security.h>
43#include <linux/notifier.h>
44#include <linux/profile.h>
45#include <linux/freezer.h>
46#include <linux/vmalloc.h>
47#include <linux/blkdev.h>
48#include <linux/delay.h>
49#include <linux/pid_namespace.h>
50#include <linux/smp.h>
51#include <linux/threads.h>
52#include <linux/timer.h>
53#include <linux/rcupdate.h>
54#include <linux/cpu.h>
55#include <linux/cpuset.h>
56#include <linux/percpu.h>
57#include <linux/proc_fs.h>
58#include <linux/seq_file.h>
59#include <linux/sysctl.h>
60#include <linux/syscalls.h>
61#include <linux/times.h>
62#include <linux/tsacct_kern.h>
63#include <linux/kprobes.h>
64#include <linux/delayacct.h>
65#include <linux/unistd.h>
66#include <linux/pagemap.h>
67#include <linux/hrtimer.h>
68#include <linux/tick.h>
69#include <linux/debugfs.h>
70#include <linux/ctype.h>
71#include <linux/ftrace.h>
72#include <linux/slab.h>
73#include <linux/init_task.h>
74
75#include <asm/tlb.h>
76#include <asm/irq_regs.h>
77#ifdef CONFIG_PARAVIRT
78#include <asm/paravirt.h>
79#endif
80
81#include "sched.h"
82#include "../workqueue_sched.h"
83
84#define CREATE_TRACE_POINTS
85#include <trace/events/sched.h>
86
87void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
88{
89 unsigned long delta;
90 ktime_t soft, hard, now;
91
92 for (;;) {
93 if (hrtimer_active(period_timer))
94 break;
95
96 now = hrtimer_cb_get_time(period_timer);
97 hrtimer_forward(period_timer, now, period);
98
99 soft = hrtimer_get_softexpires(period_timer);
100 hard = hrtimer_get_expires(period_timer);
101 delta = ktime_to_ns(ktime_sub(hard, soft));
102 __hrtimer_start_range_ns(period_timer, soft, delta,
103 HRTIMER_MODE_ABS_PINNED, 0);
104 }
105}
106
107DEFINE_MUTEX(sched_domains_mutex);
108DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
109
110static void update_rq_clock_task(struct rq *rq, s64 delta);
111
112void update_rq_clock(struct rq *rq)
113{
114 s64 delta;
115
116 if (rq->skip_clock_update > 0)
117 return;
118
119 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
120 rq->clock += delta;
121 update_rq_clock_task(rq, delta);
122}
123
124/*
125 * Debugging: various feature bits
126 */
127
128#define SCHED_FEAT(name, enabled) \
129 (1UL << __SCHED_FEAT_##name) * enabled |
130
131const_debug unsigned int sysctl_sched_features =
132#include "features.h"
133 0;
134
135#undef SCHED_FEAT
136
137#ifdef CONFIG_SCHED_DEBUG
138#define SCHED_FEAT(name, enabled) \
139 #name ,
140
141static __read_mostly char *sched_feat_names[] = {
142#include "features.h"
143 NULL
144};
145
146#undef SCHED_FEAT
147
148static int sched_feat_show(struct seq_file *m, void *v)
149{
150 int i;
151
152 for (i = 0; sched_feat_names[i]; i++) {
153 if (!(sysctl_sched_features & (1UL << i)))
154 seq_puts(m, "NO_");
155 seq_printf(m, "%s ", sched_feat_names[i]);
156 }
157 seq_puts(m, "\n");
158
159 return 0;
160}
161
162static ssize_t
163sched_feat_write(struct file *filp, const char __user *ubuf,
164 size_t cnt, loff_t *ppos)
165{
166 char buf[64];
167 char *cmp;
168 int neg = 0;
169 int i;
170
171 if (cnt > 63)
172 cnt = 63;
173
174 if (copy_from_user(&buf, ubuf, cnt))
175 return -EFAULT;
176
177 buf[cnt] = 0;
178 cmp = strstrip(buf);
179
180 if (strncmp(cmp, "NO_", 3) == 0) {
181 neg = 1;
182 cmp += 3;
183 }
184
185 for (i = 0; sched_feat_names[i]; i++) {
186 if (strcmp(cmp, sched_feat_names[i]) == 0) {
187 if (neg)
188 sysctl_sched_features &= ~(1UL << i);
189 else
190 sysctl_sched_features |= (1UL << i);
191 break;
192 }
193 }
194
195 if (!sched_feat_names[i])
196 return -EINVAL;
197
198 *ppos += cnt;
199
200 return cnt;
201}
202
203static int sched_feat_open(struct inode *inode, struct file *filp)
204{
205 return single_open(filp, sched_feat_show, NULL);
206}
207
208static const struct file_operations sched_feat_fops = {
209 .open = sched_feat_open,
210 .write = sched_feat_write,
211 .read = seq_read,
212 .llseek = seq_lseek,
213 .release = single_release,
214};
215
216static __init int sched_init_debug(void)
217{
218 debugfs_create_file("sched_features", 0644, NULL, NULL,
219 &sched_feat_fops);
220
221 return 0;
222}
223late_initcall(sched_init_debug);
224
225#endif
226
227/*
228 * Number of tasks to iterate in a single balance run.
229 * Limited because this is done with IRQs disabled.
230 */
231const_debug unsigned int sysctl_sched_nr_migrate = 32;
232
233/*
234 * period over which we average the RT time consumption, measured
235 * in ms.
236 *
237 * default: 1s
238 */
239const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
240
241/*
242 * period over which we measure -rt task cpu usage in us.
243 * default: 1s
244 */
245unsigned int sysctl_sched_rt_period = 1000000;
246
247__read_mostly int scheduler_running;
248
249/*
250 * part of the period that we allow rt tasks to run in us.
251 * default: 0.95s
252 */
253int sysctl_sched_rt_runtime = 950000;
254
255
256
257/*
258 * __task_rq_lock - lock the rq @p resides on.
259 */
260static inline struct rq *__task_rq_lock(struct task_struct *p)
261 __acquires(rq->lock)
262{
263 struct rq *rq;
264
265 lockdep_assert_held(&p->pi_lock);
266
267 for (;;) {
268 rq = task_rq(p);
269 raw_spin_lock(&rq->lock);
270 if (likely(rq == task_rq(p)))
271 return rq;
272 raw_spin_unlock(&rq->lock);
273 }
274}
275
276/*
277 * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
278 */
279static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
280 __acquires(p->pi_lock)
281 __acquires(rq->lock)
282{
283 struct rq *rq;
284
285 for (;;) {
286 raw_spin_lock_irqsave(&p->pi_lock, *flags);
287 rq = task_rq(p);
288 raw_spin_lock(&rq->lock);
289 if (likely(rq == task_rq(p)))
290 return rq;
291 raw_spin_unlock(&rq->lock);
292 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
293 }
294}
295
296static void __task_rq_unlock(struct rq *rq)
297 __releases(rq->lock)
298{
299 raw_spin_unlock(&rq->lock);
300}
301
302static inline void
303task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
304 __releases(rq->lock)
305 __releases(p->pi_lock)
306{
307 raw_spin_unlock(&rq->lock);
308 raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
309}
310
311/*
312 * this_rq_lock - lock this runqueue and disable interrupts.
313 */
314static struct rq *this_rq_lock(void)
315 __acquires(rq->lock)
316{
317 struct rq *rq;
318
319 local_irq_disable();
320 rq = this_rq();
321 raw_spin_lock(&rq->lock);
322
323 return rq;
324}
325
326#ifdef CONFIG_SCHED_HRTICK
327/*
328 * Use HR-timers to deliver accurate preemption points.
329 *
330 * It's all a bit involved since we cannot program an hrtimer while holding the
331 * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
332 * reschedule event.
333 *
334 * When we get rescheduled we reprogram the hrtick_timer outside of the
335 * rq->lock.
336 */
337
338static void hrtick_clear(struct rq *rq)
339{
340 if (hrtimer_active(&rq->hrtick_timer))
341 hrtimer_cancel(&rq->hrtick_timer);
342}
343
344/*
345 * High-resolution timer tick.
346 * Runs from hardirq context with interrupts disabled.
347 */
348static enum hrtimer_restart hrtick(struct hrtimer *timer)
349{
350 struct rq *rq = container_of(timer, struct rq, hrtick_timer);
351
352 WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
353
354 raw_spin_lock(&rq->lock);
355 update_rq_clock(rq);
356 rq->curr->sched_class->task_tick(rq, rq->curr, 1);
357 raw_spin_unlock(&rq->lock);
358
359 return HRTIMER_NORESTART;
360}
361
362#ifdef CONFIG_SMP
363/*
364 * called from hardirq (IPI) context
365 */
366static void __hrtick_start(void *arg)
367{
368 struct rq *rq = arg;
369
370 raw_spin_lock(&rq->lock);
371 hrtimer_restart(&rq->hrtick_timer);
372 rq->hrtick_csd_pending = 0;
373 raw_spin_unlock(&rq->lock);
374}
375
376/*
377 * Called to set the hrtick timer state.
378 *
379 * called with rq->lock held and irqs disabled
380 */
381void hrtick_start(struct rq *rq, u64 delay)
382{
383 struct hrtimer *timer = &rq->hrtick_timer;
384 ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
385
386 hrtimer_set_expires(timer, time);
387
388 if (rq == this_rq()) {
389 hrtimer_restart(timer);
390 } else if (!rq->hrtick_csd_pending) {
391 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
392 rq->hrtick_csd_pending = 1;
393 }
394}
395
396static int
397hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
398{
399 int cpu = (int)(long)hcpu;
400
401 switch (action) {
402 case CPU_UP_CANCELED:
403 case CPU_UP_CANCELED_FROZEN:
404 case CPU_DOWN_PREPARE:
405 case CPU_DOWN_PREPARE_FROZEN:
406 case CPU_DEAD:
407 case CPU_DEAD_FROZEN:
408 hrtick_clear(cpu_rq(cpu));
409 return NOTIFY_OK;
410 }
411
412 return NOTIFY_DONE;
413}
414
415static __init void init_hrtick(void)
416{
417 hotcpu_notifier(hotplug_hrtick, 0);
418}
419#else
420/*
421 * Called to set the hrtick timer state.
422 *
423 * called with rq->lock held and irqs disabled
424 */
425void hrtick_start(struct rq *rq, u64 delay)
426{
427 __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
428 HRTIMER_MODE_REL_PINNED, 0);
429}
430
431static inline void init_hrtick(void)
432{
433}
434#endif /* CONFIG_SMP */
435
436static void init_rq_hrtick(struct rq *rq)
437{
438#ifdef CONFIG_SMP
439 rq->hrtick_csd_pending = 0;
440
441 rq->hrtick_csd.flags = 0;
442 rq->hrtick_csd.func = __hrtick_start;
443 rq->hrtick_csd.info = rq;
444#endif
445
446 hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
447 rq->hrtick_timer.function = hrtick;
448}
449#else /* CONFIG_SCHED_HRTICK */
450static inline void hrtick_clear(struct rq *rq)
451{
452}
453
454static inline void init_rq_hrtick(struct rq *rq)
455{
456}
457
458static inline void init_hrtick(void)
459{
460}
461#endif /* CONFIG_SCHED_HRTICK */
462
463/*
464 * resched_task - mark a task 'to be rescheduled now'.
465 *
466 * On UP this means the setting of the need_resched flag, on SMP it
467 * might also involve a cross-CPU call to trigger the scheduler on
468 * the target CPU.
469 */
470#ifdef CONFIG_SMP
471
472#ifndef tsk_is_polling
473#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
474#endif
475
476void resched_task(struct task_struct *p)
477{
478 int cpu;
479
480 assert_raw_spin_locked(&task_rq(p)->lock);
481
482 if (test_tsk_need_resched(p))
483 return;
484
485 set_tsk_need_resched(p);
486
487 cpu = task_cpu(p);
488 if (cpu == smp_processor_id())
489 return;
490
491 /* NEED_RESCHED must be visible before we test polling */
492 smp_mb();
493 if (!tsk_is_polling(p))
494 smp_send_reschedule(cpu);
495}
496
497void resched_cpu(int cpu)
498{
499 struct rq *rq = cpu_rq(cpu);
500 unsigned long flags;
501
502 if (!raw_spin_trylock_irqsave(&rq->lock, flags))
503 return;
504 resched_task(cpu_curr(cpu));
505 raw_spin_unlock_irqrestore(&rq->lock, flags);
506}
507
508#ifdef CONFIG_NO_HZ
509/*
510 * In the semi idle case, use the nearest busy cpu for migrating timers
511 * from an idle cpu. This is good for power-savings.
512 *
513 * We don't do a similar optimization for a completely idle system, as
514 * selecting an idle cpu will add more delays to the timers than intended
515 * (as that cpu's timer base may not be uptodate wrt jiffies etc).
516 */
517int get_nohz_timer_target(void)
518{
519 int cpu = smp_processor_id();
520 int i;
521 struct sched_domain *sd;
522
523 rcu_read_lock();
524 for_each_domain(cpu, sd) {
525 for_each_cpu(i, sched_domain_span(sd)) {
526 if (!idle_cpu(i)) {
527 cpu = i;
528 goto unlock;
529 }
530 }
531 }
532unlock:
533 rcu_read_unlock();
534 return cpu;
535}
536/*
537 * When add_timer_on() enqueues a timer into the timer wheel of an
538 * idle CPU then this timer might expire before the next timer event
539 * which is scheduled to wake up that CPU. In case of a completely
540 * idle system the next event might even be infinite time into the
541 * future. wake_up_idle_cpu() ensures that the CPU is woken up and
542 * leaves the inner idle loop so the newly added timer is taken into
543 * account when the CPU goes back to idle and evaluates the timer
544 * wheel for the next timer event.
545 */
546void wake_up_idle_cpu(int cpu)
547{
548 struct rq *rq = cpu_rq(cpu);
549
550 if (cpu == smp_processor_id())
551 return;
552
553 /*
554 * This is safe, as this function is called with the timer
555 * wheel base lock of (cpu) held. When the CPU is on the way
556 * to idle and has not yet set rq->curr to idle then it will
557 * be serialized on the timer wheel base lock and take the new
558 * timer into account automatically.
559 */
560 if (rq->curr != rq->idle)
561 return;
562
563 /*
564 * We can set TIF_RESCHED on the idle task of the other CPU
565 * lockless. The worst case is that the other CPU runs the
566 * idle task through an additional NOOP schedule()
567 */
568 set_tsk_need_resched(rq->idle);
569
570 /* NEED_RESCHED must be visible before we test polling */
571 smp_mb();
572 if (!tsk_is_polling(rq->idle))
573 smp_send_reschedule(cpu);
574}
575
576static inline bool got_nohz_idle_kick(void)
577{
578 return idle_cpu(smp_processor_id()) && this_rq()->nohz_balance_kick;
579}
580
581#else /* CONFIG_NO_HZ */
582
583static inline bool got_nohz_idle_kick(void)
584{
585 return false;
586}
587
588#endif /* CONFIG_NO_HZ */
589
590void sched_avg_update(struct rq *rq)
591{
592 s64 period = sched_avg_period();
593
594 while ((s64)(rq->clock - rq->age_stamp) > period) {
595 /*
596 * Inline assembly required to prevent the compiler
597 * optimising this loop into a divmod call.
598 * See __iter_div_u64_rem() for another example of this.
599 */
600 asm("" : "+rm" (rq->age_stamp));
601 rq->age_stamp += period;
602 rq->rt_avg /= 2;
603 }
604}
605
606#else /* !CONFIG_SMP */
607void resched_task(struct task_struct *p)
608{
609 assert_raw_spin_locked(&task_rq(p)->lock);
610 set_tsk_need_resched(p);
611}
612#endif /* CONFIG_SMP */
613
614#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \
615 (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH)))
616/*
617 * Iterate task_group tree rooted at *from, calling @down when first entering a
618 * node and @up when leaving it for the final time.
619 *
620 * Caller must hold rcu_lock or sufficient equivalent.
621 */
622int walk_tg_tree_from(struct task_group *from,
623 tg_visitor down, tg_visitor up, void *data)
624{
625 struct task_group *parent, *child;
626 int ret;
627
628 parent = from;
629
630down:
631 ret = (*down)(parent, data);
632 if (ret)
633 goto out;
634 list_for_each_entry_rcu(child, &parent->children, siblings) {
635 parent = child;
636 goto down;
637
638up:
639 continue;
640 }
641 ret = (*up)(parent, data);
642 if (ret || parent == from)
643 goto out;
644
645 child = parent;
646 parent = parent->parent;
647 if (parent)
648 goto up;
649out:
650 return ret;
651}
652
653int tg_nop(struct task_group *tg, void *data)
654{
655 return 0;
656}
657#endif
658
659void update_cpu_load(struct rq *this_rq);
660
661static void set_load_weight(struct task_struct *p)
662{
663 int prio = p->static_prio - MAX_RT_PRIO;
664 struct load_weight *load = &p->se.load;
665
666 /*
667 * SCHED_IDLE tasks get minimal weight:
668 */
669 if (p->policy == SCHED_IDLE) {
670 load->weight = scale_load(WEIGHT_IDLEPRIO);
671 load->inv_weight = WMULT_IDLEPRIO;
672 return;
673 }
674
675 load->weight = scale_load(prio_to_weight[prio]);
676 load->inv_weight = prio_to_wmult[prio];
677}
678
679static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
680{
681 update_rq_clock(rq);
682 sched_info_queued(p);
683 p->sched_class->enqueue_task(rq, p, flags);
684}
685
686static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
687{
688 update_rq_clock(rq);
689 sched_info_dequeued(p);
690 p->sched_class->dequeue_task(rq, p, flags);
691}
692
693/*
694 * activate_task - move a task to the runqueue.
695 */
696void activate_task(struct rq *rq, struct task_struct *p, int flags)
697{
698 if (task_contributes_to_load(p))
699 rq->nr_uninterruptible--;
700
701 enqueue_task(rq, p, flags);
702}
703
704/*
705 * deactivate_task - remove a task from the runqueue.
706 */
707void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
708{
709 if (task_contributes_to_load(p))
710 rq->nr_uninterruptible++;
711
712 dequeue_task(rq, p, flags);
713}
714
715#ifdef CONFIG_IRQ_TIME_ACCOUNTING
716
717/*
718 * There are no locks covering percpu hardirq/softirq time.
719 * They are only modified in account_system_vtime, on corresponding CPU
720 * with interrupts disabled. So, writes are safe.
721 * They are read and saved off onto struct rq in update_rq_clock().
722 * This may result in other CPU reading this CPU's irq time and can
723 * race with irq/account_system_vtime on this CPU. We would either get the old
724 * or the new value with a side effect of accounting a slice of irq time to the
725 * wrong task when an irq is in progress while we read rq->clock. That is a worthy
726 * compromise in place of having locks on each irq in account_system_time.
727 */
728static DEFINE_PER_CPU(u64, cpu_hardirq_time);
729static DEFINE_PER_CPU(u64, cpu_softirq_time);
730
731static DEFINE_PER_CPU(u64, irq_start_time);
732static int sched_clock_irqtime;
733
734void enable_sched_clock_irqtime(void)
735{
736 sched_clock_irqtime = 1;
737}
738
739void disable_sched_clock_irqtime(void)
740{
741 sched_clock_irqtime = 0;
742}
743
744#ifndef CONFIG_64BIT
745static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
746
747static inline void irq_time_write_begin(void)
748{
749 __this_cpu_inc(irq_time_seq.sequence);
750 smp_wmb();
751}
752
753static inline void irq_time_write_end(void)
754{
755 smp_wmb();
756 __this_cpu_inc(irq_time_seq.sequence);
757}
758
759static inline u64 irq_time_read(int cpu)
760{
761 u64 irq_time;
762 unsigned seq;
763
764 do {
765 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
766 irq_time = per_cpu(cpu_softirq_time, cpu) +
767 per_cpu(cpu_hardirq_time, cpu);
768 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
769
770 return irq_time;
771}
772#else /* CONFIG_64BIT */
773static inline void irq_time_write_begin(void)
774{
775}
776
777static inline void irq_time_write_end(void)
778{
779}
780
781static inline u64 irq_time_read(int cpu)
782{
783 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu);
784}
785#endif /* CONFIG_64BIT */
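/*
 * Illustrative sketch only, not part of this patch: on 32-bit a u64 store is
 * two machine words, so without the sequence count above a reader of
 * irq_time_read() could see a torn value. Written with the stock seqcount
 * API from <linux/seqlock.h>, the writer/reader pairing looks like this
 * (all names here are made up for the example):
 */
struct example_irqtime {
	seqcount_t seq;
	u64 ns;
};

static void example_irqtime_add(struct example_irqtime *t, u64 delta)
{
	write_seqcount_begin(&t->seq);	/* count goes odd, then smp_wmb() */
	t->ns += delta;
	write_seqcount_end(&t->seq);	/* smp_wmb(), count goes even again */
}

static u64 example_irqtime_read(struct example_irqtime *t)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&t->seq);	/* waits for an even count */
		val = t->ns;
	} while (read_seqcount_retry(&t->seq, seq));	/* retry if a writer ran */

	return val;
}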
786
787/*
788 * Called before incrementing preempt_count on {soft,}irq_enter
789 * and before decrementing preempt_count on {soft,}irq_exit.
790 */
791void account_system_vtime(struct task_struct *curr)
792{
793 unsigned long flags;
794 s64 delta;
795 int cpu;
796
797 if (!sched_clock_irqtime)
798 return;
799
800 local_irq_save(flags);
801
802 cpu = smp_processor_id();
803 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
804 __this_cpu_add(irq_start_time, delta);
805
806 irq_time_write_begin();
807 /*
808 * We do not account for softirq time from ksoftirqd here.
809 * We want to continue accounting softirq time to the ksoftirqd thread
810 * in that case, so as not to confuse the scheduler with a special task
811 * that does not consume any time but still wants to run.
812 */
813 if (hardirq_count())
814 __this_cpu_add(cpu_hardirq_time, delta);
815 else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
816 __this_cpu_add(cpu_softirq_time, delta);
817
818 irq_time_write_end();
819 local_irq_restore(flags);
820}
821EXPORT_SYMBOL_GPL(account_system_vtime);
822
823#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
824
825#ifdef CONFIG_PARAVIRT
826static inline u64 steal_ticks(u64 steal)
827{
828 if (unlikely(steal > NSEC_PER_SEC))
829 return div_u64(steal, TICK_NSEC);
830
831 return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
832}
833#endif
834
835static void update_rq_clock_task(struct rq *rq, s64 delta)
836{
837/*
838 * In theory, the compiler should just see 0 here, and optimize out the call
839 * to sched_rt_avg_update. But I don't trust it...
840 */
841#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
842 s64 steal = 0, irq_delta = 0;
843#endif
844#ifdef CONFIG_IRQ_TIME_ACCOUNTING
845 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
846
847 /*
848 * Since irq_time is only updated on {soft,}irq_exit, we might run into
849 * this case when a previous update_rq_clock() happened inside a
850 * {soft,}irq region.
851 *
852 * When this happens, we stop ->clock_task and only update the
853 * prev_irq_time stamp to account for the part that fit, so that a next
854 * update will consume the rest. This ensures ->clock_task is
855 * monotonic.
856 *
857 * It does however cause some slight misattribution of {soft,}irq
858 * time, a more accurate solution would be to update the irq_time using
859 * the current rq->clock timestamp, except that would require using
860 * atomic ops.
861 */
862 if (irq_delta > delta)
863 irq_delta = delta;
864
865 rq->prev_irq_time += irq_delta;
866 delta -= irq_delta;
867#endif
868#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
869 if (static_branch((&paravirt_steal_rq_enabled))) {
870 u64 st;
871
872 steal = paravirt_steal_clock(cpu_of(rq));
873 steal -= rq->prev_steal_time_rq;
874
875 if (unlikely(steal > delta))
876 steal = delta;
877
878 st = steal_ticks(steal);
879 steal = st * TICK_NSEC;
880
881 rq->prev_steal_time_rq += steal;
882
883 delta -= steal;
884 }
885#endif
886
887 rq->clock_task += delta;
888
889#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
890 if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
891 sched_rt_avg_update(rq, irq_delta + steal);
892#endif
893}
894
895#ifdef CONFIG_IRQ_TIME_ACCOUNTING
896static int irqtime_account_hi_update(void)
897{
898 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
899 unsigned long flags;
900 u64 latest_ns;
901 int ret = 0;
902
903 local_irq_save(flags);
904 latest_ns = this_cpu_read(cpu_hardirq_time);
905 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
906 ret = 1;
907 local_irq_restore(flags);
908 return ret;
909}
910
911static int irqtime_account_si_update(void)
912{
913 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
914 unsigned long flags;
915 u64 latest_ns;
916 int ret = 0;
917
918 local_irq_save(flags);
919 latest_ns = this_cpu_read(cpu_softirq_time);
920 if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
921 ret = 1;
922 local_irq_restore(flags);
923 return ret;
924}
925
926#else /* CONFIG_IRQ_TIME_ACCOUNTING */
927
928#define sched_clock_irqtime (0)
929
930#endif
931
932void sched_set_stop_task(int cpu, struct task_struct *stop)
933{
934 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
935 struct task_struct *old_stop = cpu_rq(cpu)->stop;
936
937 if (stop) {
938 /*
939 * Make it appear like a SCHED_FIFO task; it's something
940 * userspace knows about and won't get confused about.
941 *
942 * Also, it will make PI more or less work without too
943 * much confusion -- but then, stop work should not
944 * rely on PI working anyway.
945 */
946 sched_setscheduler_nocheck(stop, SCHED_FIFO, &param);
947
948 stop->sched_class = &stop_sched_class;
949 }
950
951 cpu_rq(cpu)->stop = stop;
952
953 if (old_stop) {
954 /*
955 * Reset it back to a normal scheduling class so that
956 * it can die in pieces.
957 */
958 old_stop->sched_class = &rt_sched_class;
959 }
960}
961
962/*
963 * __normal_prio - return the priority that is based on the static prio
964 */
965static inline int __normal_prio(struct task_struct *p)
966{
967 return p->static_prio;
968}
969
970/*
971 * Calculate the expected normal priority: i.e. priority
972 * without taking RT-inheritance into account. Might be
973 * boosted by interactivity modifiers. Changes upon fork,
974 * setprio syscalls, and whenever the interactivity
975 * estimator recalculates.
976 */
977static inline int normal_prio(struct task_struct *p)
978{
979 int prio;
980
981 if (task_has_rt_policy(p))
982 prio = MAX_RT_PRIO-1 - p->rt_priority;
983 else
984 prio = __normal_prio(p);
985 return prio;
986}
987
988/*
989 * Calculate the current priority, i.e. the priority
990 * taken into account by the scheduler. This value might
991 * be boosted by RT tasks, or might be boosted by
992 * interactivity modifiers. Will be RT if the task got
993 * RT-boosted. If not then it returns p->normal_prio.
994 */
995static int effective_prio(struct task_struct *p)
996{
997 p->normal_prio = normal_prio(p);
998 /*
999 * If we are RT tasks or we were boosted to RT priority,
1000 * keep the priority unchanged. Otherwise, update priority
1001 * to the normal priority:
1002 */
1003 if (!rt_prio(p->prio))
1004 return p->normal_prio;
1005 return p->prio;
1006}
1007
1008/**
1009 * task_curr - is this task currently executing on a CPU?
1010 * @p: the task in question.
1011 */
1012inline int task_curr(const struct task_struct *p)
1013{
1014 return cpu_curr(task_cpu(p)) == p;
1015}
1016
1017static inline void check_class_changed(struct rq *rq, struct task_struct *p,
1018 const struct sched_class *prev_class,
1019 int oldprio)
1020{
1021 if (prev_class != p->sched_class) {
1022 if (prev_class->switched_from)
1023 prev_class->switched_from(rq, p);
1024 p->sched_class->switched_to(rq, p);
1025 } else if (oldprio != p->prio)
1026 p->sched_class->prio_changed(rq, p, oldprio);
1027}
1028
1029void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1030{
1031 const struct sched_class *class;
1032
1033 if (p->sched_class == rq->curr->sched_class) {
1034 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
1035 } else {
1036 for_each_class(class) {
1037 if (class == rq->curr->sched_class)
1038 break;
1039 if (class == p->sched_class) {
1040 resched_task(rq->curr);
1041 break;
1042 }
1043 }
1044 }
1045
1046 /*
1047 * A queue event has occurred, and we're going to schedule. In
1048 * this case, we can save a useless back to back clock update.
1049 */
1050 if (rq->curr->on_rq && test_tsk_need_resched(rq->curr))
1051 rq->skip_clock_update = 1;
1052}
1053
1054#ifdef CONFIG_SMP
1055void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
1056{
1057#ifdef CONFIG_SCHED_DEBUG
1058 /*
1059 * We should never call set_task_cpu() on a blocked task,
1060 * ttwu() will sort out the placement.
1061 */
1062 WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
1063 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
1064
1065#ifdef CONFIG_LOCKDEP
1066 /*
1067 * The caller should hold either p->pi_lock or rq->lock, when changing
1068 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
1069 *
1070 * sched_move_task() holds both and thus holding either pins the cgroup,
1071 * see set_task_rq().
1072 *
1073 * Furthermore, all task_rq users should acquire both locks, see
1074 * task_rq_lock().
1075 */
1076 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
1077 lockdep_is_held(&task_rq(p)->lock)));
1078#endif
1079#endif
1080
1081 trace_sched_migrate_task(p, new_cpu);
1082
1083 if (task_cpu(p) != new_cpu) {
1084 p->se.nr_migrations++;
1085 perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
1086 }
1087
1088 __set_task_cpu(p, new_cpu);
1089}
1090
1091struct migration_arg {
1092 struct task_struct *task;
1093 int dest_cpu;
1094};
1095
1096static int migration_cpu_stop(void *data);
1097
1098/*
1099 * wait_task_inactive - wait for a thread to unschedule.
1100 *
1101 * If @match_state is nonzero, it's the @p->state value just checked and
1102 * not expected to change. If it changes, i.e. @p might have woken up,
1103 * then return zero. When we succeed in waiting for @p to be off its CPU,
1104 * we return a positive number (its total switch count). If a second call
1105 * a short while later returns the same number, the caller can be sure that
1106 * @p has remained unscheduled the whole time.
1107 *
1108 * The caller must ensure that the task *will* unschedule sometime soon,
1109 * else this function might spin for a *long* time. This function can't
1110 * be called with interrupts off, or it may introduce deadlock with
1111 * smp_call_function() if an IPI is sent by the same process we are
1112 * waiting to become inactive.
1113 */
1114unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1115{
1116 unsigned long flags;
1117 int running, on_rq;
1118 unsigned long ncsw;
1119 struct rq *rq;
1120
1121 for (;;) {
1122 /*
1123 * We do the initial early heuristics without holding
1124 * any task-queue locks at all. We'll only try to get
1125 * the runqueue lock when things look like they will
1126 * work out!
1127 */
1128 rq = task_rq(p);
1129
1130 /*
1131 * If the task is actively running on another CPU
1132 * still, just relax and busy-wait without holding
1133 * any locks.
1134 *
1135 * NOTE! Since we don't hold any locks, it's not
1136 * even sure that "rq" stays as the right runqueue!
1137 * But we don't care, since "task_running()" will
1138 * return false if the runqueue has changed and p
1139 * is actually now running somewhere else!
1140 */
1141 while (task_running(rq, p)) {
1142 if (match_state && unlikely(p->state != match_state))
1143 return 0;
1144 cpu_relax();
1145 }
1146
1147 /*
1148 * Ok, time to look more closely! We need the rq
1149 * lock now, to be *sure*. If we're wrong, we'll
1150 * just go back and repeat.
1151 */
1152 rq = task_rq_lock(p, &flags);
1153 trace_sched_wait_task(p);
1154 running = task_running(rq, p);
1155 on_rq = p->on_rq;
1156 ncsw = 0;
1157 if (!match_state || p->state == match_state)
1158 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1159 task_rq_unlock(rq, p, &flags);
1160
1161 /*
1162 * If it changed from the expected state, bail out now.
1163 */
1164 if (unlikely(!ncsw))
1165 break;
1166
1167 /*
1168 * Was it really running after all now that we
1169 * checked with the proper locks actually held?
1170 *
1171 * Oops. Go back and try again..
1172 */
1173 if (unlikely(running)) {
1174 cpu_relax();
1175 continue;
1176 }
1177
1178 /*
1179 * It's not enough that it's not actively running,
1180 * it must be off the runqueue _entirely_, and not
1181 * preempted!
1182 *
1183 * So if it was still runnable (but just not actively
1184 * running right now), it's preempted, and we should
1185 * yield - it could be a while.
1186 */
1187 if (unlikely(on_rq)) {
1188 ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
1189
1190 set_current_state(TASK_UNINTERRUPTIBLE);
1191 schedule_hrtimeout(&to, HRTIMER_MODE_REL);
1192 continue;
1193 }
1194
1195 /*
1196 * Ahh, all good. It wasn't running, and it wasn't
1197 * runnable, which means that it will never become
1198 * running in the future either. We're all done!
1199 */
1200 break;
1201 }
1202
1203 return ncsw;
1204}
1205
1206/***
1207 * kick_process - kick a running thread to enter/exit the kernel
1208 * @p: the to-be-kicked thread
1209 *
1210 * Cause a process which is running on another CPU to enter
1211 * kernel-mode, without any delay. (to get signals handled.)
1212 *
1213 * NOTE: this function doesn't have to take the runqueue lock,
1214 * because all it wants to ensure is that the remote task enters
1215 * the kernel. If the IPI races and the task has been migrated
1216 * to another CPU then no harm is done and the purpose has been
1217 * achieved as well.
1218 */
1219void kick_process(struct task_struct *p)
1220{
1221 int cpu;
1222
1223 preempt_disable();
1224 cpu = task_cpu(p);
1225 if ((cpu != smp_processor_id()) && task_curr(p))
1226 smp_send_reschedule(cpu);
1227 preempt_enable();
1228}
1229EXPORT_SYMBOL_GPL(kick_process);
1230#endif /* CONFIG_SMP */
1231
1232#ifdef CONFIG_SMP
1233/*
1234 * ->cpus_allowed is protected by both rq->lock and p->pi_lock
1235 */
1236static int select_fallback_rq(int cpu, struct task_struct *p)
1237{
1238 int dest_cpu;
1239 const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
1240
1241 /* Look for allowed, online CPU in same node. */
1242 for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
1243 if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
1244 return dest_cpu;
1245
1246 /* Any allowed, online CPU? */
1247 dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask);
1248 if (dest_cpu < nr_cpu_ids)
1249 return dest_cpu;
1250
1251 /* No more Mr. Nice Guy. */
1252 dest_cpu = cpuset_cpus_allowed_fallback(p);
1253 /*
1254 * Don't tell them about moving exiting tasks or
1255 * kernel threads (both mm NULL), since they never
1256 * leave kernel.
1257 */
1258 if (p->mm && printk_ratelimit()) {
1259 printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
1260 task_pid_nr(p), p->comm, cpu);
1261 }
1262
1263 return dest_cpu;
1264}
1265
1266/*
1267 * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
1268 */
1269static inline
1270int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
1271{
1272 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
1273
1274 /*
1275 * In order not to call set_task_cpu() on a blocking task we need
1276 * to rely on ttwu() to place the task on a valid ->cpus_allowed
1277 * cpu.
1278 *
1279 * Since this is common to all placement strategies, this lives here.
1280 *
1281 * [ this allows ->select_task() to simply return task_cpu(p) and
1282 * not worry about this generic constraint ]
1283 */
1284 if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) ||
1285 !cpu_online(cpu)))
1286 cpu = select_fallback_rq(task_cpu(p), p);
1287
1288 return cpu;
1289}
1290
1291static void update_avg(u64 *avg, u64 sample)
1292{
1293 s64 diff = sample - *avg;
1294 *avg += diff >> 3;
1295}
1296#endif
1297
1298static void
1299ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
1300{
1301#ifdef CONFIG_SCHEDSTATS
1302 struct rq *rq = this_rq();
1303
1304#ifdef CONFIG_SMP
1305 int this_cpu = smp_processor_id();
1306
1307 if (cpu == this_cpu) {
1308 schedstat_inc(rq, ttwu_local);
1309 schedstat_inc(p, se.statistics.nr_wakeups_local);
1310 } else {
1311 struct sched_domain *sd;
1312
1313 schedstat_inc(p, se.statistics.nr_wakeups_remote);
1314 rcu_read_lock();
1315 for_each_domain(this_cpu, sd) {
1316 if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
1317 schedstat_inc(sd, ttwu_wake_remote);
1318 break;
1319 }
1320 }
1321 rcu_read_unlock();
1322 }
1323
1324 if (wake_flags & WF_MIGRATED)
1325 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
1326
1327#endif /* CONFIG_SMP */
1328
1329 schedstat_inc(rq, ttwu_count);
1330 schedstat_inc(p, se.statistics.nr_wakeups);
1331
1332 if (wake_flags & WF_SYNC)
1333 schedstat_inc(p, se.statistics.nr_wakeups_sync);
1334
1335#endif /* CONFIG_SCHEDSTATS */
1336}
1337
1338static void ttwu_activate(struct rq *rq, struct task_struct *p, int en_flags)
1339{
1340 activate_task(rq, p, en_flags);
1341 p->on_rq = 1;
1342
1343 /* if a worker is waking up, notify workqueue */
1344 if (p->flags & PF_WQ_WORKER)
1345 wq_worker_waking_up(p, cpu_of(rq));
1346}
1347
1348/*
1349 * Mark the task runnable and perform wakeup-preemption.
1350 */
1351static void
1352ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
1353{
1354 trace_sched_wakeup(p, true);
1355 check_preempt_curr(rq, p, wake_flags);
1356
1357 p->state = TASK_RUNNING;
1358#ifdef CONFIG_SMP
1359 if (p->sched_class->task_woken)
1360 p->sched_class->task_woken(rq, p);
1361
1362 if (rq->idle_stamp) {
1363 u64 delta = rq->clock - rq->idle_stamp;
1364 u64 max = 2*sysctl_sched_migration_cost;
1365
1366 if (delta > max)
1367 rq->avg_idle = max;
1368 else
1369 update_avg(&rq->avg_idle, delta);
1370 rq->idle_stamp = 0;
1371 }
1372#endif
1373}
1374
1375static void
1376ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
1377{
1378#ifdef CONFIG_SMP
1379 if (p->sched_contributes_to_load)
1380 rq->nr_uninterruptible--;
1381#endif
1382
1383 ttwu_activate(rq, p, ENQUEUE_WAKEUP | ENQUEUE_WAKING);
1384 ttwu_do_wakeup(rq, p, wake_flags);
1385}
1386
1387/*
1388 * Called in case the task @p isn't fully descheduled from its runqueue;
1389 * in that case we must do a remote wakeup. It's a 'light' wakeup though,
1390 * since all we need to do is flip p->state to TASK_RUNNING, as
1391 * the task is still ->on_rq.
1392 */
1393static int ttwu_remote(struct task_struct *p, int wake_flags)
1394{
1395 struct rq *rq;
1396 int ret = 0;
1397
1398 rq = __task_rq_lock(p);
1399 if (p->on_rq) {
1400 ttwu_do_wakeup(rq, p, wake_flags);
1401 ret = 1;
1402 }
1403 __task_rq_unlock(rq);
1404
1405 return ret;
1406}
1407
1408#ifdef CONFIG_SMP
1409static void sched_ttwu_pending(void)
1410{
1411 struct rq *rq = this_rq();
1412 struct llist_node *llist = llist_del_all(&rq->wake_list);
1413 struct task_struct *p;
1414
1415 raw_spin_lock(&rq->lock);
1416
1417 while (llist) {
1418 p = llist_entry(llist, struct task_struct, wake_entry);
1419 llist = llist_next(llist);
1420 ttwu_do_activate(rq, p, 0);
1421 }
1422
1423 raw_spin_unlock(&rq->lock);
1424}
1425
1426void scheduler_ipi(void)
1427{
1428 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
1429 return;
1430
1431 /*
1432 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
1433 * traditionally all their work was done from the interrupt return
1434 * path. Now that we actually do some work, we need to make sure
1435 * we do call them.
1436 *
1437 * Some archs already do call them, luckily irq_enter/exit nest
1438 * properly.
1439 *
1440 * Arguably we should visit all archs and update all handlers,
1441	 * however a fair share of IPIs are still resched only, so this would
1442 * somewhat pessimize the simple resched case.
1443 */
1444 irq_enter();
1445 sched_ttwu_pending();
1446
1447 /*
1448 * Check if someone kicked us for doing the nohz idle load balance.
1449 */
1450 if (unlikely(got_nohz_idle_kick() && !need_resched())) {
1451 this_rq()->idle_balance = 1;
1452 raise_softirq_irqoff(SCHED_SOFTIRQ);
1453 }
1454 irq_exit();
1455}
1456
1457static void ttwu_queue_remote(struct task_struct *p, int cpu)
1458{
1459 if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list))
1460 smp_send_reschedule(cpu);
1461}
1462
1463#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1464static int ttwu_activate_remote(struct task_struct *p, int wake_flags)
1465{
1466 struct rq *rq;
1467 int ret = 0;
1468
1469 rq = __task_rq_lock(p);
1470 if (p->on_cpu) {
1471 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1472 ttwu_do_wakeup(rq, p, wake_flags);
1473 ret = 1;
1474 }
1475 __task_rq_unlock(rq);
1476
1477 return ret;
1478
1479}
1480#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1481#endif /* CONFIG_SMP */
1482
1483static void ttwu_queue(struct task_struct *p, int cpu)
1484{
1485 struct rq *rq = cpu_rq(cpu);
1486
1487#if defined(CONFIG_SMP)
1488 if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
1489 sched_clock_cpu(cpu); /* sync clocks x-cpu */
1490 ttwu_queue_remote(p, cpu);
1491 return;
1492 }
1493#endif
1494
1495 raw_spin_lock(&rq->lock);
1496 ttwu_do_activate(rq, p, 0);
1497 raw_spin_unlock(&rq->lock);
1498}
1499
1500/**
1501 * try_to_wake_up - wake up a thread
1502 * @p: the thread to be awakened
1503 * @state: the mask of task states that can be woken
1504 * @wake_flags: wake modifier flags (WF_*)
1505 *
1506 * Put it on the run-queue if it's not already there. The "current"
1507 * thread is always on the run-queue (except when the actual
1508 * re-schedule is in progress), and as such you're allowed to do
1509 * the simpler "current->state = TASK_RUNNING" to mark yourself
1510 * runnable without the overhead of this.
1511 *
1512 * Returns %true if @p was woken up, %false if it was already running
1513 * or @state didn't match @p's state.
1514 */
1515static int
1516try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
1517{
1518 unsigned long flags;
1519 int cpu, success = 0;
1520
1521 smp_wmb();
1522 raw_spin_lock_irqsave(&p->pi_lock, flags);
1523 if (!(p->state & state))
1524 goto out;
1525
1526 success = 1; /* we're going to change ->state */
1527 cpu = task_cpu(p);
1528
1529 if (p->on_rq && ttwu_remote(p, wake_flags))
1530 goto stat;
1531
1532#ifdef CONFIG_SMP
1533 /*
1534 * If the owning (remote) cpu is still in the middle of schedule() with
1535	 * this task as prev, wait until it's done referencing the task.
1536 */
1537 while (p->on_cpu) {
1538#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1539 /*
1540 * In case the architecture enables interrupts in
1541 * context_switch(), we cannot busy wait, since that
1542 * would lead to deadlocks when an interrupt hits and
1543 * tries to wake up @prev. So bail and do a complete
1544 * remote wakeup.
1545 */
1546 if (ttwu_activate_remote(p, wake_flags))
1547 goto stat;
1548#else
1549 cpu_relax();
1550#endif
1551 }
1552 /*
1553 * Pairs with the smp_wmb() in finish_lock_switch().
1554 */
1555 smp_rmb();
1556
1557 p->sched_contributes_to_load = !!task_contributes_to_load(p);
1558 p->state = TASK_WAKING;
1559
1560 if (p->sched_class->task_waking)
1561 p->sched_class->task_waking(p);
1562
1563 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
1564 if (task_cpu(p) != cpu) {
1565 wake_flags |= WF_MIGRATED;
1566 set_task_cpu(p, cpu);
1567 }
1568#endif /* CONFIG_SMP */
1569
1570 ttwu_queue(p, cpu);
1571stat:
1572 ttwu_stat(p, cpu, wake_flags);
1573out:
1574 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1575
1576 return success;
1577}
1578
1579/**
1580 * try_to_wake_up_local - try to wake up a local task with rq lock held
1581 * @p: the thread to be awakened
1582 *
1583 * Put @p on the run-queue if it's not already there. The caller must
1584 * ensure that this_rq() is locked, @p is bound to this_rq() and not
1585 * the current task.
1586 */
1587static void try_to_wake_up_local(struct task_struct *p)
1588{
1589 struct rq *rq = task_rq(p);
1590
1591 BUG_ON(rq != this_rq());
1592 BUG_ON(p == current);
1593 lockdep_assert_held(&rq->lock);
1594
1595 if (!raw_spin_trylock(&p->pi_lock)) {
1596 raw_spin_unlock(&rq->lock);
1597 raw_spin_lock(&p->pi_lock);
1598 raw_spin_lock(&rq->lock);
1599 }
1600
1601 if (!(p->state & TASK_NORMAL))
1602 goto out;
1603
1604 if (!p->on_rq)
1605 ttwu_activate(rq, p, ENQUEUE_WAKEUP);
1606
1607 ttwu_do_wakeup(rq, p, 0);
1608 ttwu_stat(p, smp_processor_id(), 0);
1609out:
1610 raw_spin_unlock(&p->pi_lock);
1611}
1612
1613/**
1614 * wake_up_process - Wake up a specific process
1615 * @p: The process to be woken up.
1616 *
1617 * Attempt to wake up the nominated process and move it to the set of runnable
1618 * processes. Returns 1 if the process was woken up, 0 if it was already
1619 * running.
1620 *
1621 * It may be assumed that this function implies a write memory barrier before
1622 * changing the task state if and only if any tasks are woken up.
1623 */
1624int wake_up_process(struct task_struct *p)
1625{
1626 return try_to_wake_up(p, TASK_ALL, 0);
1627}
1628EXPORT_SYMBOL(wake_up_process);
1629
1630int wake_up_state(struct task_struct *p, unsigned int state)
1631{
1632 return try_to_wake_up(p, state, 0);
1633}
1634
1635/*
1636 * Perform scheduler related setup for a newly forked process p.
1637 * p is forked by current.
1638 *
1639 * __sched_fork() is basic setup used by init_idle() too:
1640 */
1641static void __sched_fork(struct task_struct *p)
1642{
1643 p->on_rq = 0;
1644
1645 p->se.on_rq = 0;
1646 p->se.exec_start = 0;
1647 p->se.sum_exec_runtime = 0;
1648 p->se.prev_sum_exec_runtime = 0;
1649 p->se.nr_migrations = 0;
1650 p->se.vruntime = 0;
1651 INIT_LIST_HEAD(&p->se.group_node);
1652
1653#ifdef CONFIG_SCHEDSTATS
1654 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
1655#endif
1656
1657 INIT_LIST_HEAD(&p->rt.run_list);
1658
1659#ifdef CONFIG_PREEMPT_NOTIFIERS
1660 INIT_HLIST_HEAD(&p->preempt_notifiers);
1661#endif
1662}
1663
1664/*
1665 * fork()/clone()-time setup:
1666 */
1667void sched_fork(struct task_struct *p)
1668{
1669 unsigned long flags;
1670 int cpu = get_cpu();
1671
1672 __sched_fork(p);
1673 /*
1674 * We mark the process as running here. This guarantees that
1675 * nobody will actually run it, and a signal or other external
1676 * event cannot wake it up and insert it on the runqueue either.
1677 */
1678 p->state = TASK_RUNNING;
1679
1680 /*
1681 * Make sure we do not leak PI boosting priority to the child.
1682 */
1683 p->prio = current->normal_prio;
1684
1685 /*
1686 * Revert to default priority/policy on fork if requested.
1687 */
1688 if (unlikely(p->sched_reset_on_fork)) {
1689 if (task_has_rt_policy(p)) {
1690 p->policy = SCHED_NORMAL;
1691 p->static_prio = NICE_TO_PRIO(0);
1692 p->rt_priority = 0;
1693 } else if (PRIO_TO_NICE(p->static_prio) < 0)
1694 p->static_prio = NICE_TO_PRIO(0);
1695
1696 p->prio = p->normal_prio = __normal_prio(p);
1697 set_load_weight(p);
1698
1699 /*
1700 * We don't need the reset flag anymore after the fork. It has
1701 * fulfilled its duty:
1702 */
1703 p->sched_reset_on_fork = 0;
1704 }
1705
1706 if (!rt_prio(p->prio))
1707 p->sched_class = &fair_sched_class;
1708
1709 if (p->sched_class->task_fork)
1710 p->sched_class->task_fork(p);
1711
1712 /*
1713	 * The child is not yet in the pid-hash so there are no cgroup attach
1714	 * races, and the cgroup is pinned to this child because cgroup_fork()
1715	 * runs before sched_fork().
1716 *
1717 * Silence PROVE_RCU.
1718 */
1719 raw_spin_lock_irqsave(&p->pi_lock, flags);
1720 set_task_cpu(p, cpu);
1721 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
1722
1723#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1724 if (likely(sched_info_on()))
1725 memset(&p->sched_info, 0, sizeof(p->sched_info));
1726#endif
1727#if defined(CONFIG_SMP)
1728 p->on_cpu = 0;
1729#endif
1730#ifdef CONFIG_PREEMPT_COUNT
1731 /* Want to start with kernel preemption disabled. */
1732 task_thread_info(p)->preempt_count = 1;
1733#endif
1734#ifdef CONFIG_SMP
1735 plist_node_init(&p->pushable_tasks, MAX_PRIO);
1736#endif
1737
1738 put_cpu();
1739}
1740
1741/*
1742 * wake_up_new_task - wake up a newly created task for the first time.
1743 *
1744 * This function will do some initial scheduler statistics housekeeping
1745 * that must be done for every newly created context, then puts the task
1746 * on the runqueue and wakes it.
1747 */
1748void wake_up_new_task(struct task_struct *p)
1749{
1750 unsigned long flags;
1751 struct rq *rq;
1752
1753 raw_spin_lock_irqsave(&p->pi_lock, flags);
1754#ifdef CONFIG_SMP
1755 /*
1756 * Fork balancing, do it here and not earlier because:
1757 * - cpus_allowed can change in the fork path
1758 * - any previously selected cpu might disappear through hotplug
1759 */
1760 set_task_cpu(p, select_task_rq(p, SD_BALANCE_FORK, 0));
1761#endif
1762
1763 rq = __task_rq_lock(p);
1764 activate_task(rq, p, 0);
1765 p->on_rq = 1;
1766 trace_sched_wakeup_new(p, true);
1767 check_preempt_curr(rq, p, WF_FORK);
1768#ifdef CONFIG_SMP
1769 if (p->sched_class->task_woken)
1770 p->sched_class->task_woken(rq, p);
1771#endif
1772 task_rq_unlock(rq, p, &flags);
1773}
1774
1775#ifdef CONFIG_PREEMPT_NOTIFIERS
1776
1777/**
1778 * preempt_notifier_register - tell me when current is being preempted & rescheduled
1779 * @notifier: notifier struct to register
1780 */
1781void preempt_notifier_register(struct preempt_notifier *notifier)
1782{
1783 hlist_add_head(&notifier->link, &current->preempt_notifiers);
1784}
1785EXPORT_SYMBOL_GPL(preempt_notifier_register);
1786
1787/**
1788 * preempt_notifier_unregister - no longer interested in preemption notifications
1789 * @notifier: notifier struct to unregister
1790 *
1791 * This is safe to call from within a preemption notifier.
1792 */
1793void preempt_notifier_unregister(struct preempt_notifier *notifier)
1794{
1795 hlist_del(&notifier->link);
1796}
1797EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1798
1799static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1800{
1801 struct preempt_notifier *notifier;
1802 struct hlist_node *node;
1803
1804 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1805 notifier->ops->sched_in(notifier, raw_smp_processor_id());
1806}
1807
1808static void
1809fire_sched_out_preempt_notifiers(struct task_struct *curr,
1810 struct task_struct *next)
1811{
1812 struct preempt_notifier *notifier;
1813 struct hlist_node *node;
1814
1815 hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1816 notifier->ops->sched_out(notifier, next);
1817}
1818
1819#else /* !CONFIG_PREEMPT_NOTIFIERS */
1820
1821static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1822{
1823}
1824
1825static void
1826fire_sched_out_preempt_notifiers(struct task_struct *curr,
1827 struct task_struct *next)
1828{
1829}
1830
1831#endif /* CONFIG_PREEMPT_NOTIFIERS */
1832
1833/**
1834 * prepare_task_switch - prepare to switch tasks
1835 * @rq: the runqueue preparing to switch
1836 * @prev: the current task that is being switched out
1837 * @next: the task we are going to switch to.
1838 *
1839 * This is called with the rq lock held and interrupts off. It must
1840 * be paired with a subsequent finish_task_switch after the context
1841 * switch.
1842 *
1843 * prepare_task_switch sets up locking and calls architecture specific
1844 * hooks.
1845 */
1846static inline void
1847prepare_task_switch(struct rq *rq, struct task_struct *prev,
1848 struct task_struct *next)
1849{
1850 sched_info_switch(prev, next);
1851 perf_event_task_sched_out(prev, next);
1852 fire_sched_out_preempt_notifiers(prev, next);
1853 prepare_lock_switch(rq, next);
1854 prepare_arch_switch(next);
1855 trace_sched_switch(prev, next);
1856}
1857
1858/**
1859 * finish_task_switch - clean up after a task-switch
1860 * @rq: runqueue associated with task-switch
1861 * @prev: the thread we just switched away from.
1862 *
1863 * finish_task_switch must be called after the context switch, paired
1864 * with a prepare_task_switch call before the context switch.
1865 * finish_task_switch will reconcile locking set up by prepare_task_switch,
1866 * and do any other architecture-specific cleanup actions.
1867 *
1868 * Note that we may have delayed dropping an mm in context_switch(). If
1869 * so, we finish that here outside of the runqueue lock. (Doing it
1870 * with the lock held can cause deadlocks; see schedule() for
1871 * details.)
1872 */
1873static void finish_task_switch(struct rq *rq, struct task_struct *prev)
1874 __releases(rq->lock)
1875{
1876 struct mm_struct *mm = rq->prev_mm;
1877 long prev_state;
1878
1879 rq->prev_mm = NULL;
1880
1881 /*
1882 * A task struct has one reference for the use as "current".
1883 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
1884 * schedule one last time. The schedule call will never return, and
1885 * the scheduled task must drop that reference.
1886 * The test for TASK_DEAD must occur while the runqueue locks are
1887 * still held, otherwise prev could be scheduled on another cpu, die
1888 * there before we look at prev->state, and then the reference would
1889 * be dropped twice.
1890 * Manfred Spraul <manfred@colorfullife.com>
1891 */
1892 prev_state = prev->state;
1893 finish_arch_switch(prev);
1894#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1895 local_irq_disable();
1896#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1897 perf_event_task_sched_in(prev, current);
1898#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1899 local_irq_enable();
1900#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */
1901 finish_lock_switch(rq, prev);
1902
1903 fire_sched_in_preempt_notifiers(current);
1904 if (mm)
1905 mmdrop(mm);
1906 if (unlikely(prev_state == TASK_DEAD)) {
1907 /*
1908 * Remove function-return probe instances associated with this
1909 * task and put them back on the free list.
1910 */
1911 kprobe_flush_task(prev);
1912 put_task_struct(prev);
1913 }
1914}
1915
1916#ifdef CONFIG_SMP
1917
1918/* assumes rq->lock is held */
1919static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
1920{
1921 if (prev->sched_class->pre_schedule)
1922 prev->sched_class->pre_schedule(rq, prev);
1923}
1924
1925/* rq->lock is NOT held, but preemption is disabled */
1926static inline void post_schedule(struct rq *rq)
1927{
1928 if (rq->post_schedule) {
1929 unsigned long flags;
1930
1931 raw_spin_lock_irqsave(&rq->lock, flags);
1932 if (rq->curr->sched_class->post_schedule)
1933 rq->curr->sched_class->post_schedule(rq);
1934 raw_spin_unlock_irqrestore(&rq->lock, flags);
1935
1936 rq->post_schedule = 0;
1937 }
1938}
1939
1940#else
1941
1942static inline void pre_schedule(struct rq *rq, struct task_struct *p)
1943{
1944}
1945
1946static inline void post_schedule(struct rq *rq)
1947{
1948}
1949
1950#endif
1951
1952/**
1953 * schedule_tail - first thing a freshly forked thread must call.
1954 * @prev: the thread we just switched away from.
1955 */
1956asmlinkage void schedule_tail(struct task_struct *prev)
1957 __releases(rq->lock)
1958{
1959 struct rq *rq = this_rq();
1960
1961 finish_task_switch(rq, prev);
1962
1963 /*
1964 * FIXME: do we need to worry about rq being invalidated by the
1965 * task_switch?
1966 */
1967 post_schedule(rq);
1968
1969#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1970 /* In this case, finish_task_switch does not reenable preemption */
1971 preempt_enable();
1972#endif
1973 if (current->set_child_tid)
1974 put_user(task_pid_vnr(current), current->set_child_tid);
1975}
1976
1977/*
1978 * context_switch - switch to the new MM and the new
1979 * thread's register state.
1980 */
1981static inline void
1982context_switch(struct rq *rq, struct task_struct *prev,
1983 struct task_struct *next)
1984{
1985 struct mm_struct *mm, *oldmm;
1986
1987 prepare_task_switch(rq, prev, next);
1988
1989 mm = next->mm;
1990 oldmm = prev->active_mm;
1991 /*
1992 * For paravirt, this is coupled with an exit in switch_to to
1993 * combine the page table reload and the switch backend into
1994 * one hypercall.
1995 */
1996 arch_start_context_switch(prev);
1997
1998 if (!mm) {
1999 next->active_mm = oldmm;
2000 atomic_inc(&oldmm->mm_count);
2001 enter_lazy_tlb(oldmm, next);
2002 } else
2003 switch_mm(oldmm, mm, next);
2004
2005 if (!prev->mm) {
2006 prev->active_mm = NULL;
2007 rq->prev_mm = oldmm;
2008 }
2009 /*
2010	 * The runqueue lock will be released by the next
2011	 * task (which is an invalid locking op, but in the case
2012	 * of the scheduler it's an obvious special case), so we
2013	 * do an early lockdep release here:
2014 */
2015#ifndef __ARCH_WANT_UNLOCKED_CTXSW
2016 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
2017#endif
2018
2019 /* Here we just switch the register state and the stack. */
2020 switch_to(prev, next, prev);
2021
2022 barrier();
2023 /*
2024 * this_rq must be evaluated again because prev may have moved
2025 * CPUs since it called schedule(), thus the 'rq' on its stack
2026 * frame will be invalid.
2027 */
2028 finish_task_switch(this_rq(), prev);
2029}
2030
2031/*
2032 * nr_running, nr_uninterruptible and nr_context_switches:
2033 *
2034 * externally visible scheduler statistics: current number of runnable
2035 * threads, current number of uninterruptible-sleeping threads, total
2036 * number of context switches performed since bootup.
2037 */
2038unsigned long nr_running(void)
2039{
2040 unsigned long i, sum = 0;
2041
2042 for_each_online_cpu(i)
2043 sum += cpu_rq(i)->nr_running;
2044
2045 return sum;
2046}
2047
2048unsigned long nr_uninterruptible(void)
2049{
2050 unsigned long i, sum = 0;
2051
2052 for_each_possible_cpu(i)
2053 sum += cpu_rq(i)->nr_uninterruptible;
2054
2055 /*
2056 * Since we read the counters lockless, it might be slightly
2057 * inaccurate. Do not allow it to go below zero though:
2058 */
2059 if (unlikely((long)sum < 0))
2060 sum = 0;
2061
2062 return sum;
2063}
2064
2065unsigned long long nr_context_switches(void)
2066{
2067 int i;
2068 unsigned long long sum = 0;
2069
2070 for_each_possible_cpu(i)
2071 sum += cpu_rq(i)->nr_switches;
2072
2073 return sum;
2074}
2075
2076unsigned long nr_iowait(void)
2077{
2078 unsigned long i, sum = 0;
2079
2080 for_each_possible_cpu(i)
2081 sum += atomic_read(&cpu_rq(i)->nr_iowait);
2082
2083 return sum;
2084}
2085
2086unsigned long nr_iowait_cpu(int cpu)
2087{
2088 struct rq *this = cpu_rq(cpu);
2089 return atomic_read(&this->nr_iowait);
2090}
2091
2092unsigned long this_cpu_load(void)
2093{
2094 struct rq *this = this_rq();
2095 return this->cpu_load[0];
2096}
2097
2098
2099/* Variables and functions for calc_load */
2100static atomic_long_t calc_load_tasks;
2101static unsigned long calc_load_update;
2102unsigned long avenrun[3];
2103EXPORT_SYMBOL(avenrun);
2104
2105static long calc_load_fold_active(struct rq *this_rq)
2106{
2107 long nr_active, delta = 0;
2108
2109 nr_active = this_rq->nr_running;
2110 nr_active += (long) this_rq->nr_uninterruptible;
2111
2112 if (nr_active != this_rq->calc_load_active) {
2113 delta = nr_active - this_rq->calc_load_active;
2114 this_rq->calc_load_active = nr_active;
2115 }
2116
2117 return delta;
2118}
2119
2120static unsigned long
2121calc_load(unsigned long load, unsigned long exp, unsigned long active)
2122{
2123 load *= exp;
2124 load += active * (FIXED_1 - exp);
2125 load += 1UL << (FSHIFT - 1);
2126 return load >> FSHIFT;
2127}
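/*
 * Editorial worked example, not part of this patch: assuming the usual
 * <linux/sched.h> constants (FSHIFT == 11, FIXED_1 == 2048 and
 * EXP_1 == 1884, i.e. exp(-5s/1min) in fixed point), one runnable task
 * (active == FIXED_1) and a previous 1-minute load of 0 give
 *
 *	load = 0*1884 + 2048*(2048 - 1884) + 1024 = 336896
 *	336896 >> 11 = 164		(~0.08 on the 2048 == 1.0 scale)
 *
 * so an otherwise idle 1-minute average climbs by roughly 0.08 per
 * LOAD_FREQ (5 s) interval, converging geometrically towards 1.00.
 */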
2128
2129#ifdef CONFIG_NO_HZ
2130/*
2131 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
2132 *
2133 * When making the ILB scale, we should try to pull this in as well.
2134 */
2135static atomic_long_t calc_load_tasks_idle;
2136
2137void calc_load_account_idle(struct rq *this_rq)
2138{
2139 long delta;
2140
2141 delta = calc_load_fold_active(this_rq);
2142 if (delta)
2143 atomic_long_add(delta, &calc_load_tasks_idle);
2144}
2145
2146static long calc_load_fold_idle(void)
2147{
2148 long delta = 0;
2149
2150 /*
2151	 * It's got a race; we don't care...
2152 */
2153 if (atomic_long_read(&calc_load_tasks_idle))
2154 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
2155
2156 return delta;
2157}
2158
2159/**
2160 * fixed_power_int - compute: x^n, in O(log n) time
2161 *
2162 * @x: base of the power
2163 * @frac_bits: fractional bits of @x
2164 * @n: power to raise @x to.
2165 *
2166 * By exploiting the relation between the definition of the natural power
2167 * function: x^n := x*x*...*x (x multiplied by itself for n times), and
2168 * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
2169 * (where: n_i \elem {0, 1}, the binary vector representing n),
2170 * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
2171 * of course trivially computable in O(log_2 n), the length of our binary
2172 * vector.
2173 */
2174static unsigned long
2175fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
2176{
2177 unsigned long result = 1UL << frac_bits;
2178
2179 if (n) for (;;) {
2180 if (n & 1) {
2181 result *= x;
2182 result += 1UL << (frac_bits - 1);
2183 result >>= frac_bits;
2184 }
2185 n >>= 1;
2186 if (!n)
2187 break;
2188 x *= x;
2189 x += 1UL << (frac_bits - 1);
2190 x >>= frac_bits;
2191 }
2192
2193 return result;
2194}
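/*
 * Editorial sketch, not part of this patch: a straightforward O(n) reference
 * for the O(log n) fixed_power_int() above, useful only for checking it by
 * hand; the name is made up and nothing calls it. For example, with
 * frac_bits == 11, fixed_power_int(1884, 11, 2) and fixed_power_ref(1884, 11, 2)
 * both return 1733, i.e. (1884/2048)^2 ~= 0.846 on the 2048 == 1.0 scale.
 */
static unsigned long __maybe_unused
fixed_power_ref(unsigned long x, unsigned int frac_bits, unsigned int n)
{
	unsigned long result = 1UL << frac_bits;	/* 1.0 in fixed point */

	while (n--) {
		result *= x;				/* multiply ... */
		result += 1UL << (frac_bits - 1);	/* ... round to nearest ... */
		result >>= frac_bits;			/* ... and renormalize */
	}
	return result;
}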
2195
2196/*
2197 * a1 = a0 * e + a * (1 - e)
2198 *
2199 * a2 = a1 * e + a * (1 - e)
2200 * = (a0 * e + a * (1 - e)) * e + a * (1 - e)
2201 * = a0 * e^2 + a * (1 - e) * (1 + e)
2202 *
2203 * a3 = a2 * e + a * (1 - e)
2204 * = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
2205 * = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
2206 *
2207 * ...
2208 *
2209 * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
2210 * = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
2211 * = a0 * e^n + a * (1 - e^n)
2212 *
2213 * [1] application of the geometric series:
2214 *
2215 * n 1 - x^(n+1)
2216 * S_n := \Sum x^i = -------------
2217 * i=0 1 - x
2218 */
2219static unsigned long
2220calc_load_n(unsigned long load, unsigned long exp,
2221 unsigned long active, unsigned int n)
2222{
2223
2224 return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
2225}
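/*
 * Editorial note, not part of this patch: calc_load_n() folds n missed
 * LOAD_FREQ intervals in a single step. Per the derivation above it matches
 * applying calc_load() n times (up to fixed-point rounding); e.g. for n == 2,
 * fixed_power_int(EXP_1, FSHIFT, 2) == 1733 is simply EXP_1 squared on the
 * fixed-point scale, so two missed 1-minute cycles decay the old load by
 * ~0.846 in one go rather than by ~0.92 twice.
 */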
2226
2227/*
2228 * NO_HZ can leave us missing all per-cpu ticks calling
2229 * calc_load_account_active(), but since an idle CPU folds its delta into
2230 * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
2231 * in the pending idle delta if our idle period crossed a load cycle boundary.
2232 *
2233 * Once we've updated the global active value, we need to apply the exponential
2234 * weights adjusted to the number of cycles missed.
2235 */
2236static void calc_global_nohz(unsigned long ticks)
2237{
2238 long delta, active, n;
2239
2240 if (time_before(jiffies, calc_load_update))
2241 return;
2242
2243 /*
2244 * If we crossed a calc_load_update boundary, make sure to fold
2245 * any pending idle changes, the respective CPUs might have
2246 * missed the tick driven calc_load_account_active() update
2247 * due to NO_HZ.
2248 */
2249 delta = calc_load_fold_idle();
2250 if (delta)
2251 atomic_long_add(delta, &calc_load_tasks);
2252
2253 /*
2254 * If we were idle for multiple load cycles, apply them.
2255 */
2256 if (ticks >= LOAD_FREQ) {
2257 n = ticks / LOAD_FREQ;
2258
2259 active = atomic_long_read(&calc_load_tasks);
2260 active = active > 0 ? active * FIXED_1 : 0;
2261
2262 avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
2263 avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
2264 avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
2265
2266 calc_load_update += n * LOAD_FREQ;
2267 }
2268
2269 /*
2270	 * It's possible that the remainder of the above division also crosses
2271	 * a LOAD_FREQ period; the regular check in calc_global_load(),
2272	 * which comes after this, will take care of that.
2273 *
2274	 * Consider being 11 ticks before a cycle completion and sleeping
2275	 * for 4*LOAD_FREQ + 22 ticks: the above code will
2276	 * age us 4 cycles, and the test in calc_global_load() will
2277 * pick up the final one.
2278 */
2279}
2280#else
2281void calc_load_account_idle(struct rq *this_rq)
2282{
2283}
2284
2285static inline long calc_load_fold_idle(void)
2286{
2287 return 0;
2288}
2289
2290static void calc_global_nohz(unsigned long ticks)
2291{
2292}
2293#endif
2294
2295/**
2296 * get_avenrun - get the load average array
2297 * @loads: pointer to dest load array
2298 * @offset: offset to add
2299 * @shift: shift count to shift the result left
2300 *
2301 * These values are estimates at best, so no need for locking.
2302 */
2303void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
2304{
2305 loads[0] = (avenrun[0] + offset) << shift;
2306 loads[1] = (avenrun[1] + offset) << shift;
2307 loads[2] = (avenrun[2] + offset) << shift;
2308}
2309
2310/*
2311 * calc_global_load - update the avenrun load estimates 10 ticks after the
2312 * CPUs have updated calc_load_tasks.
2313 */
2314void calc_global_load(unsigned long ticks)
2315{
2316 long active;
2317
2318 calc_global_nohz(ticks);
2319
2320 if (time_before(jiffies, calc_load_update + 10))
2321 return;
2322
2323 active = atomic_long_read(&calc_load_tasks);
2324 active = active > 0 ? active * FIXED_1 : 0;
2325
2326 avenrun[0] = calc_load(avenrun[0], EXP_1, active);
2327 avenrun[1] = calc_load(avenrun[1], EXP_5, active);
2328 avenrun[2] = calc_load(avenrun[2], EXP_15, active);
2329
2330 calc_load_update += LOAD_FREQ;
2331}
2332
2333/*
2334 * Called from update_cpu_load_active() to periodically update this CPU's
2335 * active count.
2336 */
2337static void calc_load_account_active(struct rq *this_rq)
2338{
2339 long delta;
2340
2341 if (time_before(jiffies, this_rq->calc_load_update))
2342 return;
2343
2344 delta = calc_load_fold_active(this_rq);
2345 delta += calc_load_fold_idle();
2346 if (delta)
2347 atomic_long_add(delta, &calc_load_tasks);
2348
2349 this_rq->calc_load_update += LOAD_FREQ;
2350}
2351
2352/*
2353 * The exact cpuload at various idx values, calculated at every tick would be
2354 * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
2355 *
2356 * If a cpu misses updates for n-1 ticks (as it was idle) and the update gets
2357 * called on the nth tick, when the cpu may be busy, then we have:
2358 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2359 * load = ((2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
2360 *
2361 * decay_load_missed() below does efficient calculation of
2362 * load = ((2^idx - 1) / 2^idx)^(n-1) * load
2363 * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
2364 *
2365 * The calculation is approximated on a 128 point scale.
2366 * degrade_zero_ticks is the number of ticks after which load at any
2367 * particular idx is approximated to be zero.
2368 * degrade_factor is a precomputed table, a row for each load idx.
2369 * Each column corresponds to degradation factor for a power of two ticks,
2370 * based on 128 point scale.
2371 * Example:
2372 * row 2, col 3 (=12) says that the degradation at load idx 2 after
2373 * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
2374 *
2375 * With this power of 2 load factors, we can degrade the load n times
2376 * by looking at 1 bits in n and doing as many mult/shift instead of
2377 * n mult/shifts needed by the exact degradation.
2378 */
2379#define DEGRADE_SHIFT 7
2380static const unsigned char
2381 degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
2382static const unsigned char
2383 degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
2384 {0, 0, 0, 0, 0, 0, 0, 0},
2385 {64, 32, 8, 0, 0, 0, 0, 0},
2386 {96, 72, 40, 12, 1, 0, 0},
2387 {112, 98, 75, 43, 15, 1, 0},
2388 {120, 112, 98, 76, 45, 16, 2} };
2389
2390/*
2391 * Update cpu_load for any missed ticks due to tickless idle. The backlog
2392 * accumulates while the CPU is idle, so we just decay the old load without
2393 * adding any new load.
2394 */
2395static unsigned long
2396decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
2397{
2398 int j = 0;
2399
2400 if (!missed_updates)
2401 return load;
2402
2403 if (missed_updates >= degrade_zero_ticks[idx])
2404 return 0;
2405
2406 if (idx == 1)
2407 return load >> missed_updates;
2408
2409 while (missed_updates) {
2410 if (missed_updates % 2)
2411 load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
2412
2413 missed_updates >>= 1;
2414 j++;
2415 }
2416 return load;
2417}
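/*
 * Editorial worked example, not part of this patch: for idx == 2 (per-tick
 * factor 3/4) and missed_updates == 10 (binary 1010), the loop above hits
 * bits 1 and 3 and multiplies by degrade_factor[2][1] == 72 and
 * degrade_factor[2][3] == 12:
 *
 *	load * 72/128 * 12/128  ~=  load * 0.053
 *
 * versus the exact (3/4)^10 ~= 0.056, so two mult/shift pairs stand in for
 * the ten that a naive per-tick decay would need.
 */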
2418
2419/*
2420 * Update rq->cpu_load[] statistics. This function is usually called every
2421 * scheduler tick (TICK_NSEC). With tickless idle this will not be called
2422 * every tick. We fix it up based on jiffies.
2423 */
2424void update_cpu_load(struct rq *this_rq)
2425{
2426 unsigned long this_load = this_rq->load.weight;
2427 unsigned long curr_jiffies = jiffies;
2428 unsigned long pending_updates;
2429 int i, scale;
2430
2431 this_rq->nr_load_updates++;
2432
2433 /* Avoid repeated calls on same jiffy, when moving in and out of idle */
2434 if (curr_jiffies == this_rq->last_load_update_tick)
2435 return;
2436
2437 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2438 this_rq->last_load_update_tick = curr_jiffies;
2439
2440 /* Update our load: */
2441 this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
2442 for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
2443 unsigned long old_load, new_load;
2444
2445 /* scale is effectively 1 << i now, and >> i divides by scale */
2446
2447 old_load = this_rq->cpu_load[i];
2448 old_load = decay_load_missed(old_load, pending_updates - 1, i);
2449 new_load = this_load;
2450 /*
2451 * Round up the averaging division if load is increasing. This
2452 * prevents us from getting stuck on 9 if the load is 10, for
2453 * example.
2454 */
2455 if (new_load > old_load)
2456 new_load += scale - 1;
2457
2458 this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
2459 }
2460
2461 sched_avg_update(this_rq);
2462}
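/*
 * Editorial worked example, not part of this patch: for i == 1 (scale == 2)
 * the update above is cpu_load[1] = (old + new) / 2. The round-up-when-rising
 * tweak matters for convergence: with old_load == 9 and this_load == 10 we
 * first bump new_load to 11, so (9 + 11) >> 1 == 10, whereas a plain
 * (9 + 10) >> 1 would stay stuck at 9 forever, which is exactly the case the
 * comment in the loop warns about.
 */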
2463
2464static void update_cpu_load_active(struct rq *this_rq)
2465{
2466 update_cpu_load(this_rq);
2467
2468 calc_load_account_active(this_rq);
2469}
2470
2471#ifdef CONFIG_SMP
2472
2473/*
2474 * sched_exec - execve() is a valuable balancing opportunity, because at
2475 * this point the task has the smallest effective memory and cache footprint.
2476 */
2477void sched_exec(void)
2478{
2479 struct task_struct *p = current;
2480 unsigned long flags;
2481 int dest_cpu;
2482
2483 raw_spin_lock_irqsave(&p->pi_lock, flags);
2484 dest_cpu = p->sched_class->select_task_rq(p, SD_BALANCE_EXEC, 0);
2485 if (dest_cpu == smp_processor_id())
2486 goto unlock;
2487
2488 if (likely(cpu_active(dest_cpu))) {
2489 struct migration_arg arg = { p, dest_cpu };
2490
2491 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2492 stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
2493 return;
2494 }
2495unlock:
2496 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
2497}
2498
2499#endif
2500
2501DEFINE_PER_CPU(struct kernel_stat, kstat);
2502
2503EXPORT_PER_CPU_SYMBOL(kstat);
2504
2505/*
2506 * Return any ns on the sched_clock that have not yet been accounted in
2507 * @p in case that task is currently running.
2508 *
2509 * Called with task_rq_lock() held on @rq.
2510 */
2511static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
2512{
2513 u64 ns = 0;
2514
2515 if (task_current(rq, p)) {
2516 update_rq_clock(rq);
2517 ns = rq->clock_task - p->se.exec_start;
2518 if ((s64)ns < 0)
2519 ns = 0;
2520 }
2521
2522 return ns;
2523}
2524
2525unsigned long long task_delta_exec(struct task_struct *p)
2526{
2527 unsigned long flags;
2528 struct rq *rq;
2529 u64 ns = 0;
2530
2531 rq = task_rq_lock(p, &flags);
2532 ns = do_task_delta_exec(p, rq);
2533 task_rq_unlock(rq, p, &flags);
2534
2535 return ns;
2536}
2537
2538/*
2539 * Return accounted runtime for the task.
2540 * In case the task is currently running, return the runtime plus current's
2541 * pending runtime that has not been accounted yet.
2542 */
2543unsigned long long task_sched_runtime(struct task_struct *p)
2544{
2545 unsigned long flags;
2546 struct rq *rq;
2547 u64 ns = 0;
2548
2549 rq = task_rq_lock(p, &flags);
2550 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
2551 task_rq_unlock(rq, p, &flags);
2552
2553 return ns;
2554}
2555
2556/*
2557 * Account user cpu time to a process.
2558 * @p: the process that the cpu time gets accounted to
2559 * @cputime: the cpu time spent in user space since the last update
2560 * @cputime_scaled: cputime scaled by cpu frequency
2561 */
2562void account_user_time(struct task_struct *p, cputime_t cputime,
2563 cputime_t cputime_scaled)
2564{
2565 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2566 cputime64_t tmp;
2567
2568 /* Add user time to process. */
2569 p->utime = cputime_add(p->utime, cputime);
2570 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
2571 account_group_user_time(p, cputime);
2572
2573 /* Add user time to cpustat. */
2574 tmp = cputime_to_cputime64(cputime);
2575 if (TASK_NICE(p) > 0)
2576 cpustat->nice = cputime64_add(cpustat->nice, tmp);
2577 else
2578 cpustat->user = cputime64_add(cpustat->user, tmp);
2579
2580 cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
2581 /* Account for user time used */
2582 acct_update_integrals(p);
2583}
2584
2585/*
2586 * Account guest cpu time to a process.
2587 * @p: the process that the cpu time gets accounted to
2588 * @cputime: the cpu time spent in virtual machine since the last update
2589 * @cputime_scaled: cputime scaled by cpu frequency
2590 */
2591static void account_guest_time(struct task_struct *p, cputime_t cputime,
2592 cputime_t cputime_scaled)
2593{
2594 cputime64_t tmp;
2595 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2596
2597 tmp = cputime_to_cputime64(cputime);
2598
2599 /* Add guest time to process. */
2600 p->utime = cputime_add(p->utime, cputime);
2601 p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
2602 account_group_user_time(p, cputime);
2603 p->gtime = cputime_add(p->gtime, cputime);
2604
2605 /* Add guest time to cpustat. */
2606 if (TASK_NICE(p) > 0) {
2607 cpustat->nice = cputime64_add(cpustat->nice, tmp);
2608 cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
2609 } else {
2610 cpustat->user = cputime64_add(cpustat->user, tmp);
2611 cpustat->guest = cputime64_add(cpustat->guest, tmp);
2612 }
2613}
2614
2615/*
2616 * Account system cpu time to a process and desired cpustat field
2617 * @p: the process that the cpu time gets accounted to
2618 * @cputime: the cpu time spent in kernel space since the last update
2619 * @cputime_scaled: cputime scaled by cpu frequency
2620 * @target_cputime64: pointer to cpustat field that has to be updated
2621 */
2622static inline
2623void __account_system_time(struct task_struct *p, cputime_t cputime,
2624 cputime_t cputime_scaled, cputime64_t *target_cputime64)
2625{
2626 cputime64_t tmp = cputime_to_cputime64(cputime);
2627
2628 /* Add system time to process. */
2629 p->stime = cputime_add(p->stime, cputime);
2630 p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
2631 account_group_system_time(p, cputime);
2632
2633 /* Add system time to cpustat. */
2634 *target_cputime64 = cputime64_add(*target_cputime64, tmp);
2635 cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
2636
2637 /* Account for system time used */
2638 acct_update_integrals(p);
2639}
2640
2641/*
2642 * Account system cpu time to a process.
2643 * @p: the process that the cpu time gets accounted to
2644 * @hardirq_offset: the offset to subtract from hardirq_count()
2645 * @cputime: the cpu time spent in kernel space since the last update
2646 * @cputime_scaled: cputime scaled by cpu frequency
2647 */
2648void account_system_time(struct task_struct *p, int hardirq_offset,
2649 cputime_t cputime, cputime_t cputime_scaled)
2650{
2651 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2652 cputime64_t *target_cputime64;
2653
2654 if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
2655 account_guest_time(p, cputime, cputime_scaled);
2656 return;
2657 }
2658
2659 if (hardirq_count() - hardirq_offset)
2660 target_cputime64 = &cpustat->irq;
2661 else if (in_serving_softirq())
2662 target_cputime64 = &cpustat->softirq;
2663 else
2664 target_cputime64 = &cpustat->system;
2665
2666 __account_system_time(p, cputime, cputime_scaled, target_cputime64);
2667}
2668
2669/*
2670 * Account for involuntary wait time.
2671 * @cputime: the cpu time spent in involuntary wait
2672 */
2673void account_steal_time(cputime_t cputime)
2674{
2675 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2676 cputime64_t cputime64 = cputime_to_cputime64(cputime);
2677
2678 cpustat->steal = cputime64_add(cpustat->steal, cputime64);
2679}
2680
2681/*
2682 * Account for idle time.
2683 * @cputime: the cpu time spent in idle wait
2684 */
2685void account_idle_time(cputime_t cputime)
2686{
2687 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2688 cputime64_t cputime64 = cputime_to_cputime64(cputime);
2689 struct rq *rq = this_rq();
2690
2691 if (atomic_read(&rq->nr_iowait) > 0)
2692 cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
2693 else
2694 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
2695}
2696
2697static __always_inline bool steal_account_process_tick(void)
2698{
2699#ifdef CONFIG_PARAVIRT
2700 if (static_branch(&paravirt_steal_enabled)) {
2701 u64 steal, st = 0;
2702
2703 steal = paravirt_steal_clock(smp_processor_id());
2704 steal -= this_rq()->prev_steal_time;
2705
2706 st = steal_ticks(steal);
2707 this_rq()->prev_steal_time += st * TICK_NSEC;
2708
2709 account_steal_time(st);
2710 return st;
2711 }
2712#endif
2713 return false;
2714}
2715
2716#ifndef CONFIG_VIRT_CPU_ACCOUNTING
2717
2718#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2719/*
2720 * Account a tick to a process and cpustat
2721 * @p: the process that the cpu time gets accounted to
2722 * @user_tick: is the tick from userspace
2723 * @rq: the pointer to rq
2724 *
2725 * Tick demultiplexing follows the order
2726 * - pending hardirq update
2727 * - pending softirq update
2728 * - user_time
2729 * - idle_time
2730 * - system time
2731 * - check for guest_time
2732 * - else account as system_time
2733 *
2734 * The check for hardirq is done for both system and user time, as no
2735 * timer goes off while we are on hardirq, so we may never get an
2736 * opportunity to account it solely as system time.
2737 * p->stime and friends are only updated on system time and not on irq/
2738 * softirq time, as those no longer count in task exec_runtime.
2739 */
2740static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
2741 struct rq *rq)
2742{
2743 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
2744 cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
2745 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2746
2747 if (steal_account_process_tick())
2748 return;
2749
2750 if (irqtime_account_hi_update()) {
2751 cpustat->irq = cputime64_add(cpustat->irq, tmp);
2752 } else if (irqtime_account_si_update()) {
2753 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
2754 } else if (this_cpu_ksoftirqd() == p) {
2755		 * ksoftirqd time does not get accounted in cpu_softirq_time,
2756		 * so we have to handle it separately here.
2757 * So, we have to handle it separately here.
2758 * Also, p->stime needs to be updated for ksoftirqd.
2759 */
2760 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
2761 &cpustat->softirq);
2762 } else if (user_tick) {
2763 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
2764 } else if (p == rq->idle) {
2765 account_idle_time(cputime_one_jiffy);
2766 } else if (p->flags & PF_VCPU) { /* System time or guest time */
2767 account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
2768 } else {
2769 __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
2770 &cpustat->system);
2771 }
2772}
2773
2774static void irqtime_account_idle_ticks(int ticks)
2775{
2776 int i;
2777 struct rq *rq = this_rq();
2778
2779 for (i = 0; i < ticks; i++)
2780 irqtime_account_process_tick(current, 0, rq);
2781}
2782#else /* CONFIG_IRQ_TIME_ACCOUNTING */
2783static void irqtime_account_idle_ticks(int ticks) {}
2784static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
2785 struct rq *rq) {}
2786#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
2787
2788/*
2789 * Account a single tick of cpu time.
2790 * @p: the process that the cpu time gets accounted to
2791 * @user_tick: indicates if the tick is a user or a system tick
2792 */
2793void account_process_tick(struct task_struct *p, int user_tick)
2794{
2795 cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
2796 struct rq *rq = this_rq();
2797
2798 if (sched_clock_irqtime) {
2799 irqtime_account_process_tick(p, user_tick, rq);
2800 return;
2801 }
2802
2803 if (steal_account_process_tick())
2804 return;
2805
2806 if (user_tick)
2807 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
2808 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
2809 account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
2810 one_jiffy_scaled);
2811 else
2812 account_idle_time(cputime_one_jiffy);
2813}
2814
2815/*
2816 * Account multiple ticks of steal time.
2818 * @ticks: number of stolen ticks
2819 */
2820void account_steal_ticks(unsigned long ticks)
2821{
2822 account_steal_time(jiffies_to_cputime(ticks));
2823}
2824
2825/*
2826 * Account multiple ticks of idle time.
2827 * @ticks: number of idle ticks
2828 */
2829void account_idle_ticks(unsigned long ticks)
2830{
2831
2832 if (sched_clock_irqtime) {
2833 irqtime_account_idle_ticks(ticks);
2834 return;
2835 }
2836
2837 account_idle_time(jiffies_to_cputime(ticks));
2838}
2839
2840#endif
2841
2842/*
2843 * Use precise platform statistics if available:
2844 */
2845#ifdef CONFIG_VIRT_CPU_ACCOUNTING
2846void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
2847{
2848 *ut = p->utime;
2849 *st = p->stime;
2850}
2851
2852void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
2853{
2854 struct task_cputime cputime;
2855
2856 thread_group_cputime(p, &cputime);
2857
2858 *ut = cputime.utime;
2859 *st = cputime.stime;
2860}
2861#else
2862
2863#ifndef nsecs_to_cputime
2864# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
2865#endif
2866
2867void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
2868{
2869 cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
2870
2871 /*
2872 * Use CFS's precise accounting:
2873 */
2874 rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
2875
2876 if (total) {
2877 u64 temp = rtime;
2878
2879 temp *= utime;
2880 do_div(temp, total);
2881 utime = (cputime_t)temp;
2882 } else
2883 utime = rtime;
2884
2885 /*
2886 * Compare with previous values, to keep monotonicity:
2887 */
2888 p->prev_utime = max(p->prev_utime, utime);
2889 p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
2890
2891 *ut = p->prev_utime;
2892 *st = p->prev_stime;
2893}
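/*
 * Editorial worked example, not part of this patch: suppose tick sampling
 * recorded p->utime == 30 and p->stime == 90 (total == 120) but the precise
 * CFS runtime converts to rtime == 100. The scaling above yields
 *
 *	utime = 100 * 30 / 120 = 25	stime = 100 - 25 = 75
 *
 * i.e. the observed 1:3 user/system split is kept while the sum matches the
 * precisely accounted runtime; prev_utime/prev_stime merely keep the
 * reported values monotonic across calls.
 */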
2894
2895/*
2896 * Must be called with siglock held.
2897 */
2898void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
2899{
2900 struct signal_struct *sig = p->signal;
2901 struct task_cputime cputime;
2902 cputime_t rtime, utime, total;
2903
2904 thread_group_cputime(p, &cputime);
2905
2906 total = cputime_add(cputime.utime, cputime.stime);
2907 rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
2908
2909 if (total) {
2910 u64 temp = rtime;
2911
2912 temp *= cputime.utime;
2913 do_div(temp, total);
2914 utime = (cputime_t)temp;
2915 } else
2916 utime = rtime;
2917
2918 sig->prev_utime = max(sig->prev_utime, utime);
2919 sig->prev_stime = max(sig->prev_stime,
2920 cputime_sub(rtime, sig->prev_utime));
2921
2922 *ut = sig->prev_utime;
2923 *st = sig->prev_stime;
2924}
2925#endif
2926
2927/*
2928 * This function gets called by the timer code, with HZ frequency.
2929 * We call it with interrupts disabled.
2930 */
2931void scheduler_tick(void)
2932{
2933 int cpu = smp_processor_id();
2934 struct rq *rq = cpu_rq(cpu);
2935 struct task_struct *curr = rq->curr;
2936
2937 sched_clock_tick();
2938
2939 raw_spin_lock(&rq->lock);
2940 update_rq_clock(rq);
2941 update_cpu_load_active(rq);
2942 curr->sched_class->task_tick(rq, curr, 0);
2943 raw_spin_unlock(&rq->lock);
2944
2945 perf_event_task_tick();
2946
2947#ifdef CONFIG_SMP
2948 rq->idle_balance = idle_cpu(cpu);
2949 trigger_load_balance(rq, cpu);
2950#endif
2951}
2952
2953notrace unsigned long get_parent_ip(unsigned long addr)
2954{
2955 if (in_lock_functions(addr)) {
2956 addr = CALLER_ADDR2;
2957 if (in_lock_functions(addr))
2958 addr = CALLER_ADDR3;
2959 }
2960 return addr;
2961}
2962
2963#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2964 defined(CONFIG_PREEMPT_TRACER))
2965
2966void __kprobes add_preempt_count(int val)
2967{
2968#ifdef CONFIG_DEBUG_PREEMPT
2969 /*
2970 * Underflow?
2971 */
2972 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2973 return;
2974#endif
2975 preempt_count() += val;
2976#ifdef CONFIG_DEBUG_PREEMPT
2977 /*
2978 * Spinlock count overflowing soon?
2979 */
2980 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2981 PREEMPT_MASK - 10);
2982#endif
2983 if (preempt_count() == val)
2984 trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
2985}
2986EXPORT_SYMBOL(add_preempt_count);
2987
2988void __kprobes sub_preempt_count(int val)
2989{
2990#ifdef CONFIG_DEBUG_PREEMPT
2991 /*
2992 * Underflow?
2993 */
2994 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
2995 return;
2996 /*
2997 * Is the spinlock portion underflowing?
2998 */
2999 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3000 !(preempt_count() & PREEMPT_MASK)))
3001 return;
3002#endif
3003
3004 if (preempt_count() == val)
3005 trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
3006 preempt_count() -= val;
3007}
3008EXPORT_SYMBOL(sub_preempt_count);
3009
3010#endif
3011
3012/*
3013 * Print scheduling while atomic bug:
3014 */
3015static noinline void __schedule_bug(struct task_struct *prev)
3016{
3017 struct pt_regs *regs = get_irq_regs();
3018
3019 printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
3020 prev->comm, prev->pid, preempt_count());
3021
3022 debug_show_held_locks(prev);
3023 print_modules();
3024 if (irqs_disabled())
3025 print_irqtrace_events(prev);
3026
3027 if (regs)
3028 show_regs(regs);
3029 else
3030 dump_stack();
3031}
3032
3033/*
3034 * Various schedule()-time debugging checks and statistics:
3035 */
3036static inline void schedule_debug(struct task_struct *prev)
3037{
3038 /*
3039 * Test if we are atomic. Since do_exit() needs to call into
3040 * schedule() atomically, we ignore that path for now.
3041 * Otherwise, whine if we are scheduling when we should not be.
3042 */
3043 if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
3044 __schedule_bug(prev);
3045 rcu_sleep_check();
3046
3047 profile_hit(SCHED_PROFILING, __builtin_return_address(0));
3048
3049 schedstat_inc(this_rq(), sched_count);
3050}
3051
3052static void put_prev_task(struct rq *rq, struct task_struct *prev)
3053{
3054 if (prev->on_rq || rq->skip_clock_update < 0)
3055 update_rq_clock(rq);
3056 prev->sched_class->put_prev_task(rq, prev);
3057}
3058
3059/*
3060 * Pick up the highest-prio task:
3061 */
3062static inline struct task_struct *
3063pick_next_task(struct rq *rq)
3064{
3065 const struct sched_class *class;
3066 struct task_struct *p;
3067
3068 /*
3069 * Optimization: we know that if all tasks are in
3070 * the fair class we can call that function directly:
3071 */
3072 if (likely(rq->nr_running == rq->cfs.h_nr_running)) {
3073 p = fair_sched_class.pick_next_task(rq);
3074 if (likely(p))
3075 return p;
3076 }
3077
3078 for_each_class(class) {
3079 p = class->pick_next_task(rq);
3080 if (p)
3081 return p;
3082 }
3083
3084 BUG(); /* the idle class will always have a runnable task */
3085}
3086
3087/*
3088 * __schedule() is the main scheduler function.
3089 */
3090static void __sched __schedule(void)
3091{
3092 struct task_struct *prev, *next;
3093 unsigned long *switch_count;
3094 struct rq *rq;
3095 int cpu;
3096
3097need_resched:
3098 preempt_disable();
3099 cpu = smp_processor_id();
3100 rq = cpu_rq(cpu);
3101 rcu_note_context_switch(cpu);
3102 prev = rq->curr;
3103
3104 schedule_debug(prev);
3105
3106 if (sched_feat(HRTICK))
3107 hrtick_clear(rq);
3108
3109 raw_spin_lock_irq(&rq->lock);
3110
3111 switch_count = &prev->nivcsw;
3112 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3113 if (unlikely(signal_pending_state(prev->state, prev))) {
3114 prev->state = TASK_RUNNING;
3115 } else {
3116 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3117 prev->on_rq = 0;
3118
3119 /*
3120 * If a worker went to sleep, notify and ask workqueue
3121 * whether it wants to wake up a task to maintain
3122 * concurrency.
3123 */
3124 if (prev->flags & PF_WQ_WORKER) {
3125 struct task_struct *to_wakeup;
3126
3127 to_wakeup = wq_worker_sleeping(prev, cpu);
3128 if (to_wakeup)
3129 try_to_wake_up_local(to_wakeup);
3130 }
3131 }
3132 switch_count = &prev->nvcsw;
3133 }
3134
3135 pre_schedule(rq, prev);
3136
3137 if (unlikely(!rq->nr_running))
3138 idle_balance(cpu, rq);
3139
3140 put_prev_task(rq, prev);
3141 next = pick_next_task(rq);
3142 clear_tsk_need_resched(prev);
3143 rq->skip_clock_update = 0;
3144
3145 if (likely(prev != next)) {
3146 rq->nr_switches++;
3147 rq->curr = next;
3148 ++*switch_count;
3149
3150 context_switch(rq, prev, next); /* unlocks the rq */
3151 /*
3152		 * The context switch has flipped the stack from under us
3153 * and restored the local variables which were saved when
3154 * this task called schedule() in the past. prev == current
3155 * is still correct, but it can be moved to another cpu/rq.
3156 */
3157 cpu = smp_processor_id();
3158 rq = cpu_rq(cpu);
3159 } else
3160 raw_spin_unlock_irq(&rq->lock);
3161
3162 post_schedule(rq);
3163
3164 preempt_enable_no_resched();
3165 if (need_resched())
3166 goto need_resched;
3167}
3168
3169static inline void sched_submit_work(struct task_struct *tsk)
3170{
3171 if (!tsk->state)
3172 return;
3173 /*
3174 * If we are going to sleep and we have plugged IO queued,
3175 * make sure to submit it to avoid deadlocks.
3176 */
3177 if (blk_needs_flush_plug(tsk))
3178 blk_schedule_flush_plug(tsk);
3179}
3180
3181asmlinkage void __sched schedule(void)
3182{
3183 struct task_struct *tsk = current;
3184
3185 sched_submit_work(tsk);
3186 __schedule();
3187}
3188EXPORT_SYMBOL(schedule);
3189
3190#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
3191
3192static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
3193{
3194 if (lock->owner != owner)
3195 return false;
3196
3197 /*
3198	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
3199	 * lock->owner still matches owner. If that fails, owner might
3200	 * point to free()d memory; if it still matches, the rcu_read_lock()
3201	 * ensures the memory stays valid.
3202 */
3203 barrier();
3204
3205 return owner->on_cpu;
3206}
3207
3208/*
3209 * Look out! "owner" is an entirely speculative pointer
3210 * access and not reliable.
3211 */
3212int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
3213{
3214 if (!sched_feat(OWNER_SPIN))
3215 return 0;
3216
3217 rcu_read_lock();
3218 while (owner_running(lock, owner)) {
3219 if (need_resched())
3220 break;
3221
3222 arch_mutex_cpu_relax();
3223 }
3224 rcu_read_unlock();
3225
3226 /*
3227	 * We break out of the loop above on need_resched() and when the
3228	 * owner changes, which is a sign of heavy contention. Return
3229 * success only when lock->owner is NULL.
3230 */
3231 return lock->owner == NULL;
3232}
3233#endif
3234
3235#ifdef CONFIG_PREEMPT
3236/*
3237 * This is the entry point to schedule() from in-kernel preemption
3238 * off of preempt_enable(). Kernel preemption off the return-from-interrupt
3239 * path is handled by preempt_schedule_irq() below.
3240 */
3241asmlinkage void __sched notrace preempt_schedule(void)
3242{
3243 struct thread_info *ti = current_thread_info();
3244
3245 /*
3246 * If there is a non-zero preempt_count or interrupts are disabled,
3247	 * we do not want to preempt the current task. Just return.
3248 */
3249 if (likely(ti->preempt_count || irqs_disabled()))
3250 return;
3251
3252 do {
3253 add_preempt_count_notrace(PREEMPT_ACTIVE);
3254 __schedule();
3255 sub_preempt_count_notrace(PREEMPT_ACTIVE);
3256
3257 /*
3258 * Check again in case we missed a preemption opportunity
3259 * between schedule and now.
3260 */
3261 barrier();
3262 } while (need_resched());
3263}
3264EXPORT_SYMBOL(preempt_schedule);
3265
3266/*
3267 * This is the entry point to schedule() from kernel preemption
3268 * off of irq context.
3269 * Note that this is called and returns with irqs disabled. This
3270 * protects us against recursive calls from irq.
3271 */
3272asmlinkage void __sched preempt_schedule_irq(void)
3273{
3274 struct thread_info *ti = current_thread_info();
3275
3276 /* Catch callers which need to be fixed */
3277 BUG_ON(ti->preempt_count || !irqs_disabled());
3278
3279 do {
3280 add_preempt_count(PREEMPT_ACTIVE);
3281 local_irq_enable();
3282 __schedule();
3283 local_irq_disable();
3284 sub_preempt_count(PREEMPT_ACTIVE);
3285
3286 /*
3287 * Check again in case we missed a preemption opportunity
3288 * between schedule and now.
3289 */
3290 barrier();
3291 } while (need_resched());
3292}
3293
3294#endif /* CONFIG_PREEMPT */
3295
3296int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
3297 void *key)
3298{
3299 return try_to_wake_up(curr->private, mode, wake_flags);
3300}
3301EXPORT_SYMBOL(default_wake_function);
3302
3303/*
3304 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
3305 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
3306 * number) then we wake all the non-exclusive tasks and one exclusive task.
3307 *
3308 * There are circumstances in which we can try to wake a task which has already
3309 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
3310 * zero in this (rare) case, and we handle it by continuing to scan the queue.
3311 */
3312static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
3313 int nr_exclusive, int wake_flags, void *key)
3314{
3315 wait_queue_t *curr, *next;
3316
3317 list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
3318 unsigned flags = curr->flags;
3319
3320 if (curr->func(curr, mode, wake_flags, key) &&
3321 (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
3322 break;
3323 }
3324}
3325
3326/**
3327 * __wake_up - wake up threads blocked on a waitqueue.
3328 * @q: the waitqueue
3329 * @mode: which threads
3330 * @nr_exclusive: how many wake-one or wake-many threads to wake up
3331 * @key: is directly passed to the wakeup function
3332 *
3333 * It may be assumed that this function implies a write memory barrier before
3334 * changing the task state if and only if any tasks are woken up.
3335 */
3336void __wake_up(wait_queue_head_t *q, unsigned int mode,
3337 int nr_exclusive, void *key)
3338{
3339 unsigned long flags;
3340
3341 spin_lock_irqsave(&q->lock, flags);
3342 __wake_up_common(q, mode, nr_exclusive, 0, key);
3343 spin_unlock_irqrestore(&q->lock, flags);
3344}
3345EXPORT_SYMBOL(__wake_up);
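/*
 * Illustrative sketch, not part of this patch: one plausible way a driver
 * pairs the wake_up() wrapper (which ends up in __wake_up() above) with a
 * sleeper built on wait_event_interruptible() from <linux/wait.h>.  All
 * names below (example_wq, example_flag, ...) are hypothetical.
 */
#if 0
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int example_flag;

static int example_consumer(void)
{
	/* Sleep until example_flag becomes non-zero or a signal arrives. */
	if (wait_event_interruptible(example_wq, example_flag != 0))
		return -ERESTARTSYS;
	return 0;
}

static void example_producer(void)
{
	example_flag = 1;
	wake_up(&example_wq);	/* wakes the non-exclusive waiters queued above */
}
#endif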
3346
3347/*
3348 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
3349 */
3350void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
3351{
3352 __wake_up_common(q, mode, 1, 0, NULL);
3353}
3354EXPORT_SYMBOL_GPL(__wake_up_locked);
3355
3356void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
3357{
3358 __wake_up_common(q, mode, 1, 0, key);
3359}
3360EXPORT_SYMBOL_GPL(__wake_up_locked_key);
3361
3362/**
3363 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
3364 * @q: the waitqueue
3365 * @mode: which threads
3366 * @nr_exclusive: how many wake-one or wake-many threads to wake up
3367 * @key: opaque value to be passed to wakeup targets
3368 *
3369 * The sync wakeup differs in that the waker knows that it will schedule
3370 * away soon, so while the target thread will be woken up, it will not
3371 * be migrated to another CPU - ie. the two threads are 'synchronized'
3372 * with each other. This can prevent needless bouncing between CPUs.
3373 *
3374 * On UP it can prevent extra preemption.
3375 *
3376 * It may be assumed that this function implies a write memory barrier before
3377 * changing the task state if and only if any tasks are woken up.
3378 */
3379void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
3380 int nr_exclusive, void *key)
3381{
3382 unsigned long flags;
3383 int wake_flags = WF_SYNC;
3384
3385 if (unlikely(!q))
3386 return;
3387
3388 if (unlikely(!nr_exclusive))
3389 wake_flags = 0;
3390
3391 spin_lock_irqsave(&q->lock, flags);
3392 __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
3393 spin_unlock_irqrestore(&q->lock, flags);
3394}
3395EXPORT_SYMBOL_GPL(__wake_up_sync_key);
3396
3397/*
3398 * __wake_up_sync - see __wake_up_sync_key()
3399 */
3400void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
3401{
3402 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
3403}
3404EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
3405
3406/**
3407 * complete: - signals a single thread waiting on this completion
3408 * @x: holds the state of this particular completion
3409 *
3410 * This will wake up a single thread waiting on this completion. Threads will be
3411 * awakened in the same order in which they were queued.
3412 *
3413 * See also complete_all(), wait_for_completion() and related routines.
3414 *
3415 * It may be assumed that this function implies a write memory barrier before
3416 * changing the task state if and only if any tasks are woken up.
3417 */
3418void complete(struct completion *x)
3419{
3420 unsigned long flags;
3421
3422 spin_lock_irqsave(&x->wait.lock, flags);
3423 x->done++;
3424 __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
3425 spin_unlock_irqrestore(&x->wait.lock, flags);
3426}
3427EXPORT_SYMBOL(complete);
3428
3429/**
3430 * complete_all: - signals all threads waiting on this completion
3431 * @x: holds the state of this particular completion
3432 *
3433 * This will wake up all threads waiting on this particular completion event.
3434 *
3435 * It may be assumed that this function implies a write memory barrier before
3436 * changing the task state if and only if any tasks are woken up.
3437 */
3438void complete_all(struct completion *x)
3439{
3440 unsigned long flags;
3441
3442 spin_lock_irqsave(&x->wait.lock, flags);
3443 x->done += UINT_MAX/2;
3444 __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
3445 spin_unlock_irqrestore(&x->wait.lock, flags);
3446}
3447EXPORT_SYMBOL(complete_all);
3448
3449static inline long __sched
3450do_wait_for_common(struct completion *x, long timeout, int state)
3451{
3452 if (!x->done) {
3453 DECLARE_WAITQUEUE(wait, current);
3454
3455 __add_wait_queue_tail_exclusive(&x->wait, &wait);
3456 do {
3457 if (signal_pending_state(state, current)) {
3458 timeout = -ERESTARTSYS;
3459 break;
3460 }
3461 __set_current_state(state);
3462 spin_unlock_irq(&x->wait.lock);
3463 timeout = schedule_timeout(timeout);
3464 spin_lock_irq(&x->wait.lock);
3465 } while (!x->done && timeout);
3466 __remove_wait_queue(&x->wait, &wait);
3467 if (!x->done)
3468 return timeout;
3469 }
3470 x->done--;
3471 return timeout ?: 1;
3472}
3473
3474static long __sched
3475wait_for_common(struct completion *x, long timeout, int state)
3476{
3477 might_sleep();
3478
3479 spin_lock_irq(&x->wait.lock);
3480 timeout = do_wait_for_common(x, timeout, state);
3481 spin_unlock_irq(&x->wait.lock);
3482 return timeout;
3483}
3484
3485/**
3486 * wait_for_completion: - waits for completion of a task
3487 * @x: holds the state of this particular completion
3488 *
3489 * This waits to be signaled for completion of a specific task. It is NOT
3490 * interruptible and there is no timeout.
3491 *
3492 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
3493 * and interrupt capability. Also see complete().
3494 */
3495void __sched wait_for_completion(struct completion *x)
3496{
3497 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
3498}
3499EXPORT_SYMBOL(wait_for_completion);
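/*
 * Illustrative sketch, not part of this patch: the usual pairing of
 * complete() and wait_for_completion().  The worker function, thread name
 * and completion below are hypothetical.
 */
#if 0
#include <linux/completion.h>
#include <linux/kthread.h>

static DECLARE_COMPLETION(example_done);

static int example_worker(void *unused)
{
	/* ... do the work the waiter depends on ... */
	complete(&example_done);		/* wake exactly one waiter */
	return 0;
}

static void example_wait_for_worker(void)
{
	kthread_run(example_worker, NULL, "example_worker");
	wait_for_completion(&example_done);	/* uninterruptible, no timeout */
}
#endif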
3500
3501/**
3502 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
3503 * @x: holds the state of this particular completion
3504 * @timeout: timeout value in jiffies
3505 *
3506 * This waits for either a completion of a specific task to be signaled or for a
3507 * specified timeout to expire. The timeout is in jiffies. It is not
3508 * interruptible.
3509 *
3510 * The return value is 0 if timed out, and positive (at least 1, or number of
3511 * jiffies left till timeout) if completed.
3512 */
3513unsigned long __sched
3514wait_for_completion_timeout(struct completion *x, unsigned long timeout)
3515{
3516 return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
3517}
3518EXPORT_SYMBOL(wait_for_completion_timeout);
3519
3520/**
3521 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
3522 * @x: holds the state of this particular completion
3523 *
3524 * This waits for completion of a specific task to be signaled. It is
3525 * interruptible.
3526 *
3527 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
3528 */
3529int __sched wait_for_completion_interruptible(struct completion *x)
3530{
3531 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
3532 if (t == -ERESTARTSYS)
3533 return t;
3534 return 0;
3535}
3536EXPORT_SYMBOL(wait_for_completion_interruptible);
3537
3538/**
3539 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
3540 * @x: holds the state of this particular completion
3541 * @timeout: timeout value in jiffies
3542 *
3543 * This waits for either a completion of a specific task to be signaled or for a
3544 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
3545 *
3546 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3547 * positive (at least 1, or number of jiffies left till timeout) if completed.
3548 */
3549long __sched
3550wait_for_completion_interruptible_timeout(struct completion *x,
3551 unsigned long timeout)
3552{
3553 return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
3554}
3555EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
3556
3557/**
3558 * wait_for_completion_killable: - waits for completion of a task (killable)
3559 * @x: holds the state of this particular completion
3560 *
3561 * This waits to be signaled for completion of a specific task. It can be
3562 * interrupted by a kill signal.
3563 *
3564 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
3565 */
3566int __sched wait_for_completion_killable(struct completion *x)
3567{
3568 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
3569 if (t == -ERESTARTSYS)
3570 return t;
3571 return 0;
3572}
3573EXPORT_SYMBOL(wait_for_completion_killable);
3574
3575/**
3576 * wait_for_completion_killable_timeout: - waits for completion of a task (w/(to,killable))
3577 * @x: holds the state of this particular completion
3578 * @timeout: timeout value in jiffies
3579 *
3580 * This waits for either a completion of a specific task to be
3581 * signaled or for a specified timeout to expire. It can be
3582 * interrupted by a kill signal. The timeout is in jiffies.
3583 *
3584 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
3585 * positive (at least 1, or number of jiffies left till timeout) if completed.
3586 */
3587long __sched
3588wait_for_completion_killable_timeout(struct completion *x,
3589 unsigned long timeout)
3590{
3591 return wait_for_common(x, timeout, TASK_KILLABLE);
3592}
3593EXPORT_SYMBOL(wait_for_completion_killable_timeout);
3594
3595/**
3596 * try_wait_for_completion - try to decrement a completion without blocking
3597 * @x: completion structure
3598 *
3599 * Returns: 0 if a decrement cannot be done without blocking
3600 * 1 if a decrement succeeded.
3601 *
3602 * If a completion is being used as a counting completion,
3603 * attempt to decrement the counter without blocking. This
3604 * enables us to avoid waiting if the resource the completion
3605 * is protecting is not available.
3606 */
3607bool try_wait_for_completion(struct completion *x)
3608{
3609 unsigned long flags;
3610 int ret = 1;
3611
3612 spin_lock_irqsave(&x->wait.lock, flags);
3613 if (!x->done)
3614 ret = 0;
3615 else
3616 x->done--;
3617 spin_unlock_irqrestore(&x->wait.lock, flags);
3618 return ret;
3619}
3620EXPORT_SYMBOL(try_wait_for_completion);
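/*
 * Illustrative sketch, not part of this patch: a completion used as the
 * counting "token pool" described above.  complete() adds a token,
 * try_wait_for_completion() consumes one without ever blocking.  All
 * names are hypothetical.
 */
#if 0
#include <linux/completion.h>
#include <linux/types.h>

static DECLARE_COMPLETION(example_tokens);

static void example_put_token(void)
{
	complete(&example_tokens);		/* ->done++, one more token */
}

static bool example_get_token(void)
{
	/* true if a token was consumed, false if none was available */
	return try_wait_for_completion(&example_tokens);
}
#endif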
3621
3622/**
3623 * completion_done - Test to see if a completion has any waiters
3624 * @x: completion structure
3625 *
3626 * Returns: 0 if there are waiters (wait_for_completion() in progress)
3627 * 1 if there are no waiters.
3628 *
3629 */
3630bool completion_done(struct completion *x)
3631{
3632 unsigned long flags;
3633 int ret = 1;
3634
3635 spin_lock_irqsave(&x->wait.lock, flags);
3636 if (!x->done)
3637 ret = 0;
3638 spin_unlock_irqrestore(&x->wait.lock, flags);
3639 return ret;
3640}
3641EXPORT_SYMBOL(completion_done);
3642
3643static long __sched
3644sleep_on_common(wait_queue_head_t *q, int state, long timeout)
3645{
3646 unsigned long flags;
3647 wait_queue_t wait;
3648
3649 init_waitqueue_entry(&wait, current);
3650
3651 __set_current_state(state);
3652
3653 spin_lock_irqsave(&q->lock, flags);
3654 __add_wait_queue(q, &wait);
3655 spin_unlock(&q->lock);
3656 timeout = schedule_timeout(timeout);
3657 spin_lock_irq(&q->lock);
3658 __remove_wait_queue(q, &wait);
3659 spin_unlock_irqrestore(&q->lock, flags);
3660
3661 return timeout;
3662}
3663
3664void __sched interruptible_sleep_on(wait_queue_head_t *q)
3665{
3666 sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
3667}
3668EXPORT_SYMBOL(interruptible_sleep_on);
3669
3670long __sched
3671interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
3672{
3673 return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
3674}
3675EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3676
3677void __sched sleep_on(wait_queue_head_t *q)
3678{
3679 sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
3680}
3681EXPORT_SYMBOL(sleep_on);
3682
3683long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
3684{
3685 return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
3686}
3687EXPORT_SYMBOL(sleep_on_timeout);
3688
3689#ifdef CONFIG_RT_MUTEXES
3690
3691/*
3692 * rt_mutex_setprio - set the current priority of a task
3693 * @p: task
3694 * @prio: prio value (kernel-internal form)
3695 *
3696 * This function changes the 'effective' priority of a task. It does
3697 * not touch ->normal_prio like __setscheduler().
3698 *
3699 * Used by the rt_mutex code to implement priority inheritance logic.
3700 */
3701void rt_mutex_setprio(struct task_struct *p, int prio)
3702{
3703 int oldprio, on_rq, running;
3704 struct rq *rq;
3705 const struct sched_class *prev_class;
3706
3707 BUG_ON(prio < 0 || prio > MAX_PRIO);
3708
3709 rq = __task_rq_lock(p);
3710
3711 trace_sched_pi_setprio(p, prio);
3712 oldprio = p->prio;
3713 prev_class = p->sched_class;
3714 on_rq = p->on_rq;
3715 running = task_current(rq, p);
3716 if (on_rq)
3717 dequeue_task(rq, p, 0);
3718 if (running)
3719 p->sched_class->put_prev_task(rq, p);
3720
3721 if (rt_prio(prio))
3722 p->sched_class = &rt_sched_class;
3723 else
3724 p->sched_class = &fair_sched_class;
3725
3726 p->prio = prio;
3727
3728 if (running)
3729 p->sched_class->set_curr_task(rq);
3730 if (on_rq)
3731 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
3732
3733 check_class_changed(rq, p, prev_class, oldprio);
3734 __task_rq_unlock(rq);
3735}
3736
3737#endif
3738
3739void set_user_nice(struct task_struct *p, long nice)
3740{
3741 int old_prio, delta, on_rq;
3742 unsigned long flags;
3743 struct rq *rq;
3744
3745 if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3746 return;
3747 /*
3748 * We have to be careful, if called from sys_setpriority(),
3749 * the task might be in the middle of scheduling on another CPU.
3750 */
3751 rq = task_rq_lock(p, &flags);
3752 /*
3753 * The RT priorities are set via sched_setscheduler(), but we still
3754 * allow the 'normal' nice value to be set - but as expected
3755	 * it won't have any effect on scheduling as long as the task is
3756	 * SCHED_FIFO/SCHED_RR:
3757 */
3758 if (task_has_rt_policy(p)) {
3759 p->static_prio = NICE_TO_PRIO(nice);
3760 goto out_unlock;
3761 }
3762 on_rq = p->on_rq;
3763 if (on_rq)
3764 dequeue_task(rq, p, 0);
3765
3766 p->static_prio = NICE_TO_PRIO(nice);
3767 set_load_weight(p);
3768 old_prio = p->prio;
3769 p->prio = effective_prio(p);
3770 delta = p->prio - old_prio;
3771
3772 if (on_rq) {
3773 enqueue_task(rq, p, 0);
3774 /*
3775 * If the task increased its priority or is running and
3776 * lowered its priority, then reschedule its CPU:
3777 */
3778 if (delta < 0 || (delta > 0 && task_running(rq, p)))
3779 resched_task(rq->curr);
3780 }
3781out_unlock:
3782 task_rq_unlock(rq, p, &flags);
3783}
3784EXPORT_SYMBOL(set_user_nice);
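/*
 * Illustrative sketch, not part of this patch: a background kernel thread
 * demoting itself to the lowest priority with set_user_nice().  The thread
 * function and its work loop are hypothetical.
 */
#if 0
#include <linux/sched.h>
#include <linux/kthread.h>

static int example_background_thread(void *unused)
{
	set_user_nice(current, 19);	/* weakest CFS weight, like nice +19 */
	while (!kthread_should_stop()) {
		/* ... low-priority housekeeping ... */
		cond_resched();
	}
	return 0;
}
#endif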
3785
3786/*
3787 * can_nice - check if a task can reduce its nice value
3788 * @p: task
3789 * @nice: nice value
3790 */
3791int can_nice(const struct task_struct *p, const int nice)
3792{
3793 /* convert nice value [19,-20] to rlimit style value [1,40] */
3794 int nice_rlim = 20 - nice;
3795
3796 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
3797 capable(CAP_SYS_NICE));
3798}
3799
3800#ifdef __ARCH_WANT_SYS_NICE
3801
3802/*
3803 * sys_nice - change the priority of the current process.
3804 * @increment: priority increment
3805 *
3806 * sys_setpriority is a more generic, but much slower function that
3807 * does similar things.
3808 */
3809SYSCALL_DEFINE1(nice, int, increment)
3810{
3811 long nice, retval;
3812
3813 /*
3814 * Setpriority might change our priority at the same moment.
3815 * We don't have to worry. Conceptually one call occurs first
3816 * and we have a single winner.
3817 */
3818 if (increment < -40)
3819 increment = -40;
3820 if (increment > 40)
3821 increment = 40;
3822
3823 nice = TASK_NICE(current) + increment;
3824 if (nice < -20)
3825 nice = -20;
3826 if (nice > 19)
3827 nice = 19;
3828
3829 if (increment < 0 && !can_nice(current, nice))
3830 return -EPERM;
3831
3832 retval = security_task_setnice(current, nice);
3833 if (retval)
3834 return retval;
3835
3836 set_user_nice(current, nice);
3837 return 0;
3838}
3839
3840#endif
3841
3842/**
3843 * task_prio - return the priority value of a given task.
3844 * @p: the task in question.
3845 *
3846 * This is the priority value as seen by users in /proc.
3847 * RT tasks are offset by -200. Normal tasks are centered
3848 * around 0, value goes from -16 to +15.
3849 */
3850int task_prio(const struct task_struct *p)
3851{
3852 return p->prio - MAX_RT_PRIO;
3853}
3854
3855/**
3856 * task_nice - return the nice value of a given task.
3857 * @p: the task in question.
3858 */
3859int task_nice(const struct task_struct *p)
3860{
3861 return TASK_NICE(p);
3862}
3863EXPORT_SYMBOL(task_nice);
3864
3865/**
3866 * idle_cpu - is a given cpu idle currently?
3867 * @cpu: the processor in question.
3868 */
3869int idle_cpu(int cpu)
3870{
3871 struct rq *rq = cpu_rq(cpu);
3872
3873 if (rq->curr != rq->idle)
3874 return 0;
3875
3876 if (rq->nr_running)
3877 return 0;
3878
3879#ifdef CONFIG_SMP
3880 if (!llist_empty(&rq->wake_list))
3881 return 0;
3882#endif
3883
3884 return 1;
3885}
3886
3887/**
3888 * idle_task - return the idle task for a given cpu.
3889 * @cpu: the processor in question.
3890 */
3891struct task_struct *idle_task(int cpu)
3892{
3893 return cpu_rq(cpu)->idle;
3894}
3895
3896/**
3897 * find_process_by_pid - find a process with a matching PID value.
3898 * @pid: the pid in question.
3899 */
3900static struct task_struct *find_process_by_pid(pid_t pid)
3901{
3902 return pid ? find_task_by_vpid(pid) : current;
3903}
3904
3905/* Actually do priority change: must hold rq lock. */
3906static void
3907__setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
3908{
3909 p->policy = policy;
3910 p->rt_priority = prio;
3911 p->normal_prio = normal_prio(p);
3912 /* we are holding p->pi_lock already */
3913 p->prio = rt_mutex_getprio(p);
3914 if (rt_prio(p->prio))
3915 p->sched_class = &rt_sched_class;
3916 else
3917 p->sched_class = &fair_sched_class;
3918 set_load_weight(p);
3919}
3920
3921/*
3922 * check the target process has a UID that matches the current process's
3923 */
3924static bool check_same_owner(struct task_struct *p)
3925{
3926 const struct cred *cred = current_cred(), *pcred;
3927 bool match;
3928
3929 rcu_read_lock();
3930 pcred = __task_cred(p);
3931 if (cred->user->user_ns == pcred->user->user_ns)
3932 match = (cred->euid == pcred->euid ||
3933 cred->euid == pcred->uid);
3934 else
3935 match = false;
3936 rcu_read_unlock();
3937 return match;
3938}
3939
3940static int __sched_setscheduler(struct task_struct *p, int policy,
3941 const struct sched_param *param, bool user)
3942{
3943 int retval, oldprio, oldpolicy = -1, on_rq, running;
3944 unsigned long flags;
3945 const struct sched_class *prev_class;
3946 struct rq *rq;
3947 int reset_on_fork;
3948
3949 /* may grab non-irq protected spin_locks */
3950 BUG_ON(in_interrupt());
3951recheck:
3952 /* double check policy once rq lock held */
3953 if (policy < 0) {
3954 reset_on_fork = p->sched_reset_on_fork;
3955 policy = oldpolicy = p->policy;
3956 } else {
3957 reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
3958 policy &= ~SCHED_RESET_ON_FORK;
3959
3960 if (policy != SCHED_FIFO && policy != SCHED_RR &&
3961 policy != SCHED_NORMAL && policy != SCHED_BATCH &&
3962 policy != SCHED_IDLE)
3963 return -EINVAL;
3964 }
3965
3966 /*
3967 * Valid priorities for SCHED_FIFO and SCHED_RR are
3968 * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3969 * SCHED_BATCH and SCHED_IDLE is 0.
3970 */
3971 if (param->sched_priority < 0 ||
3972 (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
3973 (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
3974 return -EINVAL;
3975 if (rt_policy(policy) != (param->sched_priority != 0))
3976 return -EINVAL;
3977
3978 /*
3979 * Allow unprivileged RT tasks to decrease priority:
3980 */
3981 if (user && !capable(CAP_SYS_NICE)) {
3982 if (rt_policy(policy)) {
3983 unsigned long rlim_rtprio =
3984 task_rlimit(p, RLIMIT_RTPRIO);
3985
3986 /* can't set/change the rt policy */
3987 if (policy != p->policy && !rlim_rtprio)
3988 return -EPERM;
3989
3990 /* can't increase priority */
3991 if (param->sched_priority > p->rt_priority &&
3992 param->sched_priority > rlim_rtprio)
3993 return -EPERM;
3994 }
3995
3996 /*
3997 * Treat SCHED_IDLE as nice 20. Only allow a switch to
3998 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
3999 */
4000 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
4001 if (!can_nice(p, TASK_NICE(p)))
4002 return -EPERM;
4003 }
4004
4005 /* can't change other user's priorities */
4006 if (!check_same_owner(p))
4007 return -EPERM;
4008
4009 /* Normal users shall not reset the sched_reset_on_fork flag */
4010 if (p->sched_reset_on_fork && !reset_on_fork)
4011 return -EPERM;
4012 }
4013
4014 if (user) {
4015 retval = security_task_setscheduler(p);
4016 if (retval)
4017 return retval;
4018 }
4019
4020 /*
4021 * make sure no PI-waiters arrive (or leave) while we are
4022 * changing the priority of the task:
4023 *
4024 * To be able to change p->policy safely, the appropriate
4025 * runqueue lock must be held.
4026 */
4027 rq = task_rq_lock(p, &flags);
4028
4029 /*
4030	 * Changing the policy of the stop threads is a very bad idea
4031 */
4032 if (p == rq->stop) {
4033 task_rq_unlock(rq, p, &flags);
4034 return -EINVAL;
4035 }
4036
4037 /*
4038 * If not changing anything there's no need to proceed further:
4039 */
4040 if (unlikely(policy == p->policy && (!rt_policy(policy) ||
4041 param->sched_priority == p->rt_priority))) {
4042
4043 __task_rq_unlock(rq);
4044 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4045 return 0;
4046 }
4047
4048#ifdef CONFIG_RT_GROUP_SCHED
4049 if (user) {
4050 /*
4051 * Do not allow realtime tasks into groups that have no runtime
4052 * assigned.
4053 */
4054 if (rt_bandwidth_enabled() && rt_policy(policy) &&
4055 task_group(p)->rt_bandwidth.rt_runtime == 0 &&
4056 !task_group_is_autogroup(task_group(p))) {
4057 task_rq_unlock(rq, p, &flags);
4058 return -EPERM;
4059 }
4060 }
4061#endif
4062
4063 /* recheck policy now with rq lock held */
4064 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
4065 policy = oldpolicy = -1;
4066 task_rq_unlock(rq, p, &flags);
4067 goto recheck;
4068 }
4069 on_rq = p->on_rq;
4070 running = task_current(rq, p);
4071 if (on_rq)
4072 deactivate_task(rq, p, 0);
4073 if (running)
4074 p->sched_class->put_prev_task(rq, p);
4075
4076 p->sched_reset_on_fork = reset_on_fork;
4077
4078 oldprio = p->prio;
4079 prev_class = p->sched_class;
4080 __setscheduler(rq, p, policy, param->sched_priority);
4081
4082 if (running)
4083 p->sched_class->set_curr_task(rq);
4084 if (on_rq)
4085 activate_task(rq, p, 0);
4086
4087 check_class_changed(rq, p, prev_class, oldprio);
4088 task_rq_unlock(rq, p, &flags);
4089
4090 rt_mutex_adjust_pi(p);
4091
4092 return 0;
4093}
4094
4095/**
4096 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4097 * @p: the task in question.
4098 * @policy: new policy.
4099 * @param: structure containing the new RT priority.
4100 *
4101 * NOTE that the task may already be dead.
4102 */
4103int sched_setscheduler(struct task_struct *p, int policy,
4104 const struct sched_param *param)
4105{
4106 return __sched_setscheduler(p, policy, param, true);
4107}
4108EXPORT_SYMBOL_GPL(sched_setscheduler);
4109
4110/**
4111 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
4112 * @p: the task in question.
4113 * @policy: new policy.
4114 * @param: structure containing the new RT priority.
4115 *
4116 * Just like sched_setscheduler, only don't bother checking if the
4117 * current context has permission. For example, this is needed in
4118 * stop_machine(): we create temporary high priority worker threads,
4119 * but our caller might not have that capability.
4120 */
4121int sched_setscheduler_nocheck(struct task_struct *p, int policy,
4122 const struct sched_param *param)
4123{
4124 return __sched_setscheduler(p, policy, param, false);
4125}
4126
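/*
 * Illustrative sketch, not part of this patch: promoting a freshly created
 * kernel thread to SCHED_FIFO via sched_setscheduler_nocheck(), i.e. without
 * a permission check, much as stop_machine() does for its worker threads.
 * The helper name and priority choice are hypothetical.
 */
#if 0
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static struct task_struct *example_start_rt_thread(int (*fn)(void *), void *arg)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO - 1 };
	struct task_struct *p = kthread_create(fn, arg, "example_rt");

	if (!IS_ERR(p)) {
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		wake_up_process(p);
	}
	return p;
}
#endif
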
4127static int
4128do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
4129{
4130 struct sched_param lparam;
4131 struct task_struct *p;
4132 int retval;
4133
4134 if (!param || pid < 0)
4135 return -EINVAL;
4136 if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
4137 return -EFAULT;
4138
4139 rcu_read_lock();
4140 retval = -ESRCH;
4141 p = find_process_by_pid(pid);
4142 if (p != NULL)
4143 retval = sched_setscheduler(p, policy, &lparam);
4144 rcu_read_unlock();
4145
4146 return retval;
4147}
4148
4149/**
4150 * sys_sched_setscheduler - set/change the scheduler policy and RT priority
4151 * @pid: the pid in question.
4152 * @policy: new policy.
4153 * @param: structure containing the new RT priority.
4154 */
4155SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
4156 struct sched_param __user *, param)
4157{
4158 /* negative values for policy are not valid */
4159 if (policy < 0)
4160 return -EINVAL;
4161
4162 return do_sched_setscheduler(pid, policy, param);
4163}
4164
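/*
 * Illustrative sketch, not part of this patch: the user-space view of the
 * syscall above, using the libc sched_setscheduler() wrapper to make the
 * calling process SCHED_FIFO.  This needs CAP_SYS_NICE or a suitable
 * RLIMIT_RTPRIO; the priority value is just an example.
 */
#if 0
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	return 0;
}
#endif
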
4165/**
4166 * sys_sched_setparam - set/change the RT priority of a thread
4167 * @pid: the pid in question.
4168 * @param: structure containing the new RT priority.
4169 */
4170SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
4171{
4172 return do_sched_setscheduler(pid, -1, param);
4173}
4174
4175/**
4176 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
4177 * @pid: the pid in question.
4178 */
4179SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
4180{
4181 struct task_struct *p;
4182 int retval;
4183
4184 if (pid < 0)
4185 return -EINVAL;
4186
4187 retval = -ESRCH;
4188 rcu_read_lock();
4189 p = find_process_by_pid(pid);
4190 if (p) {
4191 retval = security_task_getscheduler(p);
4192 if (!retval)
4193 retval = p->policy
4194 | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
4195 }
4196 rcu_read_unlock();
4197 return retval;
4198}
4199
4200/**
4201 * sys_sched_getparam - get the RT priority of a thread
4202 * @pid: the pid in question.
4203 * @param: structure containing the RT priority.
4204 */
4205SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
4206{
4207 struct sched_param lp;
4208 struct task_struct *p;
4209 int retval;
4210
4211 if (!param || pid < 0)
4212 return -EINVAL;
4213
4214 rcu_read_lock();
4215 p = find_process_by_pid(pid);
4216 retval = -ESRCH;
4217 if (!p)
4218 goto out_unlock;
4219
4220 retval = security_task_getscheduler(p);
4221 if (retval)
4222 goto out_unlock;
4223
4224 lp.sched_priority = p->rt_priority;
4225 rcu_read_unlock();
4226
4227 /*
4228 * This one might sleep, we cannot do it with a spinlock held ...
4229 */
4230 retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
4231
4232 return retval;
4233
4234out_unlock:
4235 rcu_read_unlock();
4236 return retval;
4237}
4238
4239long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
4240{
4241 cpumask_var_t cpus_allowed, new_mask;
4242 struct task_struct *p;
4243 int retval;
4244
4245 get_online_cpus();
4246 rcu_read_lock();
4247
4248 p = find_process_by_pid(pid);
4249 if (!p) {
4250 rcu_read_unlock();
4251 put_online_cpus();
4252 return -ESRCH;
4253 }
4254
4255 /* Prevent p going away */
4256 get_task_struct(p);
4257 rcu_read_unlock();
4258
4259 if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
4260 retval = -ENOMEM;
4261 goto out_put_task;
4262 }
4263 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
4264 retval = -ENOMEM;
4265 goto out_free_cpus_allowed;
4266 }
4267 retval = -EPERM;
4268 if (!check_same_owner(p) && !task_ns_capable(p, CAP_SYS_NICE))
4269 goto out_unlock;
4270
4271 retval = security_task_setscheduler(p);
4272 if (retval)
4273 goto out_unlock;
4274
4275 cpuset_cpus_allowed(p, cpus_allowed);
4276 cpumask_and(new_mask, in_mask, cpus_allowed);
4277again:
4278 retval = set_cpus_allowed_ptr(p, new_mask);
4279
4280 if (!retval) {
4281 cpuset_cpus_allowed(p, cpus_allowed);
4282 if (!cpumask_subset(new_mask, cpus_allowed)) {
4283 /*
4284 * We must have raced with a concurrent cpuset
4285 * update. Just reset the cpus_allowed to the
4286 * cpuset's cpus_allowed
4287 */
4288 cpumask_copy(new_mask, cpus_allowed);
4289 goto again;
4290 }
4291 }
4292out_unlock:
4293 free_cpumask_var(new_mask);
4294out_free_cpus_allowed:
4295 free_cpumask_var(cpus_allowed);
4296out_put_task:
4297 put_task_struct(p);
4298 put_online_cpus();
4299 return retval;
4300}
4301
4302static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
4303 struct cpumask *new_mask)
4304{
4305 if (len < cpumask_size())
4306 cpumask_clear(new_mask);
4307 else if (len > cpumask_size())
4308 len = cpumask_size();
4309
4310 return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
4311}
4312
4313/**
4314 * sys_sched_setaffinity - set the cpu affinity of a process
4315 * @pid: pid of the process
4316 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4317 * @user_mask_ptr: user-space pointer to the new cpu mask
4318 */
4319SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
4320 unsigned long __user *, user_mask_ptr)
4321{
4322 cpumask_var_t new_mask;
4323 int retval;
4324
4325 if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
4326 return -ENOMEM;
4327
4328 retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
4329 if (retval == 0)
4330 retval = sched_setaffinity(pid, new_mask);
4331 free_cpumask_var(new_mask);
4332 return retval;
4333}
4334
4335long sched_getaffinity(pid_t pid, struct cpumask *mask)
4336{
4337 struct task_struct *p;
4338 unsigned long flags;
4339 int retval;
4340
4341 get_online_cpus();
4342 rcu_read_lock();
4343
4344 retval = -ESRCH;
4345 p = find_process_by_pid(pid);
4346 if (!p)
4347 goto out_unlock;
4348
4349 retval = security_task_getscheduler(p);
4350 if (retval)
4351 goto out_unlock;
4352
4353 raw_spin_lock_irqsave(&p->pi_lock, flags);
4354 cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
4355 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4356
4357out_unlock:
4358 rcu_read_unlock();
4359 put_online_cpus();
4360
4361 return retval;
4362}
4363
4364/**
4365 * sys_sched_getaffinity - get the cpu affinity of a process
4366 * @pid: pid of the process
4367 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
4368 * @user_mask_ptr: user-space pointer to hold the current cpu mask
4369 */
4370SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4371 unsigned long __user *, user_mask_ptr)
4372{
4373 int ret;
4374 cpumask_var_t mask;
4375
4376 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4377 return -EINVAL;
4378 if (len & (sizeof(unsigned long)-1))
4379 return -EINVAL;
4380
4381 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
4382 return -ENOMEM;
4383
4384 ret = sched_getaffinity(pid, mask);
4385 if (ret == 0) {
4386 size_t retlen = min_t(size_t, len, cpumask_size());
4387
4388 if (copy_to_user(user_mask_ptr, mask, retlen))
4389 ret = -EFAULT;
4390 else
4391 ret = retlen;
4392 }
4393 free_cpumask_var(mask);
4394
4395 return ret;
4396}
4397
4398/**
4399 * sys_sched_yield - yield the current processor to other threads.
4400 *
4401 * This function yields the current CPU to other tasks. If there are no
4402 * other threads running on this CPU then this function will return.
4403 */
4404SYSCALL_DEFINE0(sched_yield)
4405{
4406 struct rq *rq = this_rq_lock();
4407
4408 schedstat_inc(rq, yld_count);
4409 current->sched_class->yield_task(rq);
4410
4411 /*
4412 * Since we are going to call schedule() anyway, there's
4413 * no need to preempt or enable interrupts:
4414 */
4415 __release(rq->lock);
4416 spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
4417 do_raw_spin_unlock(&rq->lock);
4418 preempt_enable_no_resched();
4419
4420 schedule();
4421
4422 return 0;
4423}
4424
4425static inline int should_resched(void)
4426{
4427 return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
4428}
4429
4430static void __cond_resched(void)
4431{
4432 add_preempt_count(PREEMPT_ACTIVE);
4433 __schedule();
4434 sub_preempt_count(PREEMPT_ACTIVE);
4435}
4436
4437int __sched _cond_resched(void)
4438{
4439 if (should_resched()) {
4440 __cond_resched();
4441 return 1;
4442 }
4443 return 0;
4444}
4445EXPORT_SYMBOL(_cond_resched);
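/*
 * Illustrative sketch, not part of this patch: the typical caller of
 * _cond_resched() goes through the cond_resched() wrapper from a long
 * loop, so that other tasks get to run even on !CONFIG_PREEMPT kernels.
 * The list-walking helper below is hypothetical.
 */
#if 0
#include <linux/list.h>
#include <linux/sched.h>

static void example_process_all(struct list_head *items)
{
	struct list_head *pos;

	list_for_each(pos, items) {
		/* ... potentially expensive per-item work ... */
		cond_resched();		/* reschedule here if a resched is pending */
	}
}
#endif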
4446
4447/*
4448 * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
4449 * call schedule, and on return reacquire the lock.
4450 *
4451 * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
4452 * operations here to prevent schedule() from being called twice (once via
4453 * spin_unlock(), once by hand).
4454 */
4455int __cond_resched_lock(spinlock_t *lock)
4456{
4457 int resched = should_resched();
4458 int ret = 0;
4459
4460 lockdep_assert_held(lock);
4461
4462 if (spin_needbreak(lock) || resched) {
4463 spin_unlock(lock);
4464 if (resched)
4465 __cond_resched();
4466 else
4467 cpu_relax();
4468 ret = 1;
4469 spin_lock(lock);
4470 }
4471 return ret;
4472}
4473EXPORT_SYMBOL(__cond_resched_lock);
4474
4475int __sched __cond_resched_softirq(void)
4476{
4477 BUG_ON(!in_softirq());
4478
4479 if (should_resched()) {
4480 local_bh_enable();
4481 __cond_resched();
4482 local_bh_disable();
4483 return 1;
4484 }
4485 return 0;
4486}
4487EXPORT_SYMBOL(__cond_resched_softirq);
4488
4489/**
4490 * yield - yield the current processor to other threads.
4491 *
4492 * This is a shortcut for kernel-space yielding - it marks the
4493 * thread runnable and calls sys_sched_yield().
4494 */
4495void __sched yield(void)
4496{
4497 set_current_state(TASK_RUNNING);
4498 sys_sched_yield();
4499}
4500EXPORT_SYMBOL(yield);
4501
4502/**
4503 * yield_to - yield the current processor to another thread in
4504 * your thread group, or accelerate that thread toward the
4505 * processor it's on.
4506 * @p: target task
4507 * @preempt: whether task preemption is allowed or not
4508 *
4509 * It's the caller's job to ensure that the target task struct
4510 * can't go away on us before we can do any checks.
4511 *
4512 * Returns true if we indeed boosted the target task.
4513 */
4514bool __sched yield_to(struct task_struct *p, bool preempt)
4515{
4516 struct task_struct *curr = current;
4517 struct rq *rq, *p_rq;
4518 unsigned long flags;
4519 bool yielded = 0;
4520
4521 local_irq_save(flags);
4522 rq = this_rq();
4523
4524again:
4525 p_rq = task_rq(p);
4526 double_rq_lock(rq, p_rq);
4527 while (task_rq(p) != p_rq) {
4528 double_rq_unlock(rq, p_rq);
4529 goto again;
4530 }
4531
4532 if (!curr->sched_class->yield_to_task)
4533 goto out;
4534
4535 if (curr->sched_class != p->sched_class)
4536 goto out;
4537
4538 if (task_running(p_rq, p) || p->state)
4539 goto out;
4540
4541 yielded = curr->sched_class->yield_to_task(rq, p, preempt);
4542 if (yielded) {
4543 schedstat_inc(rq, yld_count);
4544 /*
4545 * Make p's CPU reschedule; pick_next_entity takes care of
4546 * fairness.
4547 */
4548 if (preempt && rq != p_rq)
4549 resched_task(p_rq->curr);
4550 }
4551
4552out:
4553 double_rq_unlock(rq, p_rq);
4554 local_irq_restore(flags);
4555
4556 if (yielded)
4557 schedule();
4558
4559 return yielded;
4560}
4561EXPORT_SYMBOL_GPL(yield_to);
4562
4563/*
4564 * This task is about to go to sleep on IO. Increment rq->nr_iowait so
4565 * that process accounting knows that this is a task in IO wait state.
4566 */
4567void __sched io_schedule(void)
4568{
4569 struct rq *rq = raw_rq();
4570
4571 delayacct_blkio_start();
4572 atomic_inc(&rq->nr_iowait);
4573 blk_flush_plug(current);
4574 current->in_iowait = 1;
4575 schedule();
4576 current->in_iowait = 0;
4577 atomic_dec(&rq->nr_iowait);
4578 delayacct_blkio_end();
4579}
4580EXPORT_SYMBOL(io_schedule);
4581
4582long __sched io_schedule_timeout(long timeout)
4583{
4584 struct rq *rq = raw_rq();
4585 long ret;
4586
4587 delayacct_blkio_start();
4588 atomic_inc(&rq->nr_iowait);
4589 blk_flush_plug(current);
4590 current->in_iowait = 1;
4591 ret = schedule_timeout(timeout);
4592 current->in_iowait = 0;
4593 atomic_dec(&rq->nr_iowait);
4594 delayacct_blkio_end();
4595 return ret;
4596}
4597
4598/**
4599 * sys_sched_get_priority_max - return maximum RT priority.
4600 * @policy: scheduling class.
4601 *
4602 * this syscall returns the maximum rt_priority that can be used
4603 * by a given scheduling class.
4604 */
4605SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
4606{
4607 int ret = -EINVAL;
4608
4609 switch (policy) {
4610 case SCHED_FIFO:
4611 case SCHED_RR:
4612 ret = MAX_USER_RT_PRIO-1;
4613 break;
4614 case SCHED_NORMAL:
4615 case SCHED_BATCH:
4616 case SCHED_IDLE:
4617 ret = 0;
4618 break;
4619 }
4620 return ret;
4621}
4622
4623/**
4624 * sys_sched_get_priority_min - return minimum RT priority.
4625 * @policy: scheduling class.
4626 *
4627 * this syscall returns the minimum rt_priority that can be used
4628 * by a given scheduling class.
4629 */
4630SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
4631{
4632 int ret = -EINVAL;
4633
4634 switch (policy) {
4635 case SCHED_FIFO:
4636 case SCHED_RR:
4637 ret = 1;
4638 break;
4639 case SCHED_NORMAL:
4640 case SCHED_BATCH:
4641 case SCHED_IDLE:
4642 ret = 0;
4643 }
4644 return ret;
4645}
4646
4647/**
4648 * sys_sched_rr_get_interval - return the default timeslice of a process.
4649 * @pid: pid of the process.
4650 * @interval: userspace pointer to the timeslice value.
4651 *
4652 * this syscall writes the default timeslice value of a given process
4653 * into the user-space timespec buffer. A value of '0' means infinity.
4654 */
4655SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
4656 struct timespec __user *, interval)
4657{
4658 struct task_struct *p;
4659 unsigned int time_slice;
4660 unsigned long flags;
4661 struct rq *rq;
4662 int retval;
4663 struct timespec t;
4664
4665 if (pid < 0)
4666 return -EINVAL;
4667
4668 retval = -ESRCH;
4669 rcu_read_lock();
4670 p = find_process_by_pid(pid);
4671 if (!p)
4672 goto out_unlock;
4673
4674 retval = security_task_getscheduler(p);
4675 if (retval)
4676 goto out_unlock;
4677
4678 rq = task_rq_lock(p, &flags);
4679 time_slice = p->sched_class->get_rr_interval(rq, p);
4680 task_rq_unlock(rq, p, &flags);
4681
4682 rcu_read_unlock();
4683 jiffies_to_timespec(time_slice, &t);
4684 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
4685 return retval;
4686
4687out_unlock:
4688 rcu_read_unlock();
4689 return retval;
4690}
4691
4692static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
4693
4694void sched_show_task(struct task_struct *p)
4695{
4696 unsigned long free = 0;
4697 unsigned state;
4698
4699 state = p->state ? __ffs(p->state) + 1 : 0;
4700 printk(KERN_INFO "%-15.15s %c", p->comm,
4701 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4702#if BITS_PER_LONG == 32
4703 if (state == TASK_RUNNING)
4704 printk(KERN_CONT " running ");
4705 else
4706 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
4707#else
4708 if (state == TASK_RUNNING)
4709 printk(KERN_CONT " running task ");
4710 else
4711 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
4712#endif
4713#ifdef CONFIG_DEBUG_STACK_USAGE
4714 free = stack_not_used(p);
4715#endif
4716 printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4717 task_pid_nr(p), task_pid_nr(p->real_parent),
4718 (unsigned long)task_thread_info(p)->flags);
4719
4720 show_stack(p, NULL);
4721}
4722
4723void show_state_filter(unsigned long state_filter)
4724{
4725 struct task_struct *g, *p;
4726
4727#if BITS_PER_LONG == 32
4728 printk(KERN_INFO
4729		"  task                PC stack   pid father\n");
4730#else
4731 printk(KERN_INFO
4732		"  task                        PC stack   pid father\n");
4733#endif
4734 rcu_read_lock();
4735 do_each_thread(g, p) {
4736 /*
4737		 * reset the NMI-timeout, listing all tasks on a slow
4738 * console might take a lot of time:
4739 */
4740 touch_nmi_watchdog();
4741 if (!state_filter || (p->state & state_filter))
4742 sched_show_task(p);
4743 } while_each_thread(g, p);
4744
4745 touch_all_softlockup_watchdogs();
4746
4747#ifdef CONFIG_SCHED_DEBUG
4748 sysrq_sched_debug_show();
4749#endif
4750 rcu_read_unlock();
4751 /*
4752 * Only show locks if all tasks are dumped:
4753 */
4754 if (!state_filter)
4755 debug_show_all_locks();
4756}
4757
4758void __cpuinit init_idle_bootup_task(struct task_struct *idle)
4759{
4760 idle->sched_class = &idle_sched_class;
4761}
4762
4763/**
4764 * init_idle - set up an idle thread for a given CPU
4765 * @idle: task in question
4766 * @cpu: cpu the idle task belongs to
4767 *
4768 * NOTE: this function does not set the idle thread's NEED_RESCHED
4769 * flag, to make booting more robust.
4770 */
4771void __cpuinit init_idle(struct task_struct *idle, int cpu)
4772{
4773 struct rq *rq = cpu_rq(cpu);
4774 unsigned long flags;
4775
4776 raw_spin_lock_irqsave(&rq->lock, flags);
4777
4778 __sched_fork(idle);
4779 idle->state = TASK_RUNNING;
4780 idle->se.exec_start = sched_clock();
4781
4782 do_set_cpus_allowed(idle, cpumask_of(cpu));
4783 /*
4784 * We're having a chicken and egg problem, even though we are
4785 * holding rq->lock, the cpu isn't yet set to this cpu so the
4786 * lockdep check in task_group() will fail.
4787 *
4788	 * Similar case to sched_fork(). Alternatively we could
4789 * use task_rq_lock() here and obtain the other rq->lock.
4790 *
4791 * Silence PROVE_RCU
4792 */
4793 rcu_read_lock();
4794 __set_task_cpu(idle, cpu);
4795 rcu_read_unlock();
4796
4797 rq->curr = rq->idle = idle;
4798#if defined(CONFIG_SMP)
4799 idle->on_cpu = 1;
4800#endif
4801 raw_spin_unlock_irqrestore(&rq->lock, flags);
4802
4803 /* Set the preempt count _outside_ the spinlocks! */
4804 task_thread_info(idle)->preempt_count = 0;
4805
4806 /*
4807 * The idle tasks have their own, simple scheduling class:
4808 */
4809 idle->sched_class = &idle_sched_class;
4810 ftrace_graph_init_idle_task(idle, cpu);
4811#if defined(CONFIG_SMP)
4812 sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
4813#endif
4814}
4815
4816#ifdef CONFIG_SMP
4817void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
4818{
4819 if (p->sched_class && p->sched_class->set_cpus_allowed)
4820 p->sched_class->set_cpus_allowed(p, new_mask);
4821
4822 cpumask_copy(&p->cpus_allowed, new_mask);
4823 p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
4824}
4825
4826/*
4827 * This is how migration works:
4828 *
4829 * 1) we invoke migration_cpu_stop() on the target CPU using
4830 * stop_one_cpu().
4831 * 2) stopper starts to run (implicitly forcing the migrated thread
4832 * off the CPU)
4833 * 3) it checks whether the migrated task is still in the wrong runqueue.
4834 * 4) if it's in the wrong runqueue then the migration thread removes
4835 * it and puts it into the right queue.
4836 * 5) stopper completes and stop_one_cpu() returns and the migration
4837 * is done.
4838 */
4839
4840/*
4841 * Change a given task's CPU affinity. Migrate the thread to a
4842 * proper CPU and schedule it away if the CPU it's executing on
4843 * is removed from the allowed bitmask.
4844 *
4845 * NOTE: the caller must have a valid reference to the task, the
4846 * task must not exit() & deallocate itself prematurely. The
4847 * call is not atomic; no spinlocks may be held.
4848 */
4849int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
4850{
4851 unsigned long flags;
4852 struct rq *rq;
4853 unsigned int dest_cpu;
4854 int ret = 0;
4855
4856 rq = task_rq_lock(p, &flags);
4857
4858 if (cpumask_equal(&p->cpus_allowed, new_mask))
4859 goto out;
4860
4861 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
4862 ret = -EINVAL;
4863 goto out;
4864 }
4865
4866 if (unlikely((p->flags & PF_THREAD_BOUND) && p != current)) {
4867 ret = -EINVAL;
4868 goto out;
4869 }
4870
4871 do_set_cpus_allowed(p, new_mask);
4872
4873 /* Can the task run on the task's current CPU? If so, we're done */
4874 if (cpumask_test_cpu(task_cpu(p), new_mask))
4875 goto out;
4876
4877 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
4878 if (p->on_rq) {
4879 struct migration_arg arg = { p, dest_cpu };
4880 /* Need help from migration thread: drop lock and wait. */
4881 task_rq_unlock(rq, p, &flags);
4882 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
4883 tlb_migrate_finish(p->mm);
4884 return 0;
4885 }
4886out:
4887 task_rq_unlock(rq, p, &flags);
4888
4889 return ret;
4890}
4891EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
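/*
 * Illustrative sketch, not part of this patch: restricting a kernel thread
 * to a single CPU with set_cpus_allowed_ptr().  The caller must hold a
 * valid reference on @p, as noted above; the helper name is hypothetical.
 */
#if 0
#include <linux/cpumask.h>
#include <linux/sched.h>

static int example_pin_task(struct task_struct *p, int cpu)
{
	/* May hand @p to the migration thread if it currently runs elsewhere. */
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}
#endif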
4892
4893/*
4894 * Move (not current) task off this cpu, onto dest cpu. We're doing
4895 * this because either it can't run here any more (set_cpus_allowed()
4896 * away from this CPU, or CPU going down), or because we're
4897 * attempting to rebalance this task on exec (sched_exec).
4898 *
4899 * So we race with normal scheduler movements, but that's OK, as long
4900 * as the task is no longer on this CPU.
4901 *
4902 * Returns non-zero if task was successfully migrated.
4903 */
4904static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
4905{
4906 struct rq *rq_dest, *rq_src;
4907 int ret = 0;
4908
4909 if (unlikely(!cpu_active(dest_cpu)))
4910 return ret;
4911
4912 rq_src = cpu_rq(src_cpu);
4913 rq_dest = cpu_rq(dest_cpu);
4914
4915 raw_spin_lock(&p->pi_lock);
4916 double_rq_lock(rq_src, rq_dest);
4917 /* Already moved. */
4918 if (task_cpu(p) != src_cpu)
4919 goto done;
4920 /* Affinity changed (again). */
4921 if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
4922 goto fail;
4923
4924 /*
4925 * If we're not on a rq, the next wake-up will ensure we're
4926 * placed properly.
4927 */
4928 if (p->on_rq) {
4929 deactivate_task(rq_src, p, 0);
4930 set_task_cpu(p, dest_cpu);
4931 activate_task(rq_dest, p, 0);
4932 check_preempt_curr(rq_dest, p, 0);
4933 }
4934done:
4935 ret = 1;
4936fail:
4937 double_rq_unlock(rq_src, rq_dest);
4938 raw_spin_unlock(&p->pi_lock);
4939 return ret;
4940}
4941
4942/*
4943 * migration_cpu_stop - this will be executed by a highprio stopper thread
4944 * and performs thread migration by bumping the thread off its CPU and
4945 * then 'pushing' it onto another runqueue.
4946 */
4947static int migration_cpu_stop(void *data)
4948{
4949 struct migration_arg *arg = data;
4950
4951 /*
4952 * The original target cpu might have gone down and we might
4953 * be on another cpu but it doesn't matter.
4954 */
4955 local_irq_disable();
4956 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
4957 local_irq_enable();
4958 return 0;
4959}
4960
4961#ifdef CONFIG_HOTPLUG_CPU
4962
4963/*
4964 * Ensures that the idle task is using init_mm right before its cpu goes
4965 * offline.
4966 */
4967void idle_task_exit(void)
4968{
4969 struct mm_struct *mm = current->active_mm;
4970
4971 BUG_ON(cpu_online(smp_processor_id()));
4972
4973 if (mm != &init_mm)
4974 switch_mm(mm, &init_mm, current);
4975 mmdrop(mm);
4976}
4977
4978/*
4979 * While a dead CPU has no uninterruptible tasks queued at this point,
4980 * it might still have a nonzero ->nr_uninterruptible counter, because
4981 * for performance reasons the counter is not strictly tracking tasks to
4982 * their home CPUs. So we just add the counter to another CPU's counter,
4983 * to keep the global sum constant after CPU-down:
4984 */
4985static void migrate_nr_uninterruptible(struct rq *rq_src)
4986{
4987 struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
4988
4989 rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
4990 rq_src->nr_uninterruptible = 0;
4991}
4992
4993/*
4994 * remove the tasks which were accounted by rq from calc_load_tasks.
4995 */
4996static void calc_global_load_remove(struct rq *rq)
4997{
4998 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
4999 rq->calc_load_active = 0;
5000}
5001
5002/*
5003 * Migrate all tasks from the rq, sleeping tasks will be migrated by
5004 * try_to_wake_up()->select_task_rq().
5005 *
5006 * Called with rq->lock held even though we're in stop_machine() and
5007 * there's no concurrency possible, we hold the required locks anyway
5008 * because of lock validation efforts.
5009 */
5010static void migrate_tasks(unsigned int dead_cpu)
5011{
5012 struct rq *rq = cpu_rq(dead_cpu);
5013 struct task_struct *next, *stop = rq->stop;
5014 int dest_cpu;
5015
5016 /*
5017 * Fudge the rq selection such that the below task selection loop
5018 * doesn't get stuck on the currently eligible stop task.
5019 *
5020 * We're currently inside stop_machine() and the rq is either stuck
5021 * in the stop_machine_cpu_stop() loop, or we're executing this code,
5022 * either way we should never end up calling schedule() until we're
5023 * done here.
5024 */
5025 rq->stop = NULL;
5026
5027 /* Ensure any throttled groups are reachable by pick_next_task */
5028 unthrottle_offline_cfs_rqs(rq);
5029
5030 for ( ; ; ) {
5031 /*
5032 * There's this thread running, bail when that's the only
5033 * remaining thread.
5034 */
5035 if (rq->nr_running == 1)
5036 break;
5037
5038 next = pick_next_task(rq);
5039 BUG_ON(!next);
5040 next->sched_class->put_prev_task(rq, next);
5041
5042 /* Find suitable destination for @next, with force if needed. */
5043 dest_cpu = select_fallback_rq(dead_cpu, next);
5044 raw_spin_unlock(&rq->lock);
5045
5046 __migrate_task(next, dead_cpu, dest_cpu);
5047
5048 raw_spin_lock(&rq->lock);
5049 }
5050
5051 rq->stop = stop;
5052}
5053
5054#endif /* CONFIG_HOTPLUG_CPU */
5055
5056#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
5057
5058static struct ctl_table sd_ctl_dir[] = {
5059 {
5060 .procname = "sched_domain",
5061 .mode = 0555,
5062 },
5063 {}
5064};
5065
5066static struct ctl_table sd_ctl_root[] = {
5067 {
5068 .procname = "kernel",
5069 .mode = 0555,
5070 .child = sd_ctl_dir,
5071 },
5072 {}
5073};
5074
5075static struct ctl_table *sd_alloc_ctl_entry(int n)
5076{
5077 struct ctl_table *entry =
5078 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
5079
5080 return entry;
5081}
5082
5083static void sd_free_ctl_entry(struct ctl_table **tablep)
5084{
5085 struct ctl_table *entry;
5086
5087 /*
5088 * In the intermediate directories, both the child directory and
5089 * procname are dynamically allocated and could fail but the mode
5090 * will always be set. In the lowest directory the names are
5091 * static strings and all have proc handlers.
5092 */
5093 for (entry = *tablep; entry->mode; entry++) {
5094 if (entry->child)
5095 sd_free_ctl_entry(&entry->child);
5096 if (entry->proc_handler == NULL)
5097 kfree(entry->procname);
5098 }
5099
5100 kfree(*tablep);
5101 *tablep = NULL;
5102}
5103
5104static void
5105set_table_entry(struct ctl_table *entry,
5106 const char *procname, void *data, int maxlen,
5107 mode_t mode, proc_handler *proc_handler)
5108{
5109 entry->procname = procname;
5110 entry->data = data;
5111 entry->maxlen = maxlen;
5112 entry->mode = mode;
5113 entry->proc_handler = proc_handler;
5114}
5115
5116static struct ctl_table *
5117sd_alloc_ctl_domain_table(struct sched_domain *sd)
5118{
5119 struct ctl_table *table = sd_alloc_ctl_entry(13);
5120
5121 if (table == NULL)
5122 return NULL;
5123
5124 set_table_entry(&table[0], "min_interval", &sd->min_interval,
5125 sizeof(long), 0644, proc_doulongvec_minmax);
5126 set_table_entry(&table[1], "max_interval", &sd->max_interval,
5127 sizeof(long), 0644, proc_doulongvec_minmax);
5128 set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
5129 sizeof(int), 0644, proc_dointvec_minmax);
5130 set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
5131 sizeof(int), 0644, proc_dointvec_minmax);
5132 set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
5133 sizeof(int), 0644, proc_dointvec_minmax);
5134 set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
5135 sizeof(int), 0644, proc_dointvec_minmax);
5136 set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
5137 sizeof(int), 0644, proc_dointvec_minmax);
5138 set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
5139 sizeof(int), 0644, proc_dointvec_minmax);
5140 set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
5141 sizeof(int), 0644, proc_dointvec_minmax);
5142 set_table_entry(&table[9], "cache_nice_tries",
5143 &sd->cache_nice_tries,
5144 sizeof(int), 0644, proc_dointvec_minmax);
5145 set_table_entry(&table[10], "flags", &sd->flags,
5146 sizeof(int), 0644, proc_dointvec_minmax);
5147 set_table_entry(&table[11], "name", sd->name,
5148 CORENAME_MAX_SIZE, 0444, proc_dostring);
5149 /* &table[12] is terminator */
5150
5151 return table;
5152}
5153
5154static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
5155{
5156 struct ctl_table *entry, *table;
5157 struct sched_domain *sd;
5158 int domain_num = 0, i;
5159 char buf[32];
5160
5161 for_each_domain(cpu, sd)
5162 domain_num++;
5163 entry = table = sd_alloc_ctl_entry(domain_num + 1);
5164 if (table == NULL)
5165 return NULL;
5166
5167 i = 0;
5168 for_each_domain(cpu, sd) {
5169 snprintf(buf, 32, "domain%d", i);
5170 entry->procname = kstrdup(buf, GFP_KERNEL);
5171 entry->mode = 0555;
5172 entry->child = sd_alloc_ctl_domain_table(sd);
5173 entry++;
5174 i++;
5175 }
5176 return table;
5177}
5178
5179static struct ctl_table_header *sd_sysctl_header;
5180static void register_sched_domain_sysctl(void)
5181{
5182 int i, cpu_num = num_possible_cpus();
5183 struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
5184 char buf[32];
5185
5186 WARN_ON(sd_ctl_dir[0].child);
5187 sd_ctl_dir[0].child = entry;
5188
5189 if (entry == NULL)
5190 return;
5191
5192 for_each_possible_cpu(i) {
5193 snprintf(buf, 32, "cpu%d", i);
5194 entry->procname = kstrdup(buf, GFP_KERNEL);
5195 entry->mode = 0555;
5196 entry->child = sd_alloc_ctl_cpu_table(i);
5197 entry++;
5198 }
5199
5200 WARN_ON(sd_sysctl_header);
5201 sd_sysctl_header = register_sysctl_table(sd_ctl_root);
5202}
5203
5204/* may be called multiple times per register */
5205static void unregister_sched_domain_sysctl(void)
5206{
5207 if (sd_sysctl_header)
5208 unregister_sysctl_table(sd_sysctl_header);
5209 sd_sysctl_header = NULL;
5210 if (sd_ctl_dir[0].child)
5211 sd_free_ctl_entry(&sd_ctl_dir[0].child);
5212}
5213#else
5214static void register_sched_domain_sysctl(void)
5215{
5216}
5217static void unregister_sched_domain_sysctl(void)
5218{
5219}
5220#endif
5221
5222static void set_rq_online(struct rq *rq)
5223{
5224 if (!rq->online) {
5225 const struct sched_class *class;
5226
5227 cpumask_set_cpu(rq->cpu, rq->rd->online);
5228 rq->online = 1;
5229
5230 for_each_class(class) {
5231 if (class->rq_online)
5232 class->rq_online(rq);
5233 }
5234 }
5235}
5236
5237static void set_rq_offline(struct rq *rq)
5238{
5239 if (rq->online) {
5240 const struct sched_class *class;
5241
5242 for_each_class(class) {
5243 if (class->rq_offline)
5244 class->rq_offline(rq);
5245 }
5246
5247 cpumask_clear_cpu(rq->cpu, rq->rd->online);
5248 rq->online = 0;
5249 }
5250}
5251
5252/*
5253 * migration_call - callback that gets triggered when a CPU is added.
5254 * Here we can start up the necessary migration thread for the new CPU.
5255 */
5256static int __cpuinit
5257migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5258{
5259 int cpu = (long)hcpu;
5260 unsigned long flags;
5261 struct rq *rq = cpu_rq(cpu);
5262
5263 switch (action & ~CPU_TASKS_FROZEN) {
5264
5265 case CPU_UP_PREPARE:
5266 rq->calc_load_update = calc_load_update;
5267 break;
5268
5269 case CPU_ONLINE:
5270 /* Update our root-domain */
5271 raw_spin_lock_irqsave(&rq->lock, flags);
5272 if (rq->rd) {
5273 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5274
5275 set_rq_online(rq);
5276 }
5277 raw_spin_unlock_irqrestore(&rq->lock, flags);
5278 break;
5279
5280#ifdef CONFIG_HOTPLUG_CPU
5281 case CPU_DYING:
5282 sched_ttwu_pending();
5283 /* Update our root-domain */
5284 raw_spin_lock_irqsave(&rq->lock, flags);
5285 if (rq->rd) {
5286 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
5287 set_rq_offline(rq);
5288 }
5289 migrate_tasks(cpu);
5290 BUG_ON(rq->nr_running != 1); /* the migration thread */
5291 raw_spin_unlock_irqrestore(&rq->lock, flags);
5292
5293 migrate_nr_uninterruptible(rq);
5294 calc_global_load_remove(rq);
5295 break;
5296#endif
5297 }
5298
5299 update_max_interval();
5300
5301 return NOTIFY_OK;
5302}
5303
5304/*
5305 * Register at high priority so that task migration (migrate_all_tasks)
5306 * happens before everything else. This has to be lower priority than
5307 * the notifier in the perf_event subsystem, though.
5308 */
5309static struct notifier_block __cpuinitdata migration_notifier = {
5310 .notifier_call = migration_call,
5311 .priority = CPU_PRI_MIGRATION,
5312};
5313
5314static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
5315 unsigned long action, void *hcpu)
5316{
5317 switch (action & ~CPU_TASKS_FROZEN) {
5318 case CPU_ONLINE:
5319 case CPU_DOWN_FAILED:
5320 set_cpu_active((long)hcpu, true);
5321 return NOTIFY_OK;
5322 default:
5323 return NOTIFY_DONE;
5324 }
5325}
5326
5327static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
5328 unsigned long action, void *hcpu)
5329{
5330 switch (action & ~CPU_TASKS_FROZEN) {
5331 case CPU_DOWN_PREPARE:
5332 set_cpu_active((long)hcpu, false);
5333 return NOTIFY_OK;
5334 default:
5335 return NOTIFY_DONE;
5336 }
5337}
5338
5339static int __init migration_init(void)
5340{
5341 void *cpu = (void *)(long)smp_processor_id();
5342 int err;
5343
5344 /* Initialize migration for the boot CPU */
5345 err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
5346 BUG_ON(err == NOTIFY_BAD);
5347 migration_call(&migration_notifier, CPU_ONLINE, cpu);
5348 register_cpu_notifier(&migration_notifier);
5349
5350 /* Register cpu active notifiers */
5351 cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE);
5352 cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE);
5353
5354 return 0;
5355}
5356early_initcall(migration_init);
5357#endif
5358
5359#ifdef CONFIG_SMP
5360
5361static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5362
5363#ifdef CONFIG_SCHED_DEBUG
5364
5365static __read_mostly int sched_domain_debug_enabled;
5366
5367static int __init sched_domain_debug_setup(char *str)
5368{
5369 sched_domain_debug_enabled = 1;
5370
5371 return 0;
5372}
5373early_param("sched_debug", sched_domain_debug_setup);
5374
5375static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
5376 struct cpumask *groupmask)
5377{
5378 struct sched_group *group = sd->groups;
5379 char str[256];
5380
5381 cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
5382 cpumask_clear(groupmask);
5383
5384 printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
5385
5386 if (!(sd->flags & SD_LOAD_BALANCE)) {
5387 printk("does not load-balance\n");
5388 if (sd->parent)
5389 printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
5390 " has parent");
5391 return -1;
5392 }
5393
5394 printk(KERN_CONT "span %s level %s\n", str, sd->name);
5395
5396 if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
5397 printk(KERN_ERR "ERROR: domain->span does not contain "
5398 "CPU%d\n", cpu);
5399 }
5400 if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
5401 printk(KERN_ERR "ERROR: domain->groups does not contain"
5402 " CPU%d\n", cpu);
5403 }
5404
5405 printk(KERN_DEBUG "%*s groups:", level + 1, "");
5406 do {
5407 if (!group) {
5408 printk("\n");
5409 printk(KERN_ERR "ERROR: group is NULL\n");
5410 break;
5411 }
5412
5413 if (!group->sgp->power) {
5414 printk(KERN_CONT "\n");
5415 printk(KERN_ERR "ERROR: domain->cpu_power not "
5416 "set\n");
5417 break;
5418 }
5419
5420 if (!cpumask_weight(sched_group_cpus(group))) {
5421 printk(KERN_CONT "\n");
5422 printk(KERN_ERR "ERROR: empty group\n");
5423 break;
5424 }
5425
5426 if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
5427 printk(KERN_CONT "\n");
5428 printk(KERN_ERR "ERROR: repeated CPUs\n");
5429 break;
5430 }
5431
5432 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
5433
5434 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
5435
5436 printk(KERN_CONT " %s", str);
5437 if (group->sgp->power != SCHED_POWER_SCALE) {
5438 printk(KERN_CONT " (cpu_power = %d)",
5439 group->sgp->power);
5440 }
5441
5442 group = group->next;
5443 } while (group != sd->groups);
5444 printk(KERN_CONT "\n");
5445
5446 if (!cpumask_equal(sched_domain_span(sd), groupmask))
5447 printk(KERN_ERR "ERROR: groups don't span domain->span\n");
5448
5449 if (sd->parent &&
5450 !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
5451 printk(KERN_ERR "ERROR: parent span is not a superset "
5452 "of domain->span\n");
5453 return 0;
5454}
5455
5456static void sched_domain_debug(struct sched_domain *sd, int cpu)
5457{
5458 int level = 0;
5459
5460 if (!sched_domain_debug_enabled)
5461 return;
5462
5463 if (!sd) {
5464 printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
5465 return;
5466 }
5467
5468 printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
5469
5470 for (;;) {
5471 if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
5472 break;
5473 level++;
5474 sd = sd->parent;
5475 if (!sd)
5476 break;
5477 }
5478}
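/*
 * Illustrative only: with "sched_debug" on the command line, the dump for
 * a hypothetical two-thread, single-core CPU might look roughly like
 *
 *	CPU0 attaching sched-domain:
 *	 domain 0: span 0-1 level SIBLING
 *	  groups: 0 1
 *
 * with one such block per parent level, and a "(cpu_power = N)" note
 * after any group whose power differs from SCHED_POWER_SCALE.
 */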
5479#else /* !CONFIG_SCHED_DEBUG */
5480# define sched_domain_debug(sd, cpu) do { } while (0)
5481#endif /* CONFIG_SCHED_DEBUG */
5482
5483static int sd_degenerate(struct sched_domain *sd)
5484{
5485 if (cpumask_weight(sched_domain_span(sd)) == 1)
5486 return 1;
5487
5488 /* Following flags need at least 2 groups */
5489 if (sd->flags & (SD_LOAD_BALANCE |
5490 SD_BALANCE_NEWIDLE |
5491 SD_BALANCE_FORK |
5492 SD_BALANCE_EXEC |
5493 SD_SHARE_CPUPOWER |
5494 SD_SHARE_PKG_RESOURCES)) {
5495 if (sd->groups != sd->groups->next)
5496 return 0;
5497 }
5498
5499 /* Following flags don't use groups */
5500 if (sd->flags & (SD_WAKE_AFFINE))
5501 return 0;
5502
5503 return 1;
5504}
5505
5506static int
5507sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
5508{
5509 unsigned long cflags = sd->flags, pflags = parent->flags;
5510
5511 if (sd_degenerate(parent))
5512 return 1;
5513
5514 if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
5515 return 0;
5516
5517 /* Flags needing groups don't count if only 1 group in parent */
5518 if (parent->groups == parent->groups->next) {
5519 pflags &= ~(SD_LOAD_BALANCE |
5520 SD_BALANCE_NEWIDLE |
5521 SD_BALANCE_FORK |
5522 SD_BALANCE_EXEC |
5523 SD_SHARE_CPUPOWER |
5524 SD_SHARE_PKG_RESOURCES);
5525 if (nr_node_ids == 1)
5526 pflags &= ~SD_SERIALIZE;
5527 }
5528 if (~cflags & pflags)
5529 return 0;
5530
5531 return 1;
5532}
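/*
 * In short: a parent domain gets spliced out by cpu_attach_domain() below
 * when it spans exactly the same CPUs as its child, has only one group
 * (so the group-based balancing flags are moot), and otherwise sets no
 * flags the child does not already set. A hypothetical example is a
 * dual-core, non-SMT package whose MC and CPU levels cover the same two
 * CPUs.
 */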
5533
5534static void free_rootdomain(struct rcu_head *rcu)
5535{
5536 struct root_domain *rd = container_of(rcu, struct root_domain, rcu);
5537
5538 cpupri_cleanup(&rd->cpupri);
5539 free_cpumask_var(rd->rto_mask);
5540 free_cpumask_var(rd->online);
5541 free_cpumask_var(rd->span);
5542 kfree(rd);
5543}
5544
5545static void rq_attach_root(struct rq *rq, struct root_domain *rd)
5546{
5547 struct root_domain *old_rd = NULL;
5548 unsigned long flags;
5549
5550 raw_spin_lock_irqsave(&rq->lock, flags);
5551
5552 if (rq->rd) {
5553 old_rd = rq->rd;
5554
5555 if (cpumask_test_cpu(rq->cpu, old_rd->online))
5556 set_rq_offline(rq);
5557
5558 cpumask_clear_cpu(rq->cpu, old_rd->span);
5559
5560 /*
5561			 * If we don't want to free the old_rd yet then
5562 * set old_rd to NULL to skip the freeing later
5563 * in this function:
5564 */
5565 if (!atomic_dec_and_test(&old_rd->refcount))
5566 old_rd = NULL;
5567 }
5568
5569 atomic_inc(&rd->refcount);
5570 rq->rd = rd;
5571
5572 cpumask_set_cpu(rq->cpu, rd->span);
5573 if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
5574 set_rq_online(rq);
5575
5576 raw_spin_unlock_irqrestore(&rq->lock, flags);
5577
5578 if (old_rd)
5579 call_rcu_sched(&old_rd->rcu, free_rootdomain);
5580}
5581
5582static int init_rootdomain(struct root_domain *rd)
5583{
5584 memset(rd, 0, sizeof(*rd));
5585
5586 if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
5587 goto out;
5588 if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
5589 goto free_span;
5590 if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
5591 goto free_online;
5592
5593 if (cpupri_init(&rd->cpupri) != 0)
5594 goto free_rto_mask;
5595 return 0;
5596
5597free_rto_mask:
5598 free_cpumask_var(rd->rto_mask);
5599free_online:
5600 free_cpumask_var(rd->online);
5601free_span:
5602 free_cpumask_var(rd->span);
5603out:
5604 return -ENOMEM;
5605}
5606
5607/*
5608 * By default the system creates a single root-domain with all cpus as
5609 * members (mimicking the global state we have today).
5610 */
5611struct root_domain def_root_domain;
5612
5613static void init_defrootdomain(void)
5614{
5615 init_rootdomain(&def_root_domain);
5616
5617 atomic_set(&def_root_domain.refcount, 1);
5618}
5619
5620static struct root_domain *alloc_rootdomain(void)
5621{
5622 struct root_domain *rd;
5623
5624 rd = kmalloc(sizeof(*rd), GFP_KERNEL);
5625 if (!rd)
5626 return NULL;
5627
5628 if (init_rootdomain(rd) != 0) {
5629 kfree(rd);
5630 return NULL;
5631 }
5632
5633 return rd;
5634}
5635
5636static void free_sched_groups(struct sched_group *sg, int free_sgp)
5637{
5638 struct sched_group *tmp, *first;
5639
5640 if (!sg)
5641 return;
5642
5643 first = sg;
5644 do {
5645 tmp = sg->next;
5646
5647 if (free_sgp && atomic_dec_and_test(&sg->sgp->ref))
5648 kfree(sg->sgp);
5649
5650 kfree(sg);
5651 sg = tmp;
5652 } while (sg != first);
5653}
5654
5655static void free_sched_domain(struct rcu_head *rcu)
5656{
5657 struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu);
5658
5659 /*
5660	 * If it's an overlapping domain, it has private groups; iterate and
5661 * nuke them all.
5662 */
5663 if (sd->flags & SD_OVERLAP) {
5664 free_sched_groups(sd->groups, 1);
5665 } else if (atomic_dec_and_test(&sd->groups->ref)) {
5666 kfree(sd->groups->sgp);
5667 kfree(sd->groups);
5668 }
5669 kfree(sd);
5670}
5671
5672static void destroy_sched_domain(struct sched_domain *sd, int cpu)
5673{
5674 call_rcu(&sd->rcu, free_sched_domain);
5675}
5676
5677static void destroy_sched_domains(struct sched_domain *sd, int cpu)
5678{
5679 for (; sd; sd = sd->parent)
5680 destroy_sched_domain(sd, cpu);
5681}
5682
5683/*
5684 * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
5685 * hold the hotplug lock.
5686 */
5687static void
5688cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
5689{
5690 struct rq *rq = cpu_rq(cpu);
5691 struct sched_domain *tmp;
5692
5693 /* Remove the sched domains which do not contribute to scheduling. */
5694 for (tmp = sd; tmp; ) {
5695 struct sched_domain *parent = tmp->parent;
5696 if (!parent)
5697 break;
5698
5699 if (sd_parent_degenerate(tmp, parent)) {
5700 tmp->parent = parent->parent;
5701 if (parent->parent)
5702 parent->parent->child = tmp;
5703 destroy_sched_domain(parent, cpu);
5704 } else
5705 tmp = tmp->parent;
5706 }
5707
5708 if (sd && sd_degenerate(sd)) {
5709 tmp = sd;
5710 sd = sd->parent;
5711 destroy_sched_domain(tmp, cpu);
5712 if (sd)
5713 sd->child = NULL;
5714 }
5715
5716 sched_domain_debug(sd, cpu);
5717
5718 rq_attach_root(rq, rd);
5719 tmp = rq->sd;
5720 rcu_assign_pointer(rq->sd, sd);
5721 destroy_sched_domains(tmp, cpu);
5722}
5723
5724/* cpus with isolated domains */
5725static cpumask_var_t cpu_isolated_map;
5726
5727/* Set up the mask of cpus configured for isolated domains */
5728static int __init isolated_cpu_setup(char *str)
5729{
5730 alloc_bootmem_cpumask_var(&cpu_isolated_map);
5731 cpulist_parse(str, cpu_isolated_map);
5732 return 1;
5733}
5734
5735__setup("isolcpus=", isolated_cpu_setup);
5736
5737#ifdef CONFIG_NUMA
5738
5739/**
5740 * find_next_best_node - find the next node to include in a sched_domain
5741 * @node: node whose sched_domain we're building
5742 * @used_nodes: nodes already in the sched_domain
5743 *
5744 * Find the next node to include in a given scheduling domain. Simply
5745 * finds the closest node not already in the @used_nodes map.
5746 *
5747 * Should use nodemask_t.
5748 */
5749static int find_next_best_node(int node, nodemask_t *used_nodes)
5750{
5751 int i, n, val, min_val, best_node = -1;
5752
5753 min_val = INT_MAX;
5754
5755 for (i = 0; i < nr_node_ids; i++) {
5756 /* Start at @node */
5757 n = (node + i) % nr_node_ids;
5758
5759 if (!nr_cpus_node(n))
5760 continue;
5761
5762 /* Skip already used nodes */
5763 if (node_isset(n, *used_nodes))
5764 continue;
5765
5766 /* Simple min distance search */
5767 val = node_distance(node, n);
5768
5769 if (val < min_val) {
5770 min_val = val;
5771 best_node = n;
5772 }
5773 }
5774
5775 if (best_node != -1)
5776 node_set(best_node, *used_nodes);
5777 return best_node;
5778}
5779
5780/**
5781 * sched_domain_node_span - get a cpumask for a node's sched_domain
5782 * @node: node whose cpumask we're constructing
5783 * @span: resulting cpumask
5784 *
5785 * Given a node, construct a good cpumask for its sched_domain to span. It
5786 * should be one that prevents unnecessary balancing, but also spreads tasks
5787 * out optimally.
5788 */
5789static void sched_domain_node_span(int node, struct cpumask *span)
5790{
5791 nodemask_t used_nodes;
5792 int i;
5793
5794 cpumask_clear(span);
5795 nodes_clear(used_nodes);
5796
5797 cpumask_or(span, span, cpumask_of_node(node));
5798 node_set(node, used_nodes);
5799
5800 for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
5801 int next_node = find_next_best_node(node, &used_nodes);
5802 if (next_node < 0)
5803 break;
5804 cpumask_or(span, span, cpumask_of_node(next_node));
5805 }
5806}
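/*
 * Illustrative example with hypothetical distances: for four nodes where
 * node_distance(0, 1) < node_distance(0, 2) < node_distance(0, 3), and
 * fewer nodes than SD_NODES_PER_DOMAIN, sched_domain_node_span(0, span)
 * starts from node 0's cpumask and then ORs in nodes 1, 2 and 3 in order
 * of increasing distance, so the span ends up covering all four nodes.
 */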
5807
5808static const struct cpumask *cpu_node_mask(int cpu)
5809{
5810 lockdep_assert_held(&sched_domains_mutex);
5811
5812 sched_domain_node_span(cpu_to_node(cpu), sched_domains_tmpmask);
5813
5814 return sched_domains_tmpmask;
5815}
5816
5817static const struct cpumask *cpu_allnodes_mask(int cpu)
5818{
5819 return cpu_possible_mask;
5820}
5821#endif /* CONFIG_NUMA */
5822
5823static const struct cpumask *cpu_cpu_mask(int cpu)
5824{
5825 return cpumask_of_node(cpu_to_node(cpu));
5826}
5827
5828int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
5829
5830struct sd_data {
5831 struct sched_domain **__percpu sd;
5832 struct sched_group **__percpu sg;
5833 struct sched_group_power **__percpu sgp;
5834};
5835
5836struct s_data {
5837 struct sched_domain ** __percpu sd;
5838 struct root_domain *rd;
5839};
5840
5841enum s_alloc {
5842 sa_rootdomain,
5843 sa_sd,
5844 sa_sd_storage,
5845 sa_none,
5846};
5847
5848struct sched_domain_topology_level;
5849
5850typedef struct sched_domain *(*sched_domain_init_f)(struct sched_domain_topology_level *tl, int cpu);
5851typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
5852
5853#define SDTL_OVERLAP 0x01
5854
5855struct sched_domain_topology_level {
5856 sched_domain_init_f init;
5857 sched_domain_mask_f mask;
5858 int flags;
5859 struct sd_data data;
5860};
5861
5862static int
5863build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5864{
5865 struct sched_group *first = NULL, *last = NULL, *groups = NULL, *sg;
5866 const struct cpumask *span = sched_domain_span(sd);
5867 struct cpumask *covered = sched_domains_tmpmask;
5868 struct sd_data *sdd = sd->private;
5869 struct sched_domain *child;
5870 int i;
5871
5872 cpumask_clear(covered);
5873
5874 for_each_cpu(i, span) {
5875 struct cpumask *sg_span;
5876
5877 if (cpumask_test_cpu(i, covered))
5878 continue;
5879
5880 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
5881 GFP_KERNEL, cpu_to_node(i));
5882
5883 if (!sg)
5884 goto fail;
5885
5886 sg_span = sched_group_cpus(sg);
5887
5888 child = *per_cpu_ptr(sdd->sd, i);
5889 if (child->child) {
5890 child = child->child;
5891 cpumask_copy(sg_span, sched_domain_span(child));
5892 } else
5893 cpumask_set_cpu(i, sg_span);
5894
5895 cpumask_or(covered, covered, sg_span);
5896
5897 sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span));
5898 atomic_inc(&sg->sgp->ref);
5899
5900 if (cpumask_test_cpu(cpu, sg_span))
5901 groups = sg;
5902
5903 if (!first)
5904 first = sg;
5905 if (last)
5906 last->next = sg;
5907 last = sg;
5908 last->next = first;
5909 }
5910 sd->groups = groups;
5911
5912 return 0;
5913
5914fail:
5915 free_sched_groups(first, 0);
5916
5917 return -ENOMEM;
5918}
5919
5920static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
5921{
5922 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
5923 struct sched_domain *child = sd->child;
5924
5925 if (child)
5926 cpu = cpumask_first(sched_domain_span(child));
5927
5928 if (sg) {
5929 *sg = *per_cpu_ptr(sdd->sg, cpu);
5930 (*sg)->sgp = *per_cpu_ptr(sdd->sgp, cpu);
5931 atomic_set(&(*sg)->sgp->ref, 1); /* for claim_allocations */
5932 }
5933
5934 return cpu;
5935}
5936
5937/*
5938 * build_sched_groups will build a circular linked list of the groups
5939 * covered by the given span, set each group's ->cpumask correctly, and
5940 * initialize each group's ->cpu_power to 0.
5941 *
5942 * Assumes the sched_domain tree is fully constructed.
5943 */
5944static int
5945build_sched_groups(struct sched_domain *sd, int cpu)
5946{
5947 struct sched_group *first = NULL, *last = NULL;
5948 struct sd_data *sdd = sd->private;
5949 const struct cpumask *span = sched_domain_span(sd);
5950 struct cpumask *covered;
5951 int i;
5952
5953 get_group(cpu, sdd, &sd->groups);
5954 atomic_inc(&sd->groups->ref);
5955
5956 if (cpu != cpumask_first(sched_domain_span(sd)))
5957 return 0;
5958
5959 lockdep_assert_held(&sched_domains_mutex);
5960 covered = sched_domains_tmpmask;
5961
5962 cpumask_clear(covered);
5963
5964 for_each_cpu(i, span) {
5965 struct sched_group *sg;
5966 int group = get_group(i, sdd, &sg);
5967 int j;
5968
5969 if (cpumask_test_cpu(i, covered))
5970 continue;
5971
5972 cpumask_clear(sched_group_cpus(sg));
5973 sg->sgp->power = 0;
5974
5975 for_each_cpu(j, span) {
5976 if (get_group(j, sdd, NULL) != group)
5977 continue;
5978
5979 cpumask_set_cpu(j, covered);
5980 cpumask_set_cpu(j, sched_group_cpus(sg));
5981 }
5982
5983 if (!first)
5984 first = sg;
5985 if (last)
5986 last->next = sg;
5987 last = sg;
5988 }
5989 last->next = first;
5990
5991 return 0;
5992}
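/*
 * Example (hypothetical topology): for an MC domain spanning CPUs 0-3
 * whose SMT children span {0,1} and {2,3}, get_group() resolves every
 * CPU to the first CPU of its child domain, so two groups are built,
 * one covering {0,1} and one covering {2,3}, linked into a circular
 * list with ->cpu_power left at 0 for init_sched_groups_power() below.
 */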
5993
5994/*
5995 * Initialize sched groups cpu_power.
5996 *
5997 * cpu_power indicates the capacity of a sched group, which is used when
5998 * distributing load between the different sched groups in a sched domain.
5999 * Typically, cpu_power will be the same for all groups in a sched domain unless
6000 * there are asymmetries in the topology. If there are asymmetries, a group
6001 * with more cpu_power will pick up more load than a group with less
6002 * cpu_power.
6003 */
6004static void init_sched_groups_power(int cpu, struct sched_domain *sd)
6005{
6006 struct sched_group *sg = sd->groups;
6007
6008 WARN_ON(!sd || !sg);
6009
6010 do {
6011 sg->group_weight = cpumask_weight(sched_group_cpus(sg));
6012 sg = sg->next;
6013 } while (sg != sd->groups);
6014
6015 if (cpu != group_first_cpu(sg))
6016 return;
6017
6018 update_group_power(sd, cpu);
6019}
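/*
 * Illustrative numbers, assuming SCHED_POWER_SCALE is 1024: a group whose
 * update_group_power() result comes out at 2048 is treated as having
 * roughly twice the capacity of a group at 1024, so the load balancer
 * will aim to place about twice as much load on it.
 */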
6020
6021int __weak arch_sd_sibling_asym_packing(void)
6022{
6023 return 0*SD_ASYM_PACKING;
6024}
6025
6026/*
6027 * Initializers for scheduler domains
6028 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
6029 */
6030
6031#ifdef CONFIG_SCHED_DEBUG
6032# define SD_INIT_NAME(sd, type) sd->name = #type
6033#else
6034# define SD_INIT_NAME(sd, type) do { } while (0)
6035#endif
6036
6037#define SD_INIT_FUNC(type) \
6038static noinline struct sched_domain * \
6039sd_init_##type(struct sched_domain_topology_level *tl, int cpu) \
6040{ \
6041 struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); \
6042 *sd = SD_##type##_INIT; \
6043 SD_INIT_NAME(sd, type); \
6044 sd->private = &tl->data; \
6045 return sd; \
6046}
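/*
 * For example, SD_INIT_FUNC(CPU) below expands to sd_init_CPU(), which
 * copies the SD_CPU_INIT template into this level's per-cpu sched_domain,
 * names it "CPU" under CONFIG_SCHED_DEBUG, and points sd->private at the
 * topology level's sd_data.
 */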
6047
6048SD_INIT_FUNC(CPU)
6049#ifdef CONFIG_NUMA
6050 SD_INIT_FUNC(ALLNODES)
6051 SD_INIT_FUNC(NODE)
6052#endif
6053#ifdef CONFIG_SCHED_SMT
6054 SD_INIT_FUNC(SIBLING)
6055#endif
6056#ifdef CONFIG_SCHED_MC
6057 SD_INIT_FUNC(MC)
6058#endif
6059#ifdef CONFIG_SCHED_BOOK
6060 SD_INIT_FUNC(BOOK)
6061#endif
6062
6063static int default_relax_domain_level = -1;
6064int sched_domain_level_max;
6065
6066static int __init setup_relax_domain_level(char *str)
6067{
6068 unsigned long val;
6069
6070 val = simple_strtoul(str, NULL, 0);
6071 if (val < sched_domain_level_max)
6072 default_relax_domain_level = val;
6073
6074 return 1;
6075}
6076__setup("relax_domain_level=", setup_relax_domain_level);
6077
6078static void set_domain_attribute(struct sched_domain *sd,
6079 struct sched_domain_attr *attr)
6080{
6081 int request;
6082
6083 if (!attr || attr->relax_domain_level < 0) {
6084 if (default_relax_domain_level < 0)
6085 return;
6086 else
6087 request = default_relax_domain_level;
6088 } else
6089 request = attr->relax_domain_level;
6090 if (request < sd->level) {
6091 /* turn off idle balance on this domain */
6092 sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6093 } else {
6094 /* turn on idle balance on this domain */
6095 sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
6096 }
6097}
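/*
 * Illustrative example: a requested relax_domain_level of 1 (e.g. from a
 * cpuset's sched_relax_domain_level attribute) keeps SD_BALANCE_WAKE and
 * SD_BALANCE_NEWIDLE on domains at levels 0 and 1 and clears them on
 * every higher level; which topology levels those are depends on the
 * CONFIG_SCHED_* options selected above.
 */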
6098
6099static void __sdt_free(const struct cpumask *cpu_map);
6100static int __sdt_alloc(const struct cpumask *cpu_map);
6101
6102static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
6103 const struct cpumask *cpu_map)
6104{
6105 switch (what) {
6106 case sa_rootdomain:
6107 if (!atomic_read(&d->rd->refcount))
6108 free_rootdomain(&d->rd->rcu); /* fall through */
6109 case sa_sd:
6110 free_percpu(d->sd); /* fall through */
6111 case sa_sd_storage:
6112 __sdt_free(cpu_map); /* fall through */
6113 case sa_none:
6114 break;
6115 }
6116}
6117
6118static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
6119 const struct cpumask *cpu_map)
6120{
6121 memset(d, 0, sizeof(*d));
6122
6123 if (__sdt_alloc(cpu_map))
6124 return sa_sd_storage;
6125 d->sd = alloc_percpu(struct sched_domain *);
6126 if (!d->sd)
6127 return sa_sd_storage;
6128 d->rd = alloc_rootdomain();
6129 if (!d->rd)
6130 return sa_sd;
6131 return sa_rootdomain;
6132}
6133
6134/*
6135 * NULL the sd_data elements we've used to build the sched_domain and
6136 * sched_group structure so that the subsequent __free_domain_allocs()
6137 * will not free the data we're using.
6138 */
6139static void claim_allocations(int cpu, struct sched_domain *sd)
6140{
6141 struct sd_data *sdd = sd->private;
6142
6143 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);
6144 *per_cpu_ptr(sdd->sd, cpu) = NULL;
6145
6146 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))
6147 *per_cpu_ptr(sdd->sg, cpu) = NULL;
6148
6149 if (atomic_read(&(*per_cpu_ptr(sdd->sgp, cpu))->ref))
6150 *per_cpu_ptr(sdd->sgp, cpu) = NULL;
6151}
6152
6153#ifdef CONFIG_SCHED_SMT
6154static const struct cpumask *cpu_smt_mask(int cpu)
6155{
6156 return topology_thread_cpumask(cpu);
6157}
6158#endif
6159
6160/*
6161 * Topology list, bottom-up.
6162 */
6163static struct sched_domain_topology_level default_topology[] = {
6164#ifdef CONFIG_SCHED_SMT
6165 { sd_init_SIBLING, cpu_smt_mask, },
6166#endif
6167#ifdef CONFIG_SCHED_MC
6168 { sd_init_MC, cpu_coregroup_mask, },
6169#endif
6170#ifdef CONFIG_SCHED_BOOK
6171 { sd_init_BOOK, cpu_book_mask, },
6172#endif
6173 { sd_init_CPU, cpu_cpu_mask, },
6174#ifdef CONFIG_NUMA
6175 { sd_init_NODE, cpu_node_mask, SDTL_OVERLAP, },
6176 { sd_init_ALLNODES, cpu_allnodes_mask, },
6177#endif
6178 { NULL, },
6179};
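/*
 * On a kernel with SMT, MC, BOOK and NUMA all enabled, the table above
 * produces the bottom-up hierarchy SIBLING -> MC -> BOOK -> CPU ->
 * NODE -> ALLNODES; build_sched_domains() walks it per CPU and stops
 * early once a level already spans the whole cpu_map.
 */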
6180
6181static struct sched_domain_topology_level *sched_domain_topology = default_topology;
6182
6183static int __sdt_alloc(const struct cpumask *cpu_map)
6184{
6185 struct sched_domain_topology_level *tl;
6186 int j;
6187
6188 for (tl = sched_domain_topology; tl->init; tl++) {
6189 struct sd_data *sdd = &tl->data;
6190
6191 sdd->sd = alloc_percpu(struct sched_domain *);
6192 if (!sdd->sd)
6193 return -ENOMEM;
6194
6195 sdd->sg = alloc_percpu(struct sched_group *);
6196 if (!sdd->sg)
6197 return -ENOMEM;
6198
6199 sdd->sgp = alloc_percpu(struct sched_group_power *);
6200 if (!sdd->sgp)
6201 return -ENOMEM;
6202
6203 for_each_cpu(j, cpu_map) {
6204 struct sched_domain *sd;
6205 struct sched_group *sg;
6206 struct sched_group_power *sgp;
6207
6208 sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(),
6209 GFP_KERNEL, cpu_to_node(j));
6210 if (!sd)
6211 return -ENOMEM;
6212
6213 *per_cpu_ptr(sdd->sd, j) = sd;
6214
6215 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
6216 GFP_KERNEL, cpu_to_node(j));
6217 if (!sg)
6218 return -ENOMEM;
6219
6220 *per_cpu_ptr(sdd->sg, j) = sg;
6221
6222 sgp = kzalloc_node(sizeof(struct sched_group_power),
6223 GFP_KERNEL, cpu_to_node(j));
6224 if (!sgp)
6225 return -ENOMEM;
6226
6227 *per_cpu_ptr(sdd->sgp, j) = sgp;
6228 }
6229 }
6230
6231 return 0;
6232}
6233
6234static void __sdt_free(const struct cpumask *cpu_map)
6235{
6236 struct sched_domain_topology_level *tl;
6237 int j;
6238
6239 for (tl = sched_domain_topology; tl->init; tl++) {
6240 struct sd_data *sdd = &tl->data;
6241
6242 for_each_cpu(j, cpu_map) {
6243 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
6244 if (sd && (sd->flags & SD_OVERLAP))
6245 free_sched_groups(sd->groups, 0);
6246 kfree(*per_cpu_ptr(sdd->sd, j));
6247 kfree(*per_cpu_ptr(sdd->sg, j));
6248 kfree(*per_cpu_ptr(sdd->sgp, j));
6249 }
6250 free_percpu(sdd->sd);
6251 free_percpu(sdd->sg);
6252 free_percpu(sdd->sgp);
6253 }
6254}
6255
6256struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6257 struct s_data *d, const struct cpumask *cpu_map,
6258 struct sched_domain_attr *attr, struct sched_domain *child,
6259 int cpu)
6260{
6261 struct sched_domain *sd = tl->init(tl, cpu);
6262 if (!sd)
6263 return child;
6264
6265 set_domain_attribute(sd, attr);
6266 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
6267 if (child) {
6268 sd->level = child->level + 1;
6269 sched_domain_level_max = max(sched_domain_level_max, sd->level);
6270 child->parent = sd;
6271 }
6272 sd->child = child;
6273
6274 return sd;
6275}
6276
6277/*
6278 * Build sched domains for a given set of cpus and attach the sched domains
6279 * to the individual cpus.
6280 */
6281static int build_sched_domains(const struct cpumask *cpu_map,
6282 struct sched_domain_attr *attr)
6283{
6284 enum s_alloc alloc_state = sa_none;
6285 struct sched_domain *sd;
6286 struct s_data d;
6287 int i, ret = -ENOMEM;
6288
6289 alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
6290 if (alloc_state != sa_rootdomain)
6291 goto error;
6292
6293 /* Set up domains for cpus specified by the cpu_map. */
6294 for_each_cpu(i, cpu_map) {
6295 struct sched_domain_topology_level *tl;
6296
6297 sd = NULL;
6298 for (tl = sched_domain_topology; tl->init; tl++) {
6299 sd = build_sched_domain(tl, &d, cpu_map, attr, sd, i);
6300 if (tl->flags & SDTL_OVERLAP || sched_feat(FORCE_SD_OVERLAP))
6301 sd->flags |= SD_OVERLAP;
6302 if (cpumask_equal(cpu_map, sched_domain_span(sd)))
6303 break;
6304 }
6305
6306 while (sd->child)
6307 sd = sd->child;
6308
6309 *per_cpu_ptr(d.sd, i) = sd;
6310 }
6311
6312 /* Build the groups for the domains */
6313 for_each_cpu(i, cpu_map) {
6314 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6315 sd->span_weight = cpumask_weight(sched_domain_span(sd));
6316 if (sd->flags & SD_OVERLAP) {
6317 if (build_overlap_sched_groups(sd, i))
6318 goto error;
6319 } else {
6320 if (build_sched_groups(sd, i))
6321 goto error;
6322 }
6323 }
6324 }
6325
6326 /* Calculate CPU power for physical packages and nodes */
6327 for (i = nr_cpumask_bits-1; i >= 0; i--) {
6328 if (!cpumask_test_cpu(i, cpu_map))
6329 continue;
6330
6331 for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) {
6332 claim_allocations(i, sd);
6333 init_sched_groups_power(i, sd);
6334 }
6335 }
6336
6337 /* Attach the domains */
6338 rcu_read_lock();
6339 for_each_cpu(i, cpu_map) {
6340 sd = *per_cpu_ptr(d.sd, i);
6341 cpu_attach_domain(sd, d.rd, i);
6342 }
6343 rcu_read_unlock();
6344
6345 ret = 0;
6346error:
6347 __free_domain_allocs(&d, alloc_state, cpu_map);
6348 return ret;
6349}
6350
6351static cpumask_var_t *doms_cur; /* current sched domains */
6352static int ndoms_cur; /* number of sched domains in 'doms_cur' */
6353static struct sched_domain_attr *dattr_cur;
6354	/* attributes of custom domains in 'doms_cur' */
6355
6356/*
6357 * Special case: If a kmalloc of a doms_cur partition (array of
6358 * cpumask) fails, then fall back to a single sched domain,
6359 * as determined by the single cpumask fallback_doms.
6360 */
6361static cpumask_var_t fallback_doms;
6362
6363/*
6364 * arch_update_cpu_topology lets virtualized architectures update the
6365 * cpu core maps. It is supposed to return 1 if the topology changed
6366 * or 0 if it stayed the same.
6367 */
6368int __attribute__((weak)) arch_update_cpu_topology(void)
6369{
6370 return 0;
6371}
6372
6373cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
6374{
6375 int i;
6376 cpumask_var_t *doms;
6377
6378 doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
6379 if (!doms)
6380 return NULL;
6381 for (i = 0; i < ndoms; i++) {
6382 if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
6383 free_sched_domains(doms, i);
6384 return NULL;
6385 }
6386 }
6387 return doms;
6388}
6389
6390void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
6391{
6392 unsigned int i;
6393 for (i = 0; i < ndoms; i++)
6394 free_cpumask_var(doms[i]);
6395 kfree(doms);
6396}
6397
6398/*
6399 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
6400 * For now this just excludes isolated cpus, but could be used to
6401 * exclude other special cases in the future.
6402 */
6403static int init_sched_domains(const struct cpumask *cpu_map)
6404{
6405 int err;
6406
6407 arch_update_cpu_topology();
6408 ndoms_cur = 1;
6409 doms_cur = alloc_sched_domains(ndoms_cur);
6410 if (!doms_cur)
6411 doms_cur = &fallback_doms;
6412 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
6413 dattr_cur = NULL;
6414 err = build_sched_domains(doms_cur[0], NULL);
6415 register_sched_domain_sysctl();
6416
6417 return err;
6418}
6419
6420/*
6421 * Detach sched domains from a group of cpus specified in cpu_map.
6422 * These cpus will now be attached to the NULL domain.
6423 */
6424static void detach_destroy_domains(const struct cpumask *cpu_map)
6425{
6426 int i;
6427
6428 rcu_read_lock();
6429 for_each_cpu(i, cpu_map)
6430 cpu_attach_domain(NULL, &def_root_domain, i);
6431 rcu_read_unlock();
6432}
6433
6434/* handle null as "default" */
6435static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
6436 struct sched_domain_attr *new, int idx_new)
6437{
6438 struct sched_domain_attr tmp;
6439
6440 /* fast path */
6441 if (!new && !cur)
6442 return 1;
6443
6444 tmp = SD_ATTR_INIT;
6445 return !memcmp(cur ? (cur + idx_cur) : &tmp,
6446 new ? (new + idx_new) : &tmp,
6447 sizeof(struct sched_domain_attr));
6448}
6449
6450/*
6451 * Partition sched domains as specified by the 'ndoms_new'
6452 * cpumasks in the array doms_new[]. This compares
6453 * doms_new[] to the current sched domain partitioning, doms_cur[].
6454 * It destroys each deleted domain and builds each new domain.
6455 *
6456 * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
6457 * The masks don't intersect (don't overlap). We should set up one
6458 * sched domain for each mask. CPUs not in any of the cpumasks will
6459 * not be load balanced. If the same cpumask appears both in the
6460 * current 'doms_cur' domains and in the new 'doms_new', we can leave
6461 * it as it is.
6462 *
6463 * The passed in 'doms_new' should be allocated using
6464 * alloc_sched_domains. This routine takes ownership of it and will
6465 * free_sched_domains it when done with it. If the caller failed the
6466 * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
6467 * and partition_sched_domains() will fall back to the single partition
6468 * 'fallback_doms'; this also forces the domains to be rebuilt.
6469 *
6470 * If doms_new == NULL it will be replaced with cpu_online_mask.
6471 * ndoms_new == 0 is a special case for destroying existing domains,
6472 * and it will not create the default domain.
6473 *
6474 * Call with hotplug lock held
6475 */
6476void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
6477 struct sched_domain_attr *dattr_new)
6478{
6479 int i, j, n;
6480 int new_topology;
6481
6482 mutex_lock(&sched_domains_mutex);
6483
6484 /* always unregister in case we don't destroy any domains */
6485 unregister_sched_domain_sysctl();
6486
6487 /* Let architecture update cpu core mappings. */
6488 new_topology = arch_update_cpu_topology();
6489
6490 n = doms_new ? ndoms_new : 0;
6491
6492 /* Destroy deleted domains */
6493 for (i = 0; i < ndoms_cur; i++) {
6494 for (j = 0; j < n && !new_topology; j++) {
6495 if (cpumask_equal(doms_cur[i], doms_new[j])
6496 && dattrs_equal(dattr_cur, i, dattr_new, j))
6497 goto match1;
6498 }
6499 /* no match - a current sched domain not in new doms_new[] */
6500 detach_destroy_domains(doms_cur[i]);
6501match1:
6502 ;
6503 }
6504
6505 if (doms_new == NULL) {
6506 ndoms_cur = 0;
6507 doms_new = &fallback_doms;
6508 cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
6509 WARN_ON_ONCE(dattr_new);
6510 }
6511
6512 /* Build new domains */
6513 for (i = 0; i < ndoms_new; i++) {
6514 for (j = 0; j < ndoms_cur && !new_topology; j++) {
6515 if (cpumask_equal(doms_new[i], doms_cur[j])
6516 && dattrs_equal(dattr_new, i, dattr_cur, j))
6517 goto match2;
6518 }
6519 /* no match - add a new doms_new */
6520 build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL);
6521match2:
6522 ;
6523 }
6524
6525 /* Remember the new sched domains */
6526 if (doms_cur != &fallback_doms)
6527 free_sched_domains(doms_cur, ndoms_cur);
6528 kfree(dattr_cur); /* kfree(NULL) is safe */
6529 doms_cur = doms_new;
6530 dattr_cur = dattr_new;
6531 ndoms_cur = ndoms_new;
6532
6533 register_sched_domain_sysctl();
6534
6535 mutex_unlock(&sched_domains_mutex);
6536}
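/*
 * Illustrative (hypothetical) caller, assuming 'mask_a' and 'mask_b' are
 * two prepared, non-overlapping cpumasks and the hotplug lock is held:
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *
 *	if (doms) {
 *		cpumask_copy(doms[0], mask_a);
 *		cpumask_copy(doms[1], mask_b);
 *		partition_sched_domains(2, doms, NULL);
 *	}
 *
 * Ownership of 'doms' passes to the scheduler, which frees it on the
 * next repartition; CPUs in neither mask stop being load balanced.
 */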
6537
6538#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
6539static void reinit_sched_domains(void)
6540{
6541 get_online_cpus();
6542
6543 /* Destroy domains first to force the rebuild */
6544 partition_sched_domains(0, NULL, NULL);
6545
6546 rebuild_sched_domains();
6547 put_online_cpus();
6548}
6549
6550static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
6551{
6552 unsigned int level = 0;
6553
6554 if (sscanf(buf, "%u", &level) != 1)
6555 return -EINVAL;
6556
6557 /*
6558	 * level is always positive, so there is no need to check for
6559	 * level < POWERSAVINGS_BALANCE_NONE, which is 0.
6560	 * What happens on a 0 or 1 byte write --
6561	 * do we need to check count as well?
6562 */
6563
6564 if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
6565 return -EINVAL;
6566
6567 if (smt)
6568 sched_smt_power_savings = level;
6569 else
6570 sched_mc_power_savings = level;
6571
6572 reinit_sched_domains();
6573
6574 return count;
6575}
6576
6577#ifdef CONFIG_SCHED_MC
6578static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
6579 struct sysdev_class_attribute *attr,
6580 char *page)
6581{
6582 return sprintf(page, "%u\n", sched_mc_power_savings);
6583}
6584static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
6585 struct sysdev_class_attribute *attr,
6586 const char *buf, size_t count)
6587{
6588 return sched_power_savings_store(buf, count, 0);
6589}
6590static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
6591 sched_mc_power_savings_show,
6592 sched_mc_power_savings_store);
6593#endif
6594
6595#ifdef CONFIG_SCHED_SMT
6596static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
6597 struct sysdev_class_attribute *attr,
6598 char *page)
6599{
6600 return sprintf(page, "%u\n", sched_smt_power_savings);
6601}
6602static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
6603 struct sysdev_class_attribute *attr,
6604 const char *buf, size_t count)
6605{
6606 return sched_power_savings_store(buf, count, 1);
6607}
6608static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
6609 sched_smt_power_savings_show,
6610 sched_smt_power_savings_store);
6611#endif
6612
6613int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
6614{
6615 int err = 0;
6616
6617#ifdef CONFIG_SCHED_SMT
6618 if (smt_capable())
6619 err = sysfs_create_file(&cls->kset.kobj,
6620 &attr_sched_smt_power_savings.attr);
6621#endif
6622#ifdef CONFIG_SCHED_MC
6623 if (!err && mc_capable())
6624 err = sysfs_create_file(&cls->kset.kobj,
6625 &attr_sched_mc_power_savings.attr);
6626#endif
6627 return err;
6628}
6629#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
6630
6631/*
6632 * Update cpusets according to cpu_active mask. If cpusets are
6633 * disabled, cpuset_update_active_cpus() becomes a simple wrapper
6634 * around partition_sched_domains().
6635 */
6636static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
6637 void *hcpu)
6638{
6639 switch (action & ~CPU_TASKS_FROZEN) {
6640 case CPU_ONLINE:
6641 case CPU_DOWN_FAILED:
6642 cpuset_update_active_cpus();
6643 return NOTIFY_OK;
6644 default:
6645 return NOTIFY_DONE;
6646 }
6647}
6648
6649static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
6650 void *hcpu)
6651{
6652 switch (action & ~CPU_TASKS_FROZEN) {
6653 case CPU_DOWN_PREPARE:
6654 cpuset_update_active_cpus();
6655 return NOTIFY_OK;
6656 default:
6657 return NOTIFY_DONE;
6658 }
6659}
6660
6661void __init sched_init_smp(void)
6662{
6663 cpumask_var_t non_isolated_cpus;
6664
6665 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
6666 alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
6667
6668 get_online_cpus();
6669 mutex_lock(&sched_domains_mutex);
6670 init_sched_domains(cpu_active_mask);
6671 cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
6672 if (cpumask_empty(non_isolated_cpus))
6673 cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
6674 mutex_unlock(&sched_domains_mutex);
6675 put_online_cpus();
6676
6677 hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE);
6678 hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE);
6679
6680 /* RT runtime code needs to handle some hotplug events */
6681 hotcpu_notifier(update_runtime, 0);
6682
6683 init_hrtick();
6684
6685 /* Move init over to a non-isolated CPU */
6686 if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
6687 BUG();
6688 sched_init_granularity();
6689 free_cpumask_var(non_isolated_cpus);
6690
6691 init_sched_rt_class();
6692}
6693#else
6694void __init sched_init_smp(void)
6695{
6696 sched_init_granularity();
6697}
6698#endif /* CONFIG_SMP */
6699
6700const_debug unsigned int sysctl_timer_migration = 1;
6701
6702int in_sched_functions(unsigned long addr)
6703{
6704 return in_lock_functions(addr) ||
6705 (addr >= (unsigned long)__sched_text_start
6706 && addr < (unsigned long)__sched_text_end);
6707}
6708
6709#ifdef CONFIG_CGROUP_SCHED
6710struct task_group root_task_group;
6711#endif
6712
6713DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
6714
6715void __init sched_init(void)
6716{
6717 int i, j;
6718 unsigned long alloc_size = 0, ptr;
6719
6720#ifdef CONFIG_FAIR_GROUP_SCHED
6721 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6722#endif
6723#ifdef CONFIG_RT_GROUP_SCHED
6724 alloc_size += 2 * nr_cpu_ids * sizeof(void **);
6725#endif
6726#ifdef CONFIG_CPUMASK_OFFSTACK
6727 alloc_size += num_possible_cpus() * cpumask_size();
6728#endif
6729 if (alloc_size) {
6730 ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
6731
6732#ifdef CONFIG_FAIR_GROUP_SCHED
6733 root_task_group.se = (struct sched_entity **)ptr;
6734 ptr += nr_cpu_ids * sizeof(void **);
6735
6736 root_task_group.cfs_rq = (struct cfs_rq **)ptr;
6737 ptr += nr_cpu_ids * sizeof(void **);
6738
6739#endif /* CONFIG_FAIR_GROUP_SCHED */
6740#ifdef CONFIG_RT_GROUP_SCHED
6741 root_task_group.rt_se = (struct sched_rt_entity **)ptr;
6742 ptr += nr_cpu_ids * sizeof(void **);
6743
6744 root_task_group.rt_rq = (struct rt_rq **)ptr;
6745 ptr += nr_cpu_ids * sizeof(void **);
6746
6747#endif /* CONFIG_RT_GROUP_SCHED */
6748#ifdef CONFIG_CPUMASK_OFFSTACK
6749 for_each_possible_cpu(i) {
6750 per_cpu(load_balance_tmpmask, i) = (void *)ptr;
6751 ptr += cpumask_size();
6752 }
6753#endif /* CONFIG_CPUMASK_OFFSTACK */
6754 }
6755
6756#ifdef CONFIG_SMP
6757 init_defrootdomain();
6758#endif
6759
6760 init_rt_bandwidth(&def_rt_bandwidth,
6761 global_rt_period(), global_rt_runtime());
6762
6763#ifdef CONFIG_RT_GROUP_SCHED
6764 init_rt_bandwidth(&root_task_group.rt_bandwidth,
6765 global_rt_period(), global_rt_runtime());
6766#endif /* CONFIG_RT_GROUP_SCHED */
6767
6768#ifdef CONFIG_CGROUP_SCHED
6769 list_add(&root_task_group.list, &task_groups);
6770 INIT_LIST_HEAD(&root_task_group.children);
6771 INIT_LIST_HEAD(&root_task_group.siblings);
6772 autogroup_init(&init_task);
6773#endif /* CONFIG_CGROUP_SCHED */
6774
6775 for_each_possible_cpu(i) {
6776 struct rq *rq;
6777
6778 rq = cpu_rq(i);
6779 raw_spin_lock_init(&rq->lock);
6780 rq->nr_running = 0;
6781 rq->calc_load_active = 0;
6782 rq->calc_load_update = jiffies + LOAD_FREQ;
6783 init_cfs_rq(&rq->cfs);
6784 init_rt_rq(&rq->rt, rq);
6785#ifdef CONFIG_FAIR_GROUP_SCHED
6786 root_task_group.shares = ROOT_TASK_GROUP_LOAD;
6787 INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
6788 /*
6789 * How much cpu bandwidth does root_task_group get?
6790 *
6791		 * In the case of task-groups formed through the cgroup filesystem, it
6792 * gets 100% of the cpu resources in the system. This overall
6793 * system cpu resource is divided among the tasks of
6794 * root_task_group and its child task-groups in a fair manner,
6795 * based on each entity's (task or task-group's) weight
6796 * (se->load.weight).
6797 *
6798		 * In other words, if root_task_group has 10 tasks (each of weight
6799		 * 1024) and two child groups A0 and A1 (also of weight 1024 each),
6800 * then A0's share of the cpu resource is:
6801 *
6802 * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
6803 *
6804 * We achieve this by letting root_task_group's tasks sit
6805 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
6806 */
6807 init_cfs_bandwidth(&root_task_group.cfs_bandwidth);
6808 init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
6809#endif /* CONFIG_FAIR_GROUP_SCHED */
6810
6811 rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
6812#ifdef CONFIG_RT_GROUP_SCHED
6813 INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
6814 init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
6815#endif
6816
6817 for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
6818 rq->cpu_load[j] = 0;
6819
6820 rq->last_load_update_tick = jiffies;
6821
6822#ifdef CONFIG_SMP
6823 rq->sd = NULL;
6824 rq->rd = NULL;
6825 rq->cpu_power = SCHED_POWER_SCALE;
6826 rq->post_schedule = 0;
6827 rq->active_balance = 0;
6828 rq->next_balance = jiffies;
6829 rq->push_cpu = 0;
6830 rq->cpu = i;
6831 rq->online = 0;
6832 rq->idle_stamp = 0;
6833 rq->avg_idle = 2*sysctl_sched_migration_cost;
6834 rq_attach_root(rq, &def_root_domain);
6835#ifdef CONFIG_NO_HZ
6836 rq->nohz_balance_kick = 0;
6837#endif
6838#endif
6839 init_rq_hrtick(rq);
6840 atomic_set(&rq->nr_iowait, 0);
6841 }
6842
6843 set_load_weight(&init_task);
6844
6845#ifdef CONFIG_PREEMPT_NOTIFIERS
6846 INIT_HLIST_HEAD(&init_task.preempt_notifiers);
6847#endif
6848
6849#ifdef CONFIG_RT_MUTEXES
6850 plist_head_init(&init_task.pi_waiters);
6851#endif
6852
6853 /*
6854 * The boot idle thread does lazy MMU switching as well:
6855 */
6856 atomic_inc(&init_mm.mm_count);
6857 enter_lazy_tlb(&init_mm, current);
6858
6859 /*
6860 * Make us the idle thread. Technically, schedule() should not be
6861	 * called from this thread; however, somewhere below it might be.
6862	 * Because we are the idle thread, we just pick up running again
6863 * when this runqueue becomes "idle".
6864 */
6865 init_idle(current, smp_processor_id());
6866
6867 calc_load_update = jiffies + LOAD_FREQ;
6868
6869 /*
6870 * During early bootup we pretend to be a normal task:
6871 */
6872 current->sched_class = &fair_sched_class;
6873
6874#ifdef CONFIG_SMP
6875 zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
6876 /* May be allocated at isolcpus cmdline parse time */
6877 if (cpu_isolated_map == NULL)
6878 zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
6879#endif
6880 init_sched_fair_class();
6881
6882 scheduler_running = 1;
6883}
6884
6885#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
6886static inline int preempt_count_equals(int preempt_offset)
6887{
6888 int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
6889
6890 return (nested == preempt_offset);
6891}
6892
6893void __might_sleep(const char *file, int line, int preempt_offset)
6894{
6895 static unsigned long prev_jiffy; /* ratelimiting */
6896
6897 rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
6898 if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
6899 system_state != SYSTEM_RUNNING || oops_in_progress)
6900 return;
6901 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6902 return;
6903 prev_jiffy = jiffies;
6904
6905 printk(KERN_ERR
6906 "BUG: sleeping function called from invalid context at %s:%d\n",
6907 file, line);
6908 printk(KERN_ERR
6909 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
6910 in_atomic(), irqs_disabled(),
6911 current->pid, current->comm);
6912
6913 debug_show_held_locks(current);
6914 if (irqs_disabled())
6915 print_irqtrace_events(current);
6916 dump_stack();
6917}
6918EXPORT_SYMBOL(__might_sleep);
6919#endif
6920
6921#ifdef CONFIG_MAGIC_SYSRQ
6922static void normalize_task(struct rq *rq, struct task_struct *p)
6923{
6924 const struct sched_class *prev_class = p->sched_class;
6925 int old_prio = p->prio;
6926 int on_rq;
6927
6928 on_rq = p->on_rq;
6929 if (on_rq)
6930 deactivate_task(rq, p, 0);
6931 __setscheduler(rq, p, SCHED_NORMAL, 0);
6932 if (on_rq) {
6933 activate_task(rq, p, 0);
6934 resched_task(rq->curr);
6935 }
6936
6937 check_class_changed(rq, p, prev_class, old_prio);
6938}
6939
6940void normalize_rt_tasks(void)
6941{
6942 struct task_struct *g, *p;
6943 unsigned long flags;
6944 struct rq *rq;
6945
6946 read_lock_irqsave(&tasklist_lock, flags);
6947 do_each_thread(g, p) {
6948 /*
6949 * Only normalize user tasks:
6950 */
6951 if (!p->mm)
6952 continue;
6953
6954 p->se.exec_start = 0;
6955#ifdef CONFIG_SCHEDSTATS
6956 p->se.statistics.wait_start = 0;
6957 p->se.statistics.sleep_start = 0;
6958 p->se.statistics.block_start = 0;
6959#endif
6960
6961 if (!rt_task(p)) {
6962 /*
6963			 * Renice userspace tasks with a negative
6964			 * nice level back to 0:
6965 */
6966 if (TASK_NICE(p) < 0 && p->mm)
6967 set_user_nice(p, 0);
6968 continue;
6969 }
6970
6971 raw_spin_lock(&p->pi_lock);
6972 rq = __task_rq_lock(p);
6973
6974 normalize_task(rq, p);
6975
6976 __task_rq_unlock(rq);
6977 raw_spin_unlock(&p->pi_lock);
6978 } while_each_thread(g, p);
6979
6980 read_unlock_irqrestore(&tasklist_lock, flags);
6981}
6982
6983#endif /* CONFIG_MAGIC_SYSRQ */
6984
6985#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
6986/*
6987 * These functions are only useful for the IA64 MCA handling, or kdb.
6988 *
6989 * They can only be called when the whole system has been
6990 * stopped - every CPU needs to be quiescent, and no scheduling
6991 * activity can take place. Using them for anything else would
6992 * be a serious bug, and as a result, they aren't even visible
6993 * under any other configuration.
6994 */
6995
6996/**
6997 * curr_task - return the current task for a given cpu.
6998 * @cpu: the processor in question.
6999 *
7000 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7001 */
7002struct task_struct *curr_task(int cpu)
7003{
7004 return cpu_curr(cpu);
7005}
7006
7007#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
7008
7009#ifdef CONFIG_IA64
7010/**
7011 * set_curr_task - set the current task for a given cpu.
7012 * @cpu: the processor in question.
7013 * @p: the task pointer to set.
7014 *
7015 * Description: This function must only be used when non-maskable interrupts
7016 * are serviced on a separate stack. It allows the architecture to switch the
7017 * notion of the current task on a cpu in a non-blocking manner. This function
7018 * must be called with all CPUs synchronized and interrupts disabled; the
7019 * caller must save the original value of the current task (see
7020 * curr_task() above) and restore that value before reenabling interrupts and
7021 * re-starting the system.
7022 *
7023 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
7024 */
7025void set_curr_task(int cpu, struct task_struct *p)
7026{
7027 cpu_curr(cpu) = p;
7028}
7029
7030#endif
7031
7032#ifdef CONFIG_RT_GROUP_SCHED
7033#else /* !CONFIG_RT_GROUP_SCHED */
7034#endif /* CONFIG_RT_GROUP_SCHED */
7035
7036#ifdef CONFIG_CGROUP_SCHED
7037/* task_group_lock serializes the addition/removal of task groups */
7038static DEFINE_SPINLOCK(task_group_lock);
7039
7040static void free_sched_group(struct task_group *tg)
7041{
7042 free_fair_sched_group(tg);
7043 free_rt_sched_group(tg);
7044 autogroup_free(tg);
7045 kfree(tg);
7046}
7047
7048/* allocate runqueue etc for a new task group */
7049struct task_group *sched_create_group(struct task_group *parent)
7050{
7051 struct task_group *tg;
7052 unsigned long flags;
7053
7054 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
7055 if (!tg)
7056 return ERR_PTR(-ENOMEM);
7057
7058 if (!alloc_fair_sched_group(tg, parent))
7059 goto err;
7060
7061 if (!alloc_rt_sched_group(tg, parent))
7062 goto err;
7063
7064 spin_lock_irqsave(&task_group_lock, flags);
7065 list_add_rcu(&tg->list, &task_groups);
7066
7067 WARN_ON(!parent); /* root should already exist */
7068
7069 tg->parent = parent;
7070 INIT_LIST_HEAD(&tg->children);
7071 list_add_rcu(&tg->siblings, &parent->children);
7072 spin_unlock_irqrestore(&task_group_lock, flags);
7073
7074 return tg;
7075
7076err:
7077 free_sched_group(tg);
7078 return ERR_PTR(-ENOMEM);
7079}
7080
7081/* rcu callback to free various structures associated with a task group */
7082static void free_sched_group_rcu(struct rcu_head *rhp)
7083{
7084 /* now it should be safe to free those cfs_rqs */
7085 free_sched_group(container_of(rhp, struct task_group, rcu));
7086}
7087
7088/* Destroy runqueue etc associated with a task group */
7089void sched_destroy_group(struct task_group *tg)
7090{
7091 unsigned long flags;
7092 int i;
7093
7094 /* end participation in shares distribution */
7095 for_each_possible_cpu(i)
7096 unregister_fair_sched_group(tg, i);
7097
7098 spin_lock_irqsave(&task_group_lock, flags);
7099 list_del_rcu(&tg->list);
7100 list_del_rcu(&tg->siblings);
7101 spin_unlock_irqrestore(&task_group_lock, flags);
7102
7103	/* wait for possible concurrent references to cfs_rqs to complete */
7104 call_rcu(&tg->rcu, free_sched_group_rcu);
7105}
7106
7107/* Change a task's runqueue when it moves between groups.
7108 * The caller of this function should have put the task in its new group
7109 * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
7110 * reflect its new group.
7111 */
7112void sched_move_task(struct task_struct *tsk)
7113{
7114 int on_rq, running;
7115 unsigned long flags;
7116 struct rq *rq;
7117
7118 rq = task_rq_lock(tsk, &flags);
7119
7120 running = task_current(rq, tsk);
7121 on_rq = tsk->on_rq;
7122
7123 if (on_rq)
7124 dequeue_task(rq, tsk, 0);
7125 if (unlikely(running))
7126 tsk->sched_class->put_prev_task(rq, tsk);
7127
7128#ifdef CONFIG_FAIR_GROUP_SCHED
7129 if (tsk->sched_class->task_move_group)
7130 tsk->sched_class->task_move_group(tsk, on_rq);
7131 else
7132#endif
7133 set_task_rq(tsk, task_cpu(tsk));
7134
7135 if (unlikely(running))
7136 tsk->sched_class->set_curr_task(rq);
7137 if (on_rq)
7138 enqueue_task(rq, tsk, 0);
7139
7140 task_rq_unlock(rq, tsk, &flags);
7141}
7142#endif /* CONFIG_CGROUP_SCHED */
7143
7144#ifdef CONFIG_FAIR_GROUP_SCHED
7145#endif
7146
7147#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
7148static unsigned long to_ratio(u64 period, u64 runtime)
7149{
7150 if (runtime == RUNTIME_INF)
7151 return 1ULL << 20;
7152
7153 return div64_u64(runtime << 20, period);
7154}
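/*
 * to_ratio() expresses runtime/period as a 20-bit fixed-point fraction.
 * For example, a 1s period with 950ms of runtime yields
 * (950000000 << 20) / 1000000000 ~= 996147, roughly 0.95 * (1 << 20),
 * while RUNTIME_INF is mapped to exactly 1 << 20 (a ratio of 1).
 */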
7155#endif
7156
7157#ifdef CONFIG_RT_GROUP_SCHED
7158/*
7159 * Ensure that the real time constraints are schedulable.
7160 */
7161static DEFINE_MUTEX(rt_constraints_mutex);
7162
7163/* Must be called with tasklist_lock held */
7164static inline int tg_has_rt_tasks(struct task_group *tg)
7165{
7166 struct task_struct *g, *p;
7167
7168 do_each_thread(g, p) {
7169 if (rt_task(p) && task_rq(p)->rt.tg == tg)
7170 return 1;
7171 } while_each_thread(g, p);
7172
7173 return 0;
7174}
7175
7176struct rt_schedulable_data {
7177 struct task_group *tg;
7178 u64 rt_period;
7179 u64 rt_runtime;
7180};
7181
7182static int tg_rt_schedulable(struct task_group *tg, void *data)
7183{
7184 struct rt_schedulable_data *d = data;
7185 struct task_group *child;
7186 unsigned long total, sum = 0;
7187 u64 period, runtime;
7188
7189 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7190 runtime = tg->rt_bandwidth.rt_runtime;
7191
7192 if (tg == d->tg) {
7193 period = d->rt_period;
7194 runtime = d->rt_runtime;
7195 }
7196
7197 /*
7198 * Cannot have more runtime than the period.
7199 */
7200 if (runtime > period && runtime != RUNTIME_INF)
7201 return -EINVAL;
7202
7203 /*
7204 * Ensure we don't starve existing RT tasks.
7205 */
7206 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
7207 return -EBUSY;
7208
7209 total = to_ratio(period, runtime);
7210
7211 /*
7212 * Nobody can have more than the global setting allows.
7213 */
7214 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
7215 return -EINVAL;
7216
7217 /*
7218 * The sum of our children's runtime should not exceed our own.
7219 */
7220 list_for_each_entry_rcu(child, &tg->children, siblings) {
7221 period = ktime_to_ns(child->rt_bandwidth.rt_period);
7222 runtime = child->rt_bandwidth.rt_runtime;
7223
7224 if (child == d->tg) {
7225 period = d->rt_period;
7226 runtime = d->rt_runtime;
7227 }
7228
7229 sum += to_ratio(period, runtime);
7230 }
7231
7232 if (sum > total)
7233 return -EINVAL;
7234
7235 return 0;
7236}
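/*
 * Worked example with hypothetical groups: setting a group to a 1s period
 * and 400ms runtime gives it a ratio of to_ratio(1s, 400ms) = 419430
 * (about 0.40 * (1 << 20)).  If it already has two children, each with a
 * 1s period and 250ms runtime, their ratios sum to 524288 (about
 * 0.50 * (1 << 20)), which exceeds the parent's 419430, so this walk
 * returns -EINVAL and tg_set_rt_bandwidth() below refuses the update.
 */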
7237
7238static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
7239{
7240 int ret;
7241
7242 struct rt_schedulable_data data = {
7243 .tg = tg,
7244 .rt_period = period,
7245 .rt_runtime = runtime,
7246 };
7247
7248 rcu_read_lock();
7249 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
7250 rcu_read_unlock();
7251
7252 return ret;
7253}
7254
7255static int tg_set_rt_bandwidth(struct task_group *tg,
7256 u64 rt_period, u64 rt_runtime)
7257{
7258 int i, err = 0;
7259
7260 mutex_lock(&rt_constraints_mutex);
7261 read_lock(&tasklist_lock);
7262 err = __rt_schedulable(tg, rt_period, rt_runtime);
7263 if (err)
7264 goto unlock;
7265
7266 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7267 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
7268 tg->rt_bandwidth.rt_runtime = rt_runtime;
7269
7270 for_each_possible_cpu(i) {
7271 struct rt_rq *rt_rq = tg->rt_rq[i];
7272
7273 raw_spin_lock(&rt_rq->rt_runtime_lock);
7274 rt_rq->rt_runtime = rt_runtime;
7275 raw_spin_unlock(&rt_rq->rt_runtime_lock);
7276 }
7277 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
7278unlock:
7279 read_unlock(&tasklist_lock);
7280 mutex_unlock(&rt_constraints_mutex);
7281
7282 return err;
7283}
7284
7285int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
7286{
7287 u64 rt_runtime, rt_period;
7288
7289 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
7290 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
7291 if (rt_runtime_us < 0)
7292 rt_runtime = RUNTIME_INF;
7293
7294 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7295}
7296
7297long sched_group_rt_runtime(struct task_group *tg)
7298{
7299 u64 rt_runtime_us;
7300
7301 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
7302 return -1;
7303
7304 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
7305 do_div(rt_runtime_us, NSEC_PER_USEC);
7306 return rt_runtime_us;
7307}
7308
7309int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
7310{
7311 u64 rt_runtime, rt_period;
7312
7313 rt_period = (u64)rt_period_us * NSEC_PER_USEC;
7314 rt_runtime = tg->rt_bandwidth.rt_runtime;
7315
7316 if (rt_period == 0)
7317 return -EINVAL;
7318
7319 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
7320}
7321
7322long sched_group_rt_period(struct task_group *tg)
7323{
7324 u64 rt_period_us;
7325
7326 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
7327 do_div(rt_period_us, NSEC_PER_USEC);
7328 return rt_period_us;
7329}
7330
7331static int sched_rt_global_constraints(void)
7332{
7333 u64 runtime, period;
7334 int ret = 0;
7335
7336 if (sysctl_sched_rt_period <= 0)
7337 return -EINVAL;
7338
7339 runtime = global_rt_runtime();
7340 period = global_rt_period();
7341
7342 /*
7343 * Sanity check on the sysctl variables.
7344 */
7345 if (runtime > period && runtime != RUNTIME_INF)
7346 return -EINVAL;
7347
7348 mutex_lock(&rt_constraints_mutex);
7349 read_lock(&tasklist_lock);
7350 ret = __rt_schedulable(NULL, 0, 0);
7351 read_unlock(&tasklist_lock);
7352 mutex_unlock(&rt_constraints_mutex);
7353
7354 return ret;
7355}
7356
7357int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
7358{
7359 /* Don't accept realtime tasks when there is no way for them to run */
7360 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
7361 return 0;
7362
7363 return 1;
7364}
7365
7366#else /* !CONFIG_RT_GROUP_SCHED */
7367static int sched_rt_global_constraints(void)
7368{
7369 unsigned long flags;
7370 int i;
7371
7372 if (sysctl_sched_rt_period <= 0)
7373 return -EINVAL;
7374
7375 /*
7376	 * There are always some RT tasks in the root group
7377	 * -- migration, kstopmachine, etc.
7378 */
7379 if (sysctl_sched_rt_runtime == 0)
7380 return -EBUSY;
7381
7382 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
7383 for_each_possible_cpu(i) {
7384 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
7385
7386 raw_spin_lock(&rt_rq->rt_runtime_lock);
7387 rt_rq->rt_runtime = global_rt_runtime();
7388 raw_spin_unlock(&rt_rq->rt_runtime_lock);
7389 }
7390 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
7391
7392 return 0;
7393}
7394#endif /* CONFIG_RT_GROUP_SCHED */
7395
7396int sched_rt_handler(struct ctl_table *table, int write,
7397 void __user *buffer, size_t *lenp,
7398 loff_t *ppos)
7399{
7400 int ret;
7401 int old_period, old_runtime;
7402 static DEFINE_MUTEX(mutex);
7403
7404 mutex_lock(&mutex);
7405 old_period = sysctl_sched_rt_period;
7406 old_runtime = sysctl_sched_rt_runtime;
7407
7408 ret = proc_dointvec(table, write, buffer, lenp, ppos);
7409
7410 if (!ret && write) {
7411 ret = sched_rt_global_constraints();
7412 if (ret) {
7413 sysctl_sched_rt_period = old_period;
7414 sysctl_sched_rt_runtime = old_runtime;
7415 } else {
7416 def_rt_bandwidth.rt_runtime = global_rt_runtime();
7417 def_rt_bandwidth.rt_period =
7418 ns_to_ktime(global_rt_period());
7419 }
7420 }
7421 mutex_unlock(&mutex);
7422
7423 return ret;
7424}
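
The handler above uses a small save/validate/restore pattern. A minimal sketch of that pattern in isolation (the two variables and validate() are stand-ins, not the kernel's):

    #include <stdio.h>

    static int period  = 1000000;           /* stand-ins for the two sysctls */
    static int runtime = 950000;

    static int validate(void)
    {
            return runtime > period ? -1 : 0;     /* "-EINVAL"-style failure */
    }

    static int write_sysctls(int new_period, int new_runtime)
    {
            int old_period = period, old_runtime = runtime;

            period  = new_period;            /* what proc_dointvec() would do */
            runtime = new_runtime;

            if (validate()) {                /* constraint check failed ...   */
                    period  = old_period;    /* ... so roll both values back  */
                    runtime = old_runtime;
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            int ret = write_sysctls(1000000, 2000000);      /* rejected, rolled back */
            printf("ret=%d period=%d runtime=%d\n", ret, period, runtime);

            ret = write_sysctls(2000000, 1000000);          /* accepted */
            printf("ret=%d period=%d runtime=%d\n", ret, period, runtime);
            return 0;
    }
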
7425
7426#ifdef CONFIG_CGROUP_SCHED
7427
7428/* return corresponding task_group object of a cgroup */
7429static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
7430{
7431 return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
7432 struct task_group, css);
7433}
7434
7435static struct cgroup_subsys_state *
7436cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
7437{
7438 struct task_group *tg, *parent;
7439
7440 if (!cgrp->parent) {
7441 /* This is early initialization for the top cgroup */
7442 return &root_task_group.css;
7443 }
7444
7445 parent = cgroup_tg(cgrp->parent);
7446 tg = sched_create_group(parent);
7447 if (IS_ERR(tg))
7448 return ERR_PTR(-ENOMEM);
7449
7450 return &tg->css;
7451}
7452
7453static void
7454cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
7455{
7456 struct task_group *tg = cgroup_tg(cgrp);
7457
7458 sched_destroy_group(tg);
7459}
7460
7461static int
7462cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
7463{
7464#ifdef CONFIG_RT_GROUP_SCHED
7465 if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
7466 return -EINVAL;
7467#else
7468 /* We don't support RT-tasks being in separate groups */
7469 if (tsk->sched_class != &fair_sched_class)
7470 return -EINVAL;
7471#endif
7472 return 0;
7473}
7474
7475static void
7476cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
7477{
7478 sched_move_task(tsk);
7479}
7480
7481static void
7482cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
7483 struct cgroup *old_cgrp, struct task_struct *task)
7484{
7485 /*
7486 * cgroup_exit() is called in the copy_process() failure path.
7487	 * Ignore this case since the task hasn't run yet; this avoids
7488	 * trying to poke a half-freed task state from generic code.
7489 */
7490 if (!(task->flags & PF_EXITING))
7491 return;
7492
7493 sched_move_task(task);
7494}
7495
7496#ifdef CONFIG_FAIR_GROUP_SCHED
7497static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
7498 u64 shareval)
7499{
7500 return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
7501}
7502
7503static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
7504{
7505 struct task_group *tg = cgroup_tg(cgrp);
7506
7507 return (u64) scale_load_down(tg->shares);
7508}
7509
7510#ifdef CONFIG_CFS_BANDWIDTH
7511static DEFINE_MUTEX(cfs_constraints_mutex);
7512
7513const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */
7514const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */
7515
7516static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime);
7517
7518static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
7519{
7520 int i, ret = 0, runtime_enabled, runtime_was_enabled;
7521 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7522
7523 if (tg == &root_task_group)
7524 return -EINVAL;
7525
7526 /*
7527	 * Ensure we have at least some amount of bandwidth every period. This is
7528	 * to prevent reaching a state of large arrears when throttled via
7529	 * entity_tick(), resulting in prolonged exit starvation.
7530 */
7531 if (quota < min_cfs_quota_period || period < min_cfs_quota_period)
7532 return -EINVAL;
7533
7534 /*
7535	 * Likewise, bound things on the other side by preventing insane quota
7536	 * periods. This also allows us to normalize when computing quota
7537	 * feasibility.
7538 */
7539 if (period > max_cfs_quota_period)
7540 return -EINVAL;
7541
7542 mutex_lock(&cfs_constraints_mutex);
7543 ret = __cfs_schedulable(tg, period, quota);
7544 if (ret)
7545 goto out_unlock;
7546
7547 runtime_enabled = quota != RUNTIME_INF;
7548 runtime_was_enabled = cfs_b->quota != RUNTIME_INF;
7549 account_cfs_bandwidth_used(runtime_enabled, runtime_was_enabled);
7550 raw_spin_lock_irq(&cfs_b->lock);
7551 cfs_b->period = ns_to_ktime(period);
7552 cfs_b->quota = quota;
7553
7554 __refill_cfs_bandwidth_runtime(cfs_b);
7555 /* restart the period timer (if active) to handle new period expiry */
7556 if (runtime_enabled && cfs_b->timer_active) {
7557 /* force a reprogram */
7558 cfs_b->timer_active = 0;
7559 __start_cfs_bandwidth(cfs_b);
7560 }
7561 raw_spin_unlock_irq(&cfs_b->lock);
7562
7563 for_each_possible_cpu(i) {
7564 struct cfs_rq *cfs_rq = tg->cfs_rq[i];
7565 struct rq *rq = cfs_rq->rq;
7566
7567 raw_spin_lock_irq(&rq->lock);
7568 cfs_rq->runtime_enabled = runtime_enabled;
7569 cfs_rq->runtime_remaining = 0;
7570
7571 if (cfs_rq->throttled)
7572 unthrottle_cfs_rq(cfs_rq);
7573 raw_spin_unlock_irq(&rq->lock);
7574 }
7575out_unlock:
7576 mutex_unlock(&cfs_constraints_mutex);
7577
7578 return ret;
7579}
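
A rough illustration of the two bounds checks above (the check() helper and the sample values are invented for the sketch; the real limits are the 1 ms floor and 1 s ceiling defined just above):

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_MSEC 1000000ULL
    #define NSEC_PER_SEC  1000000000ULL

    static const uint64_t min_period = 1 * NSEC_PER_MSEC;   /* 1 ms */
    static const uint64_t max_period = 1 * NSEC_PER_SEC;    /* 1 s  */

    static int check(uint64_t period, uint64_t quota)
    {
            if (quota < min_period || period < min_period)
                    return -1;      /* too little bandwidth per period */
            if (period > max_period)
                    return -1;      /* period too long */
            return 0;
    }

    int main(void)
    {
            /* 50ms quota every 100ms period: accepted */
            printf("%d\n", check(100 * NSEC_PER_MSEC, 50 * NSEC_PER_MSEC));
            /* 0.5ms quota: rejected, below the 1ms floor */
            printf("%d\n", check(100 * NSEC_PER_MSEC, NSEC_PER_MSEC / 2));
            /* 2s period: rejected, above the 1s ceiling */
            printf("%d\n", check(2 * NSEC_PER_SEC, 50 * NSEC_PER_MSEC));
            return 0;
    }
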
7580
7581int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
7582{
7583 u64 quota, period;
7584
7585 period = ktime_to_ns(tg->cfs_bandwidth.period);
7586 if (cfs_quota_us < 0)
7587 quota = RUNTIME_INF;
7588 else
7589 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
7590
7591 return tg_set_cfs_bandwidth(tg, period, quota);
7592}
7593
7594long tg_get_cfs_quota(struct task_group *tg)
7595{
7596 u64 quota_us;
7597
7598 if (tg->cfs_bandwidth.quota == RUNTIME_INF)
7599 return -1;
7600
7601 quota_us = tg->cfs_bandwidth.quota;
7602 do_div(quota_us, NSEC_PER_USEC);
7603
7604 return quota_us;
7605}
7606
7607int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
7608{
7609 u64 quota, period;
7610
7611 period = (u64)cfs_period_us * NSEC_PER_USEC;
7612 quota = tg->cfs_bandwidth.quota;
7613
7614 if (period <= 0)
7615 return -EINVAL;
7616
7617 return tg_set_cfs_bandwidth(tg, period, quota);
7618}
7619
7620long tg_get_cfs_period(struct task_group *tg)
7621{
7622 u64 cfs_period_us;
7623
7624 cfs_period_us = ktime_to_ns(tg->cfs_bandwidth.period);
7625 do_div(cfs_period_us, NSEC_PER_USEC);
7626
7627 return cfs_period_us;
7628}
7629
7630static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
7631{
7632 return tg_get_cfs_quota(cgroup_tg(cgrp));
7633}
7634
7635static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
7636 s64 cfs_quota_us)
7637{
7638 return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
7639}
7640
7641static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
7642{
7643 return tg_get_cfs_period(cgroup_tg(cgrp));
7644}
7645
7646static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
7647 u64 cfs_period_us)
7648{
7649 return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
7650}
7651
7652struct cfs_schedulable_data {
7653 struct task_group *tg;
7654 u64 period, quota;
7655};
7656
7657/*
7658 * normalize group quota/period to be quota/max_period
7659 * note: units are usecs
7660 */
7661static u64 normalize_cfs_quota(struct task_group *tg,
7662 struct cfs_schedulable_data *d)
7663{
7664 u64 quota, period;
7665
7666 if (tg == d->tg) {
7667 period = d->period;
7668 quota = d->quota;
7669 } else {
7670 period = tg_get_cfs_period(tg);
7671 quota = tg_get_cfs_quota(tg);
7672 }
7673
7674 /* note: these should typically be equivalent */
7675 if (quota == RUNTIME_INF || quota == -1)
7676 return RUNTIME_INF;
7677
7678 return to_ratio(period, quota);
7679}
7680
7681static int tg_cfs_schedulable_down(struct task_group *tg, void *data)
7682{
7683 struct cfs_schedulable_data *d = data;
7684 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7685 s64 quota = 0, parent_quota = -1;
7686
7687 if (!tg->parent) {
7688 quota = RUNTIME_INF;
7689 } else {
7690 struct cfs_bandwidth *parent_b = &tg->parent->cfs_bandwidth;
7691
7692 quota = normalize_cfs_quota(tg, d);
7693 parent_quota = parent_b->hierarchal_quota;
7694
7695 /*
7696 * ensure max(child_quota) <= parent_quota, inherit when no
7697 * limit is set
7698 */
7699 if (quota == RUNTIME_INF)
7700 quota = parent_quota;
7701 else if (parent_quota != RUNTIME_INF && quota > parent_quota)
7702 return -EINVAL;
7703 }
7704 cfs_b->hierarchal_quota = quota;
7705
7706 return 0;
7707}
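
A worked example of the top-down check above, with an invented parts-per-million ratio helper standing in for the kernel's to_ratio() (defined elsewhere): a child group may not be promised more bandwidth than its parent, and an unlimited child inherits the parent's effective limit.

    #include <stdio.h>

    /* assumption: express bandwidth as quota/period in parts per million */
    static long long ratio_ppm(long long period_us, long long quota_us)
    {
            if (quota_us < 0)
                    return -1;                      /* RUNTIME_INF: no limit */
            return quota_us * 1000000 / period_us;
    }

    int main(void)
    {
            long long parent = ratio_ppm(100000, 50000);  /* 50ms/100ms -> 500000 */
            long long child1 = ratio_ppm(100000, 80000);  /* 80ms/100ms -> 800000 */
            long long child2 = ratio_ppm(100000, -1);     /* unlimited            */

            printf("child1: %s\n", child1 > parent ? "rejected (-EINVAL)" : "ok");

            if (child2 < 0)
                    child2 = parent;                /* inherit the parent's limit */
            printf("child2 inherits %lld ppm\n", child2);
            return 0;
    }
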
7708
7709static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
7710{
7711 int ret;
7712 struct cfs_schedulable_data data = {
7713 .tg = tg,
7714 .period = period,
7715 .quota = quota,
7716 };
7717
7718 if (quota != RUNTIME_INF) {
7719 do_div(data.period, NSEC_PER_USEC);
7720 do_div(data.quota, NSEC_PER_USEC);
7721 }
7722
7723 rcu_read_lock();
7724 ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data);
7725 rcu_read_unlock();
7726
7727 return ret;
7728}
7729
7730static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
7731 struct cgroup_map_cb *cb)
7732{
7733 struct task_group *tg = cgroup_tg(cgrp);
7734 struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
7735
7736 cb->fill(cb, "nr_periods", cfs_b->nr_periods);
7737 cb->fill(cb, "nr_throttled", cfs_b->nr_throttled);
7738 cb->fill(cb, "throttled_time", cfs_b->throttled_time);
7739
7740 return 0;
7741}
7742#endif /* CONFIG_CFS_BANDWIDTH */
7743#endif /* CONFIG_FAIR_GROUP_SCHED */
7744
7745#ifdef CONFIG_RT_GROUP_SCHED
7746static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
7747 s64 val)
7748{
7749 return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
7750}
7751
7752static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
7753{
7754 return sched_group_rt_runtime(cgroup_tg(cgrp));
7755}
7756
7757static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
7758 u64 rt_period_us)
7759{
7760 return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
7761}
7762
7763static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
7764{
7765 return sched_group_rt_period(cgroup_tg(cgrp));
7766}
7767#endif /* CONFIG_RT_GROUP_SCHED */
7768
7769static struct cftype cpu_files[] = {
7770#ifdef CONFIG_FAIR_GROUP_SCHED
7771 {
7772 .name = "shares",
7773 .read_u64 = cpu_shares_read_u64,
7774 .write_u64 = cpu_shares_write_u64,
7775 },
7776#endif
7777#ifdef CONFIG_CFS_BANDWIDTH
7778 {
7779 .name = "cfs_quota_us",
7780 .read_s64 = cpu_cfs_quota_read_s64,
7781 .write_s64 = cpu_cfs_quota_write_s64,
7782 },
7783 {
7784 .name = "cfs_period_us",
7785 .read_u64 = cpu_cfs_period_read_u64,
7786 .write_u64 = cpu_cfs_period_write_u64,
7787 },
7788 {
7789 .name = "stat",
7790 .read_map = cpu_stats_show,
7791 },
7792#endif
7793#ifdef CONFIG_RT_GROUP_SCHED
7794 {
7795 .name = "rt_runtime_us",
7796 .read_s64 = cpu_rt_runtime_read,
7797 .write_s64 = cpu_rt_runtime_write,
7798 },
7799 {
7800 .name = "rt_period_us",
7801 .read_u64 = cpu_rt_period_read_uint,
7802 .write_u64 = cpu_rt_period_write_uint,
7803 },
7804#endif
7805};
7806
7807static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
7808{
7809 return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
7810}
7811
7812struct cgroup_subsys cpu_cgroup_subsys = {
7813 .name = "cpu",
7814 .create = cpu_cgroup_create,
7815 .destroy = cpu_cgroup_destroy,
7816 .can_attach_task = cpu_cgroup_can_attach_task,
7817 .attach_task = cpu_cgroup_attach_task,
7818 .exit = cpu_cgroup_exit,
7819 .populate = cpu_cgroup_populate,
7820 .subsys_id = cpu_cgroup_subsys_id,
7821 .early_init = 1,
7822};
7823
7824#endif /* CONFIG_CGROUP_SCHED */
7825
7826#ifdef CONFIG_CGROUP_CPUACCT
7827
7828/*
7829 * CPU accounting code for task groups.
7830 *
7831 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
7832 * (balbir@in.ibm.com).
7833 */
7834
7835/* track cpu usage of a group of tasks and its child groups */
7836struct cpuacct {
7837 struct cgroup_subsys_state css;
7838 /* cpuusage holds pointer to a u64-type object on every cpu */
7839 u64 __percpu *cpuusage;
7840 struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
7841 struct cpuacct *parent;
7842};
7843
7844struct cgroup_subsys cpuacct_subsys;
7845
7846/* return cpu accounting group corresponding to this container */
7847static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
7848{
7849 return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
7850 struct cpuacct, css);
7851}
7852
7853/* return cpu accounting group to which this task belongs */
7854static inline struct cpuacct *task_ca(struct task_struct *tsk)
7855{
7856 return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
7857 struct cpuacct, css);
7858}
7859
7860/* create a new cpu accounting group */
7861static struct cgroup_subsys_state *cpuacct_create(
7862 struct cgroup_subsys *ss, struct cgroup *cgrp)
7863{
7864 struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
7865 int i;
7866
7867 if (!ca)
7868 goto out;
7869
7870 ca->cpuusage = alloc_percpu(u64);
7871 if (!ca->cpuusage)
7872 goto out_free_ca;
7873
7874 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
7875 if (percpu_counter_init(&ca->cpustat[i], 0))
7876 goto out_free_counters;
7877
7878 if (cgrp->parent)
7879 ca->parent = cgroup_ca(cgrp->parent);
7880
7881 return &ca->css;
7882
7883out_free_counters:
7884 while (--i >= 0)
7885 percpu_counter_destroy(&ca->cpustat[i]);
7886 free_percpu(ca->cpuusage);
7887out_free_ca:
7888 kfree(ca);
7889out:
7890 return ERR_PTR(-ENOMEM);
7891}
7892
7893/* destroy an existing cpu accounting group */
7894static void
7895cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
7896{
7897 struct cpuacct *ca = cgroup_ca(cgrp);
7898 int i;
7899
7900 for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
7901 percpu_counter_destroy(&ca->cpustat[i]);
7902 free_percpu(ca->cpuusage);
7903 kfree(ca);
7904}
7905
7906static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
7907{
7908 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
7909 u64 data;
7910
7911#ifndef CONFIG_64BIT
7912 /*
7913 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
7914 */
7915 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
7916 data = *cpuusage;
7917 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
7918#else
7919 data = *cpuusage;
7920#endif
7921
7922 return data;
7923}
7924
7925static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
7926{
7927 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
7928
7929#ifndef CONFIG_64BIT
7930 /*
7931 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
7932 */
7933 raw_spin_lock_irq(&cpu_rq(cpu)->lock);
7934 *cpuusage = val;
7935 raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
7936#else
7937 *cpuusage = val;
7938#endif
7939}
7940
7941/* return total cpu usage (in nanoseconds) of a group */
7942static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
7943{
7944 struct cpuacct *ca = cgroup_ca(cgrp);
7945 u64 totalcpuusage = 0;
7946 int i;
7947
7948 for_each_present_cpu(i)
7949 totalcpuusage += cpuacct_cpuusage_read(ca, i);
7950
7951 return totalcpuusage;
7952}
7953
7954static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
7955 u64 reset)
7956{
7957 struct cpuacct *ca = cgroup_ca(cgrp);
7958 int err = 0;
7959 int i;
7960
7961 if (reset) {
7962 err = -EINVAL;
7963 goto out;
7964 }
7965
7966 for_each_present_cpu(i)
7967 cpuacct_cpuusage_write(ca, i, 0);
7968
7969out:
7970 return err;
7971}
7972
7973static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
7974 struct seq_file *m)
7975{
7976 struct cpuacct *ca = cgroup_ca(cgroup);
7977 u64 percpu;
7978 int i;
7979
7980 for_each_present_cpu(i) {
7981 percpu = cpuacct_cpuusage_read(ca, i);
7982 seq_printf(m, "%llu ", (unsigned long long) percpu);
7983 }
7984 seq_printf(m, "\n");
7985 return 0;
7986}
7987
7988static const char *cpuacct_stat_desc[] = {
7989 [CPUACCT_STAT_USER] = "user",
7990 [CPUACCT_STAT_SYSTEM] = "system",
7991};
7992
7993static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
7994 struct cgroup_map_cb *cb)
7995{
7996 struct cpuacct *ca = cgroup_ca(cgrp);
7997 int i;
7998
7999 for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
8000 s64 val = percpu_counter_read(&ca->cpustat[i]);
8001 val = cputime64_to_clock_t(val);
8002 cb->fill(cb, cpuacct_stat_desc[i], val);
8003 }
8004 return 0;
8005}
8006
8007static struct cftype files[] = {
8008 {
8009 .name = "usage",
8010 .read_u64 = cpuusage_read,
8011 .write_u64 = cpuusage_write,
8012 },
8013 {
8014 .name = "usage_percpu",
8015 .read_seq_string = cpuacct_percpu_seq_read,
8016 },
8017 {
8018 .name = "stat",
8019 .read_map = cpuacct_stats_show,
8020 },
8021};
8022
8023static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
8024{
8025 return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
8026}
8027
8028/*
8029 * charge this task's execution time to its accounting group.
8030 *
8031 * called with rq->lock held.
8032 */
8033void cpuacct_charge(struct task_struct *tsk, u64 cputime)
8034{
8035 struct cpuacct *ca;
8036 int cpu;
8037
8038 if (unlikely(!cpuacct_subsys.active))
8039 return;
8040
8041 cpu = task_cpu(tsk);
8042
8043 rcu_read_lock();
8044
8045 ca = task_ca(tsk);
8046
8047 for (; ca; ca = ca->parent) {
8048 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
8049 *cpuusage += cputime;
8050 }
8051
8052 rcu_read_unlock();
8053}
8054
8055/*
8056 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
8057 * in cputime_t units. As a result, cpuacct_update_stats calls
8058 * percpu_counter_add with values large enough to always overflow the
8059 * per cpu batch limit causing bad SMP scalability.
8060 *
8061 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
8062 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
8063 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
8064 */
8065#ifdef CONFIG_SMP
8066#define CPUACCT_BATCH \
8067 min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
8068#else
8069#define CPUACCT_BATCH 0
8070#endif
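
A small sketch of the capping above; the two input values are invented, but they show why the product is clamped to INT_MAX before being used as a percpu-counter batch:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
            long long percpu_counter_batch = 32;         /* invented value         */
            long long cputime_one_jiffy    = 100000000;  /* invented "large jiffy" */
            long long batch = percpu_counter_batch * cputime_one_jiffy;

            if (batch > INT_MAX)            /* the min_t(long, ..., INT_MAX) cap */
                    batch = INT_MAX;

            printf("batch = %lld\n", batch);             /* 2147483647 here */
            return 0;
    }
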
8071
8072/*
8073 * Charge the system/user time to the task's accounting group.
8074 */
8075void cpuacct_update_stats(struct task_struct *tsk,
8076 enum cpuacct_stat_index idx, cputime_t val)
8077{
8078 struct cpuacct *ca;
8079 int batch = CPUACCT_BATCH;
8080
8081 if (unlikely(!cpuacct_subsys.active))
8082 return;
8083
8084 rcu_read_lock();
8085 ca = task_ca(tsk);
8086
8087 do {
8088 __percpu_counter_add(&ca->cpustat[idx], val, batch);
8089 ca = ca->parent;
8090 } while (ca);
8091 rcu_read_unlock();
8092}
8093
8094struct cgroup_subsys cpuacct_subsys = {
8095 .name = "cpuacct",
8096 .create = cpuacct_create,
8097 .destroy = cpuacct_destroy,
8098 .populate = cpuacct_populate,
8099 .subsys_id = cpuacct_subsys_id,
8100};
8101#endif /* CONFIG_CGROUP_CPUACCT */
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
new file mode 100644
index 000000000000..b0d798eaf130
--- /dev/null
+++ b/kernel/sched/cpupri.c
@@ -0,0 +1,241 @@
1/*
2 * kernel/sched/cpupri.c
3 *
4 * CPU priority management
5 *
6 * Copyright (C) 2007-2008 Novell
7 *
8 * Author: Gregory Haskins <ghaskins@novell.com>
9 *
10 * This code tracks the priority of each CPU so that global migration
11 * decisions are easy to calculate. Each CPU can be in a state as follows:
12 *
13 * (INVALID), IDLE, NORMAL, RT1, ... RT99
14 *
15 * going from the lowest priority to the highest. CPUs in the INVALID state
16 * are not eligible for routing. The system maintains this state with
17 * a 2 dimensional bitmap (the first for priority class, the second for cpus
18 * in that class). Therefore a typical application without affinity
19 * restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
20 * searches). For tasks with affinity restrictions, the algorithm has a
21 * worst case complexity of O(min(102, nr_domcpus)), though the scenario that
22 * yields the worst case search is fairly contrived.
23 *
24 * This program is free software; you can redistribute it and/or
25 * modify it under the terms of the GNU General Public License
26 * as published by the Free Software Foundation; version 2
27 * of the License.
28 */
29
30#include <linux/gfp.h>
31#include "cpupri.h"
32
33/* Convert between a 140 based task->prio, and our 102 based cpupri */
34static int convert_prio(int prio)
35{
36 int cpupri;
37
38 if (prio == CPUPRI_INVALID)
39 cpupri = CPUPRI_INVALID;
40 else if (prio == MAX_PRIO)
41 cpupri = CPUPRI_IDLE;
42 else if (prio >= MAX_RT_PRIO)
43 cpupri = CPUPRI_NORMAL;
44 else
45 cpupri = MAX_RT_PRIO - prio + 1;
46
47 return cpupri;
48}
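
A standalone re-statement of the mapping above (assuming MAX_RT_PRIO == 100 and MAX_PRIO == 140, as in this kernel), showing why CPUPRI_NR_PRIORITIES is MAX_RT_PRIO + 2:

    #include <stdio.h>

    #define MAX_RT_PRIO    100
    #define MAX_PRIO       140
    #define CPUPRI_INVALID  -1
    #define CPUPRI_IDLE      0
    #define CPUPRI_NORMAL    1

    static int convert_prio(int prio)
    {
            if (prio == CPUPRI_INVALID)
                    return CPUPRI_INVALID;
            if (prio == MAX_PRIO)
                    return CPUPRI_IDLE;
            if (prio >= MAX_RT_PRIO)
                    return CPUPRI_NORMAL;
            return MAX_RT_PRIO - prio + 1;
    }

    int main(void)
    {
            int samples[] = { 140, 120, 99, 50, 0 };

            /* 140->0 (IDLE), 120->1 (NORMAL), 99->2, 50->51, 0->101 */
            for (int i = 0; i < 5; i++)
                    printf("task prio %3d -> cpupri %3d\n",
                           samples[i], convert_prio(samples[i]));
            return 0;
    }
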
49
50/**
51 * cpupri_find - find the best (lowest-pri) CPU in the system
52 * @cp: The cpupri context
53 * @p: The task
54 * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
55 *
56 * Note: This function returns the recommended CPUs as calculated during the
57 * current invocation. By the time the call returns, the CPUs may have in
58 * fact changed priorities any number of times. While not ideal, it is not
59 * an issue of correctness since the normal rebalancer logic will correct
60 * any discrepancies created by racing against the uncertainty of the current
61 * priority configuration.
62 *
63 * Returns: (int)bool - CPUs were found
64 */
65int cpupri_find(struct cpupri *cp, struct task_struct *p,
66 struct cpumask *lowest_mask)
67{
68 int idx = 0;
69 int task_pri = convert_prio(p->prio);
70
71 if (task_pri >= MAX_RT_PRIO)
72 return 0;
73
74 for (idx = 0; idx < task_pri; idx++) {
75 struct cpupri_vec *vec = &cp->pri_to_cpu[idx];
76 int skip = 0;
77
78 if (!atomic_read(&(vec)->count))
79 skip = 1;
80 /*
81 * When looking at the vector, we need to read the counter,
82 * do a memory barrier, then read the mask.
83 *
84		 * Note: This is still all racy, but we can deal with it.
85 * Ideally, we only want to look at masks that are set.
86 *
87 * If a mask is not set, then the only thing wrong is that we
88 * did a little more work than necessary.
89 *
90 * If we read a zero count but the mask is set, because of the
91 * memory barriers, that can only happen when the highest prio
92 * task for a run queue has left the run queue, in which case,
93 * it will be followed by a pull. If the task we are processing
94 * fails to find a proper place to go, that pull request will
95 * pull this task if the run queue is running at a lower
96 * priority.
97 */
98 smp_rmb();
99
100 /* Need to do the rmb for every iteration */
101 if (skip)
102 continue;
103
104 if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
105 continue;
106
107 if (lowest_mask) {
108 cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
109
110 /*
111 * We have to ensure that we have at least one bit
112 * still set in the array, since the map could have
113 * been concurrently emptied between the first and
114 * second reads of vec->mask. If we hit this
115 * condition, simply act as though we never hit this
116 * priority level and continue on.
117 */
118 if (cpumask_any(lowest_mask) >= nr_cpu_ids)
119 continue;
120 }
121
122 return 1;
123 }
124
125 return 0;
126}
127
128/**
129 * cpupri_set - update the cpu priority setting
130 * @cp: The cpupri context
131 * @cpu: The target cpu
132 * @pri: The priority (INVALID-RT99) to assign to this CPU
133 *
134 * Note: Assumes cpu_rq(cpu)->lock is locked
135 *
136 * Returns: (void)
137 */
138void cpupri_set(struct cpupri *cp, int cpu, int newpri)
139{
140 int *currpri = &cp->cpu_to_pri[cpu];
141 int oldpri = *currpri;
142 int do_mb = 0;
143
144 newpri = convert_prio(newpri);
145
146 BUG_ON(newpri >= CPUPRI_NR_PRIORITIES);
147
148 if (newpri == oldpri)
149 return;
150
151 /*
152 * If the cpu was currently mapped to a different value, we
153 * need to map it to the new value then remove the old value.
154 * Note, we must add the new value first, otherwise we risk the
155 * cpu being missed by the priority loop in cpupri_find.
156 */
157 if (likely(newpri != CPUPRI_INVALID)) {
158 struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
159
160 cpumask_set_cpu(cpu, vec->mask);
161 /*
162 * When adding a new vector, we update the mask first,
163 * do a write memory barrier, and then update the count, to
164 * make sure the vector is visible when count is set.
165 */
166 smp_mb__before_atomic_inc();
167 atomic_inc(&(vec)->count);
168 do_mb = 1;
169 }
170 if (likely(oldpri != CPUPRI_INVALID)) {
171 struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
172
173 /*
174 * Because the order of modification of the vec->count
175 * is important, we must make sure that the update
176 * of the new prio is seen before we decrement the
177 * old prio. This makes sure that the loop sees
178 * one or the other when we raise the priority of
179 * the run queue. We don't care about when we lower the
180 * priority, as that will trigger an rt pull anyway.
181 *
182 * We only need to do a memory barrier if we updated
183 * the new priority vec.
184 */
185 if (do_mb)
186 smp_mb__after_atomic_inc();
187
188 /*
189		 * When removing from the vector, we decrement the counter first,
190		 * do a memory barrier, and then clear the mask.
191 */
192 atomic_dec(&(vec)->count);
193 smp_mb__after_atomic_inc();
194 cpumask_clear_cpu(cpu, vec->mask);
195 }
196
197 *currpri = newpri;
198}
199
200/**
201 * cpupri_init - initialize the cpupri structure
202 * @cp: The cpupri context
203 * (All allocations here use GFP_KERNEL.)
204 *
205 * Returns: -ENOMEM if an allocation fails, 0 on success.
206 */
207int cpupri_init(struct cpupri *cp)
208{
209 int i;
210
211 memset(cp, 0, sizeof(*cp));
212
213 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
214 struct cpupri_vec *vec = &cp->pri_to_cpu[i];
215
216 atomic_set(&vec->count, 0);
217 if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL))
218 goto cleanup;
219 }
220
221 for_each_possible_cpu(i)
222 cp->cpu_to_pri[i] = CPUPRI_INVALID;
223 return 0;
224
225cleanup:
226 for (i--; i >= 0; i--)
227 free_cpumask_var(cp->pri_to_cpu[i].mask);
228 return -ENOMEM;
229}
230
231/**
232 * cpupri_cleanup - clean up the cpupri structure
233 * @cp: The cpupri context
234 */
235void cpupri_cleanup(struct cpupri *cp)
236{
237 int i;
238
239 for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
240 free_cpumask_var(cp->pri_to_cpu[i].mask);
241}
diff --git a/kernel/sched/cpupri.h b/kernel/sched/cpupri.h
new file mode 100644
index 000000000000..f6d756173491
--- /dev/null
+++ b/kernel/sched/cpupri.h
@@ -0,0 +1,34 @@
1#ifndef _LINUX_CPUPRI_H
2#define _LINUX_CPUPRI_H
3
4#include <linux/sched.h>
5
6#define CPUPRI_NR_PRIORITIES (MAX_RT_PRIO + 2)
7
8#define CPUPRI_INVALID -1
9#define CPUPRI_IDLE 0
10#define CPUPRI_NORMAL 1
11/* values 2-101 are RT priorities 0-99 */
12
13struct cpupri_vec {
14 atomic_t count;
15 cpumask_var_t mask;
16};
17
18struct cpupri {
19 struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES];
20 int cpu_to_pri[NR_CPUS];
21};
22
23#ifdef CONFIG_SMP
24int cpupri_find(struct cpupri *cp,
25 struct task_struct *p, struct cpumask *lowest_mask);
26void cpupri_set(struct cpupri *cp, int cpu, int pri);
27int cpupri_init(struct cpupri *cp);
28void cpupri_cleanup(struct cpupri *cp);
29#else
30#define cpupri_set(cp, cpu, pri) do { } while (0)
31#define cpupri_init() do { } while (0)
32#endif
33
34#endif /* _LINUX_CPUPRI_H */
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
new file mode 100644
index 000000000000..2a075e10004b
--- /dev/null
+++ b/kernel/sched/debug.c
@@ -0,0 +1,510 @@
1/*
2 * kernel/sched/debug.c
3 *
4 * Print the CFS rbtree
5 *
6 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/proc_fs.h>
14#include <linux/sched.h>
15#include <linux/seq_file.h>
16#include <linux/kallsyms.h>
17#include <linux/utsname.h>
18
19#include "sched.h"
20
21static DEFINE_SPINLOCK(sched_debug_lock);
22
23/*
24 * This allows printing both to /proc/sched_debug and
25 * to the console
26 */
27#define SEQ_printf(m, x...) \
28 do { \
29 if (m) \
30 seq_printf(m, x); \
31 else \
32 printk(x); \
33 } while (0)
34
35/*
36 * Ease the printing of nsec fields:
37 */
38static long long nsec_high(unsigned long long nsec)
39{
40 if ((long long)nsec < 0) {
41 nsec = -nsec;
42 do_div(nsec, 1000000);
43 return -nsec;
44 }
45 do_div(nsec, 1000000);
46
47 return nsec;
48}
49
50static unsigned long nsec_low(unsigned long long nsec)
51{
52 if ((long long)nsec < 0)
53 nsec = -nsec;
54
55 return do_div(nsec, 1000000);
56}
57
58#define SPLIT_NS(x) nsec_high(x), nsec_low(x)
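
A standalone example of what SPLIT_NS() produces (negative handling omitted for brevity): a nanosecond value is printed as milliseconds with a six-digit sub-millisecond remainder.

    #include <stdio.h>

    static long long nsec_high(unsigned long long nsec) { return nsec / 1000000; }
    static unsigned long nsec_low(unsigned long long nsec) { return nsec % 1000000; }

    int main(void)
    {
            unsigned long long t = 6123456789ULL;   /* about 6.12 seconds in ns */

            /* prints "6123.456789", i.e. 6123 ms plus the remainder */
            printf("%lld.%06lu\n", nsec_high(t), nsec_low(t));
            return 0;
    }
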
59
60#ifdef CONFIG_FAIR_GROUP_SCHED
61static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
62{
63 struct sched_entity *se = tg->se[cpu];
64 if (!se)
65 return;
66
67#define P(F) \
68 SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
69#define PN(F) \
70 SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
71
72 PN(se->exec_start);
73 PN(se->vruntime);
74 PN(se->sum_exec_runtime);
75#ifdef CONFIG_SCHEDSTATS
76 PN(se->statistics.wait_start);
77 PN(se->statistics.sleep_start);
78 PN(se->statistics.block_start);
79 PN(se->statistics.sleep_max);
80 PN(se->statistics.block_max);
81 PN(se->statistics.exec_max);
82 PN(se->statistics.slice_max);
83 PN(se->statistics.wait_max);
84 PN(se->statistics.wait_sum);
85 P(se->statistics.wait_count);
86#endif
87 P(se->load.weight);
88#undef PN
89#undef P
90}
91#endif
92
93#ifdef CONFIG_CGROUP_SCHED
94static char group_path[PATH_MAX];
95
96static char *task_group_path(struct task_group *tg)
97{
98 if (autogroup_path(tg, group_path, PATH_MAX))
99 return group_path;
100
101 /*
102 * May be NULL if the underlying cgroup isn't fully-created yet
103 */
104 if (!tg->css.cgroup) {
105 group_path[0] = '\0';
106 return group_path;
107 }
108 cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
109 return group_path;
110}
111#endif
112
113static void
114print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
115{
116 if (rq->curr == p)
117 SEQ_printf(m, "R");
118 else
119 SEQ_printf(m, " ");
120
121 SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
122 p->comm, p->pid,
123 SPLIT_NS(p->se.vruntime),
124 (long long)(p->nvcsw + p->nivcsw),
125 p->prio);
126#ifdef CONFIG_SCHEDSTATS
127 SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
128 SPLIT_NS(p->se.vruntime),
129 SPLIT_NS(p->se.sum_exec_runtime),
130 SPLIT_NS(p->se.statistics.sum_sleep_runtime));
131#else
132 SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
133 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
134#endif
135#ifdef CONFIG_CGROUP_SCHED
136 SEQ_printf(m, " %s", task_group_path(task_group(p)));
137#endif
138
139 SEQ_printf(m, "\n");
140}
141
142static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
143{
144 struct task_struct *g, *p;
145 unsigned long flags;
146
147 SEQ_printf(m,
148 "\nrunnable tasks:\n"
149 " task PID tree-key switches prio"
150 " exec-runtime sum-exec sum-sleep\n"
151 "------------------------------------------------------"
152 "----------------------------------------------------\n");
153
154 read_lock_irqsave(&tasklist_lock, flags);
155
156 do_each_thread(g, p) {
157 if (!p->on_rq || task_cpu(p) != rq_cpu)
158 continue;
159
160 print_task(m, rq, p);
161 } while_each_thread(g, p);
162
163 read_unlock_irqrestore(&tasklist_lock, flags);
164}
165
166void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
167{
168 s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
169 spread, rq0_min_vruntime, spread0;
170 struct rq *rq = cpu_rq(cpu);
171 struct sched_entity *last;
172 unsigned long flags;
173
174#ifdef CONFIG_FAIR_GROUP_SCHED
175 SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
176#else
177 SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
178#endif
179 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
180 SPLIT_NS(cfs_rq->exec_clock));
181
182 raw_spin_lock_irqsave(&rq->lock, flags);
183 if (cfs_rq->rb_leftmost)
184 MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
185 last = __pick_last_entity(cfs_rq);
186 if (last)
187 max_vruntime = last->vruntime;
188 min_vruntime = cfs_rq->min_vruntime;
189 rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
190 raw_spin_unlock_irqrestore(&rq->lock, flags);
191 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
192 SPLIT_NS(MIN_vruntime));
193 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
194 SPLIT_NS(min_vruntime));
195 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
196 SPLIT_NS(max_vruntime));
197 spread = max_vruntime - MIN_vruntime;
198 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
199 SPLIT_NS(spread));
200 spread0 = min_vruntime - rq0_min_vruntime;
201 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
202 SPLIT_NS(spread0));
203 SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
204 cfs_rq->nr_spread_over);
205 SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
206 SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
207#ifdef CONFIG_FAIR_GROUP_SCHED
208#ifdef CONFIG_SMP
209 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "load_avg",
210 SPLIT_NS(cfs_rq->load_avg));
211 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "load_period",
212 SPLIT_NS(cfs_rq->load_period));
213 SEQ_printf(m, " .%-30s: %ld\n", "load_contrib",
214 cfs_rq->load_contribution);
215 SEQ_printf(m, " .%-30s: %d\n", "load_tg",
216 atomic_read(&cfs_rq->tg->load_weight));
217#endif
218
219 print_cfs_group_stats(m, cpu, cfs_rq->tg);
220#endif
221}
222
223void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
224{
225#ifdef CONFIG_RT_GROUP_SCHED
226 SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
227#else
228 SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
229#endif
230
231#define P(x) \
232 SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
233#define PN(x) \
234 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
235
236 P(rt_nr_running);
237 P(rt_throttled);
238 PN(rt_time);
239 PN(rt_runtime);
240
241#undef PN
242#undef P
243}
244
245extern __read_mostly int sched_clock_running;
246
247static void print_cpu(struct seq_file *m, int cpu)
248{
249 struct rq *rq = cpu_rq(cpu);
250 unsigned long flags;
251
252#ifdef CONFIG_X86
253 {
254 unsigned int freq = cpu_khz ? : 1;
255
256 SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
257 cpu, freq / 1000, (freq % 1000));
258 }
259#else
260 SEQ_printf(m, "\ncpu#%d\n", cpu);
261#endif
262
263#define P(x) \
264 SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x))
265#define PN(x) \
266 SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
267
268 P(nr_running);
269 SEQ_printf(m, " .%-30s: %lu\n", "load",
270 rq->load.weight);
271 P(nr_switches);
272 P(nr_load_updates);
273 P(nr_uninterruptible);
274 PN(next_balance);
275 P(curr->pid);
276 PN(clock);
277 P(cpu_load[0]);
278 P(cpu_load[1]);
279 P(cpu_load[2]);
280 P(cpu_load[3]);
281 P(cpu_load[4]);
282#undef P
283#undef PN
284
285#ifdef CONFIG_SCHEDSTATS
286#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
287#define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
288
289 P(yld_count);
290
291 P(sched_switch);
292 P(sched_count);
293 P(sched_goidle);
294#ifdef CONFIG_SMP
295 P64(avg_idle);
296#endif
297
298 P(ttwu_count);
299 P(ttwu_local);
300
301#undef P
302#undef P64
303#endif
304 spin_lock_irqsave(&sched_debug_lock, flags);
305 print_cfs_stats(m, cpu);
306 print_rt_stats(m, cpu);
307
308 rcu_read_lock();
309 print_rq(m, rq, cpu);
310 rcu_read_unlock();
311 spin_unlock_irqrestore(&sched_debug_lock, flags);
312}
313
314static const char *sched_tunable_scaling_names[] = {
315 "none",
316	"logarithmic",
317 "linear"
318};
319
320static int sched_debug_show(struct seq_file *m, void *v)
321{
322 u64 ktime, sched_clk, cpu_clk;
323 unsigned long flags;
324 int cpu;
325
326 local_irq_save(flags);
327 ktime = ktime_to_ns(ktime_get());
328 sched_clk = sched_clock();
329 cpu_clk = local_clock();
330 local_irq_restore(flags);
331
332 SEQ_printf(m, "Sched Debug Version: v0.10, %s %.*s\n",
333 init_utsname()->release,
334 (int)strcspn(init_utsname()->version, " "),
335 init_utsname()->version);
336
337#define P(x) \
338 SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
339#define PN(x) \
340 SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
341 PN(ktime);
342 PN(sched_clk);
343 PN(cpu_clk);
344 P(jiffies);
345#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
346 P(sched_clock_stable);
347#endif
348#undef PN
349#undef P
350
351 SEQ_printf(m, "\n");
352 SEQ_printf(m, "sysctl_sched\n");
353
354#define P(x) \
355 SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
356#define PN(x) \
357 SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
358 PN(sysctl_sched_latency);
359 PN(sysctl_sched_min_granularity);
360 PN(sysctl_sched_wakeup_granularity);
361 P(sysctl_sched_child_runs_first);
362 P(sysctl_sched_features);
363#undef PN
364#undef P
365
366 SEQ_printf(m, " .%-40s: %d (%s)\n", "sysctl_sched_tunable_scaling",
367 sysctl_sched_tunable_scaling,
368 sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
369
370 for_each_online_cpu(cpu)
371 print_cpu(m, cpu);
372
373 SEQ_printf(m, "\n");
374
375 return 0;
376}
377
378void sysrq_sched_debug_show(void)
379{
380 sched_debug_show(NULL, NULL);
381}
382
383static int sched_debug_open(struct inode *inode, struct file *filp)
384{
385 return single_open(filp, sched_debug_show, NULL);
386}
387
388static const struct file_operations sched_debug_fops = {
389 .open = sched_debug_open,
390 .read = seq_read,
391 .llseek = seq_lseek,
392 .release = single_release,
393};
394
395static int __init init_sched_debug_procfs(void)
396{
397 struct proc_dir_entry *pe;
398
399 pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
400 if (!pe)
401 return -ENOMEM;
402 return 0;
403}
404
405__initcall(init_sched_debug_procfs);
406
407void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
408{
409 unsigned long nr_switches;
410
411 SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid,
412 get_nr_threads(p));
413 SEQ_printf(m,
414 "---------------------------------------------------------\n");
415#define __P(F) \
416 SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)F)
417#define P(F) \
418 SEQ_printf(m, "%-35s:%21Ld\n", #F, (long long)p->F)
419#define __PN(F) \
420 SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
421#define PN(F) \
422 SEQ_printf(m, "%-35s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
423
424 PN(se.exec_start);
425 PN(se.vruntime);
426 PN(se.sum_exec_runtime);
427
428 nr_switches = p->nvcsw + p->nivcsw;
429
430#ifdef CONFIG_SCHEDSTATS
431 PN(se.statistics.wait_start);
432 PN(se.statistics.sleep_start);
433 PN(se.statistics.block_start);
434 PN(se.statistics.sleep_max);
435 PN(se.statistics.block_max);
436 PN(se.statistics.exec_max);
437 PN(se.statistics.slice_max);
438 PN(se.statistics.wait_max);
439 PN(se.statistics.wait_sum);
440 P(se.statistics.wait_count);
441 PN(se.statistics.iowait_sum);
442 P(se.statistics.iowait_count);
443 P(se.nr_migrations);
444 P(se.statistics.nr_migrations_cold);
445 P(se.statistics.nr_failed_migrations_affine);
446 P(se.statistics.nr_failed_migrations_running);
447 P(se.statistics.nr_failed_migrations_hot);
448 P(se.statistics.nr_forced_migrations);
449 P(se.statistics.nr_wakeups);
450 P(se.statistics.nr_wakeups_sync);
451 P(se.statistics.nr_wakeups_migrate);
452 P(se.statistics.nr_wakeups_local);
453 P(se.statistics.nr_wakeups_remote);
454 P(se.statistics.nr_wakeups_affine);
455 P(se.statistics.nr_wakeups_affine_attempts);
456 P(se.statistics.nr_wakeups_passive);
457 P(se.statistics.nr_wakeups_idle);
458
459 {
460 u64 avg_atom, avg_per_cpu;
461
462 avg_atom = p->se.sum_exec_runtime;
463 if (nr_switches)
464 do_div(avg_atom, nr_switches);
465 else
466 avg_atom = -1LL;
467
468 avg_per_cpu = p->se.sum_exec_runtime;
469 if (p->se.nr_migrations) {
470 avg_per_cpu = div64_u64(avg_per_cpu,
471 p->se.nr_migrations);
472 } else {
473 avg_per_cpu = -1LL;
474 }
475
476 __PN(avg_atom);
477 __PN(avg_per_cpu);
478 }
479#endif
480 __P(nr_switches);
481 SEQ_printf(m, "%-35s:%21Ld\n",
482 "nr_voluntary_switches", (long long)p->nvcsw);
483 SEQ_printf(m, "%-35s:%21Ld\n",
484 "nr_involuntary_switches", (long long)p->nivcsw);
485
486 P(se.load.weight);
487 P(policy);
488 P(prio);
489#undef PN
490#undef __PN
491#undef P
492#undef __P
493
494 {
495 unsigned int this_cpu = raw_smp_processor_id();
496 u64 t0, t1;
497
498 t0 = cpu_clock(this_cpu);
499 t1 = cpu_clock(this_cpu);
500 SEQ_printf(m, "%-35s:%21Ld\n",
501 "clock-delta", (long long)(t1-t0));
502 }
503}
504
505void proc_sched_set_task(struct task_struct *p)
506{
507#ifdef CONFIG_SCHEDSTATS
508 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
509#endif
510}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
new file mode 100644
index 000000000000..cd3b64219d9f
--- /dev/null
+++ b/kernel/sched/fair.c
@@ -0,0 +1,5601 @@
1/*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21 */
22
23#include <linux/latencytop.h>
24#include <linux/sched.h>
25#include <linux/cpumask.h>
26#include <linux/slab.h>
27#include <linux/profile.h>
28#include <linux/interrupt.h>
29
30#include <trace/events/sched.h>
31
32#include "sched.h"
33
34/*
35 * Targeted preemption latency for CPU-bound tasks:
36 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
37 *
38 * NOTE: this latency value is not the same as the concept of
39 * 'timeslice length' - timeslices in CFS are of variable length
40 * and have no persistent notion like in traditional, time-slice
41 * based scheduling concepts.
42 *
43 * (to see the precise effective timeslice length of your workload,
44 * run vmstat and monitor the context-switches (cs) field)
45 */
46unsigned int sysctl_sched_latency = 6000000ULL;
47unsigned int normalized_sysctl_sched_latency = 6000000ULL;
48
49/*
50 * The initial- and re-scaling of tunables is configurable
51 * (default: SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
52 *
53 * Options are:
54 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
55 * SCHED_TUNABLESCALING_LOG - scaled logarithmical, *1+ilog(ncpus)
56 * SCHED_TUNABLESCALING_LINEAR - scaled linear, *ncpus
57 */
58enum sched_tunable_scaling sysctl_sched_tunable_scaling
59 = SCHED_TUNABLESCALING_LOG;
60
61/*
62 * Minimal preemption granularity for CPU-bound tasks:
63 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
64 */
65unsigned int sysctl_sched_min_granularity = 750000ULL;
66unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
67
68/*
69 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
70 */
71static unsigned int sched_nr_latency = 8;
72
73/*
74 * After fork, child runs first. If set to 0 (default) then
75 * parent will (try to) run first.
76 */
77unsigned int sysctl_sched_child_runs_first __read_mostly;
78
79/*
80 * SCHED_OTHER wake-up granularity.
81 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
82 *
83 * This option delays the preemption effects of decoupled workloads
84 * and reduces their over-scheduling. Synchronous workloads will still
85 * have immediate wakeup/sleep latencies.
86 */
87unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
88unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
89
90const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
91
92/*
93 * The exponential sliding window over which load is averaged for shares
94 * distribution.
95 * (default: 10msec)
96 */
97unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
98
99#ifdef CONFIG_CFS_BANDWIDTH
100/*
101 * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool
102 * each time a cfs_rq requests quota.
103 *
104 * Note: if the slice exceeds the runtime remaining (either due to
105 * consumption or because the quota is smaller than the slice),
106 * we only issue the remaining available time.
107 *
108 * default: 5 msec, units: microseconds
109 */
110unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
111#endif
112
113/*
114 * Increase the granularity value when there are more CPUs,
115 * because with more CPUs the 'effective latency' as visible
116 * to users decreases. But the relationship is not linear,
117 * so pick a second-best guess by going with the log2 of the
118 * number of CPUs.
119 *
120 * This idea comes from the SD scheduler of Con Kolivas:
121 */
122static int get_update_sysctl_factor(void)
123{
124 unsigned int cpus = min_t(int, num_online_cpus(), 8);
125 unsigned int factor;
126
127 switch (sysctl_sched_tunable_scaling) {
128 case SCHED_TUNABLESCALING_NONE:
129 factor = 1;
130 break;
131 case SCHED_TUNABLESCALING_LINEAR:
132 factor = cpus;
133 break;
134 case SCHED_TUNABLESCALING_LOG:
135 default:
136 factor = 1 + ilog2(cpus);
137 break;
138 }
139
140 return factor;
141}
142
143static void update_sysctl(void)
144{
145 unsigned int factor = get_update_sysctl_factor();
146
147#define SET_SYSCTL(name) \
148 (sysctl_##name = (factor) * normalized_sysctl_##name)
149 SET_SYSCTL(sched_min_granularity);
150 SET_SYSCTL(sched_latency);
151 SET_SYSCTL(sched_wakeup_granularity);
152#undef SET_SYSCTL
153}
154
155void sched_init_granularity(void)
156{
157 update_sysctl();
158}
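
With the default logarithmic scaling and the normalized defaults above, an 8-CPU machine gets factor 1 + ilog2(8) = 4. A standalone sketch of the resulting effective tunables:

    #include <stdio.h>

    static unsigned int ilog2_(unsigned int x)
    {
            unsigned int r = 0;

            while (x >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned int cpus = 8;                  /* capped at 8 by min_t() above */
            unsigned int factor = 1 + ilog2_(cpus); /* SCHED_TUNABLESCALING_LOG     */

            printf("sched_latency            = %u ns\n", factor * 6000000u); /* 24 ms */
            printf("sched_min_granularity    = %u ns\n", factor * 750000u);  /*  3 ms */
            printf("sched_wakeup_granularity = %u ns\n", factor * 1000000u); /*  4 ms */
            return 0;
    }
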
159
160#if BITS_PER_LONG == 32
161# define WMULT_CONST (~0UL)
162#else
163# define WMULT_CONST (1UL << 32)
164#endif
165
166#define WMULT_SHIFT 32
167
168/*
169 * Shift right and round:
170 */
171#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
172
173/*
174 * delta *= weight / lw
175 */
176static unsigned long
177calc_delta_mine(unsigned long delta_exec, unsigned long weight,
178 struct load_weight *lw)
179{
180 u64 tmp;
181
182 /*
183 * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
184 * entities since MIN_SHARES = 2. Treat weight as 1 if less than
185 * 2^SCHED_LOAD_RESOLUTION.
186 */
187 if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
188 tmp = (u64)delta_exec * scale_load_down(weight);
189 else
190 tmp = (u64)delta_exec;
191
192 if (!lw->inv_weight) {
193 unsigned long w = scale_load_down(lw->weight);
194
195 if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
196 lw->inv_weight = 1;
197 else if (unlikely(!w))
198 lw->inv_weight = WMULT_CONST;
199 else
200 lw->inv_weight = WMULT_CONST / w;
201 }
202
203 /*
204 * Check whether we'd overflow the 64-bit multiplication:
205 */
206 if (unlikely(tmp > WMULT_CONST))
207 tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
208 WMULT_SHIFT/2);
209 else
210 tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
211
212 return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
213}
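
Ignoring the fixed-point and overflow handling, the function above computes delta * weight / lw. A plain-arithmetic sketch with invented numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long delta_exec = 4000000;  /* 4 ms of wall time                 */
            unsigned long long weight     = 1024;     /* this entity's weight              */
            unsigned long long lw_weight  = 2048;     /* total weight it is scaled against */

            /* the entity is credited with half of the elapsed time: 2000000 ns */
            printf("%llu ns\n", delta_exec * weight / lw_weight);
            return 0;
    }
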
214
215
216const struct sched_class fair_sched_class;
217
218/**************************************************************
219 * CFS operations on generic schedulable entities:
220 */
221
222#ifdef CONFIG_FAIR_GROUP_SCHED
223
224/* cpu runqueue to which this cfs_rq is attached */
225static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
226{
227 return cfs_rq->rq;
228}
229
230/* An entity is a task if it doesn't "own" a runqueue */
231#define entity_is_task(se) (!se->my_q)
232
233static inline struct task_struct *task_of(struct sched_entity *se)
234{
235#ifdef CONFIG_SCHED_DEBUG
236 WARN_ON_ONCE(!entity_is_task(se));
237#endif
238 return container_of(se, struct task_struct, se);
239}
240
241/* Walk up scheduling entities hierarchy */
242#define for_each_sched_entity(se) \
243 for (; se; se = se->parent)
244
245static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
246{
247 return p->se.cfs_rq;
248}
249
250/* runqueue on which this entity is (to be) queued */
251static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
252{
253 return se->cfs_rq;
254}
255
256/* runqueue "owned" by this group */
257static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
258{
259 return grp->my_q;
260}
261
262static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
263{
264 if (!cfs_rq->on_list) {
265 /*
266 * Ensure we either appear before our parent (if already
267 * enqueued) or force our parent to appear after us when it is
268 * enqueued. The fact that we always enqueue bottom-up
269 * reduces this to two cases.
270 */
271 if (cfs_rq->tg->parent &&
272 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
273 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
274 &rq_of(cfs_rq)->leaf_cfs_rq_list);
275 } else {
276 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
277 &rq_of(cfs_rq)->leaf_cfs_rq_list);
278 }
279
280 cfs_rq->on_list = 1;
281 }
282}
283
284static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
285{
286 if (cfs_rq->on_list) {
287 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
288 cfs_rq->on_list = 0;
289 }
290}
291
292/* Iterate through all leaf cfs_rq's on a runqueue */
293#define for_each_leaf_cfs_rq(rq, cfs_rq) \
294 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
295
296/* Do the two (enqueued) entities belong to the same group ? */
297static inline int
298is_same_group(struct sched_entity *se, struct sched_entity *pse)
299{
300 if (se->cfs_rq == pse->cfs_rq)
301 return 1;
302
303 return 0;
304}
305
306static inline struct sched_entity *parent_entity(struct sched_entity *se)
307{
308 return se->parent;
309}
310
311/* return depth at which a sched entity is present in the hierarchy */
312static inline int depth_se(struct sched_entity *se)
313{
314 int depth = 0;
315
316 for_each_sched_entity(se)
317 depth++;
318
319 return depth;
320}
321
322static void
323find_matching_se(struct sched_entity **se, struct sched_entity **pse)
324{
325 int se_depth, pse_depth;
326
327 /*
328	 * A preemption test can only be made between sibling entities that are in
329	 * the same cfs_rq, i.e. those that have a common parent. Walk up the
330	 * hierarchy of both tasks until we find their ancestors that are siblings
331	 * under a common parent.
332 */
333
334 /* First walk up until both entities are at same depth */
335 se_depth = depth_se(*se);
336 pse_depth = depth_se(*pse);
337
338 while (se_depth > pse_depth) {
339 se_depth--;
340 *se = parent_entity(*se);
341 }
342
343 while (pse_depth > se_depth) {
344 pse_depth--;
345 *pse = parent_entity(*pse);
346 }
347
348 while (!is_same_group(*se, *pse)) {
349 *se = parent_entity(*se);
350 *pse = parent_entity(*pse);
351 }
352}
353
354#else /* !CONFIG_FAIR_GROUP_SCHED */
355
356static inline struct task_struct *task_of(struct sched_entity *se)
357{
358 return container_of(se, struct task_struct, se);
359}
360
361static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
362{
363 return container_of(cfs_rq, struct rq, cfs);
364}
365
366#define entity_is_task(se) 1
367
368#define for_each_sched_entity(se) \
369 for (; se; se = NULL)
370
371static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
372{
373 return &task_rq(p)->cfs;
374}
375
376static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
377{
378 struct task_struct *p = task_of(se);
379 struct rq *rq = task_rq(p);
380
381 return &rq->cfs;
382}
383
384/* runqueue "owned" by this group */
385static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
386{
387 return NULL;
388}
389
390static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
391{
392}
393
394static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
395{
396}
397
398#define for_each_leaf_cfs_rq(rq, cfs_rq) \
399 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
400
401static inline int
402is_same_group(struct sched_entity *se, struct sched_entity *pse)
403{
404 return 1;
405}
406
407static inline struct sched_entity *parent_entity(struct sched_entity *se)
408{
409 return NULL;
410}
411
412static inline void
413find_matching_se(struct sched_entity **se, struct sched_entity **pse)
414{
415}
416
417#endif /* CONFIG_FAIR_GROUP_SCHED */
418
419static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
420 unsigned long delta_exec);
421
422/**************************************************************
423 * Scheduling class tree data structure manipulation methods:
424 */
425
426static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
427{
428 s64 delta = (s64)(vruntime - min_vruntime);
429 if (delta > 0)
430 min_vruntime = vruntime;
431
432 return min_vruntime;
433}
434
435static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
436{
437 s64 delta = (s64)(vruntime - min_vruntime);
438 if (delta < 0)
439 min_vruntime = vruntime;
440
441 return min_vruntime;
442}
443
444static inline int entity_before(struct sched_entity *a,
445 struct sched_entity *b)
446{
447 return (s64)(a->vruntime - b->vruntime) < 0;
448}
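
All three helpers above compare vruntimes through a signed difference rather than directly. A small sketch of why that stays correct even if the unsigned vruntime clock wraps around:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t a = UINT64_MAX - 100;  /* just before the wrap          */
            uint64_t b = 50;                /* just after the wrap, so later */

            /* a naive unsigned compare claims a comes after b, which is wrong */
            printf("unsigned: a %s b\n", a > b ? ">" : "<=");

            /* the signed difference is small and negative, so a is ordered first */
            printf("signed delta: %lld\n", (long long)(int64_t)(a - b));
            return 0;
    }
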
449
450static void update_min_vruntime(struct cfs_rq *cfs_rq)
451{
452 u64 vruntime = cfs_rq->min_vruntime;
453
454 if (cfs_rq->curr)
455 vruntime = cfs_rq->curr->vruntime;
456
457 if (cfs_rq->rb_leftmost) {
458 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
459 struct sched_entity,
460 run_node);
461
462 if (!cfs_rq->curr)
463 vruntime = se->vruntime;
464 else
465 vruntime = min_vruntime(vruntime, se->vruntime);
466 }
467
468 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
469#ifndef CONFIG_64BIT
470 smp_wmb();
471 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
472#endif
473}
474
475/*
476 * Enqueue an entity into the rb-tree:
477 */
478static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
479{
480 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
481 struct rb_node *parent = NULL;
482 struct sched_entity *entry;
483 int leftmost = 1;
484
485 /*
486 * Find the right place in the rbtree:
487 */
488 while (*link) {
489 parent = *link;
490 entry = rb_entry(parent, struct sched_entity, run_node);
491 /*
492		 * We don't care about collisions. Nodes with
493 * the same key stay together.
494 */
495 if (entity_before(se, entry)) {
496 link = &parent->rb_left;
497 } else {
498 link = &parent->rb_right;
499 leftmost = 0;
500 }
501 }
502
503 /*
504 * Maintain a cache of leftmost tree entries (it is frequently
505 * used):
506 */
507 if (leftmost)
508 cfs_rq->rb_leftmost = &se->run_node;
509
510 rb_link_node(&se->run_node, parent, link);
511 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
512}
513
514static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
515{
516 if (cfs_rq->rb_leftmost == &se->run_node) {
517 struct rb_node *next_node;
518
519 next_node = rb_next(&se->run_node);
520 cfs_rq->rb_leftmost = next_node;
521 }
522
523 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
524}
525
526struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
527{
528 struct rb_node *left = cfs_rq->rb_leftmost;
529
530 if (!left)
531 return NULL;
532
533 return rb_entry(left, struct sched_entity, run_node);
534}
535
536static struct sched_entity *__pick_next_entity(struct sched_entity *se)
537{
538 struct rb_node *next = rb_next(&se->run_node);
539
540 if (!next)
541 return NULL;
542
543 return rb_entry(next, struct sched_entity, run_node);
544}
545
546#ifdef CONFIG_SCHED_DEBUG
547struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
548{
549 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
550
551 if (!last)
552 return NULL;
553
554 return rb_entry(last, struct sched_entity, run_node);
555}
556
557/**************************************************************
558 * Scheduling class statistics methods:
559 */
560
561int sched_proc_update_handler(struct ctl_table *table, int write,
562 void __user *buffer, size_t *lenp,
563 loff_t *ppos)
564{
565 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
566 int factor = get_update_sysctl_factor();
567
568 if (ret || !write)
569 return ret;
570
571 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
572 sysctl_sched_min_granularity);
573
574#define WRT_SYSCTL(name) \
575 (normalized_sysctl_##name = sysctl_##name / (factor))
576 WRT_SYSCTL(sched_min_granularity);
577 WRT_SYSCTL(sched_latency);
578 WRT_SYSCTL(sched_wakeup_granularity);
579#undef WRT_SYSCTL
580
581 return 0;
582}
583#endif
584
585/*
586 * delta /= w
587 */
588static inline unsigned long
589calc_delta_fair(unsigned long delta, struct sched_entity *se)
590{
591 if (unlikely(se->load.weight != NICE_0_LOAD))
592 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
593
594 return delta;
595}
596
597/*
598 * The idea is to set a period in which each task runs once.
599 *
600 * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch
601 * this period because otherwise the slices get too small.
602 *
603 * p = (nr <= nl) ? l : l*nr/nl
604 */
605static u64 __sched_period(unsigned long nr_running)
606{
607 u64 period = sysctl_sched_latency;
608 unsigned long nr_latency = sched_nr_latency;
609
610 if (unlikely(nr_running > nr_latency)) {
611 period = sysctl_sched_min_granularity;
612 period *= nr_running;
613 }
614
615 return period;
616}
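/*
 * Example, using the common unscaled defaults of sysctl_sched_latency = 6ms
 * and sysctl_sched_min_granularity = 0.75ms (hence sched_nr_latency = 8):
 *   nr_running = 5  -> period = 6ms
 *   nr_running = 16 -> period = 16 * 0.75ms = 12ms
 * so once more than sched_nr_latency tasks are runnable the period grows
 * linearly and no slice shrinks below the minimum granularity.
 */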
617
618/*
619 * We calculate the wall-time slice from the period by taking a part
620 * proportional to the weight.
621 *
622 * s = p*P[w/rw]
623 */
624static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
625{
626 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
627
628 for_each_sched_entity(se) {
629 struct load_weight *load;
630 struct load_weight lw;
631
632 cfs_rq = cfs_rq_of(se);
633 load = &cfs_rq->load;
634
635 if (unlikely(!se->on_rq)) {
636 lw = cfs_rq->load;
637
638 update_load_add(&lw, se->load.weight);
639 load = &lw;
640 }
641 slice = calc_delta_mine(slice, se->load.weight, load);
642 }
643 return slice;
644}
645
646/*
647 * We calculate the vruntime slice of a to be inserted task
648 *
649 * vs = s/w
650 */
651static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
652{
653 return calc_delta_fair(sched_slice(cfs_rq, se), se);
654}
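/*
 * Combining the two (flat runqueue, exact arithmetic; the code uses the
 * calc_delta_mine() fixed-point form): for an entity of weight w on a
 * runqueue of total weight W,
 *   s  = p * w / W
 *   vs = s * NICE_0_LOAD / w = p * NICE_0_LOAD / W
 * i.e. the vruntime credit per period is the same for every entity, which
 * is what makes sched_vslice() a weight-independent placement offset.
 */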
655
656static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
657static void update_cfs_shares(struct cfs_rq *cfs_rq);
658
659/*
660 * Update the current task's runtime statistics. Skip current tasks that
661 * are not in our scheduling class.
662 */
663static inline void
664__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
665 unsigned long delta_exec)
666{
667 unsigned long delta_exec_weighted;
668
669 schedstat_set(curr->statistics.exec_max,
670 max((u64)delta_exec, curr->statistics.exec_max));
671
672 curr->sum_exec_runtime += delta_exec;
673 schedstat_add(cfs_rq, exec_clock, delta_exec);
674 delta_exec_weighted = calc_delta_fair(delta_exec, curr);
675
676 curr->vruntime += delta_exec_weighted;
677 update_min_vruntime(cfs_rq);
678
679#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
680 cfs_rq->load_unacc_exec_time += delta_exec;
681#endif
682}
683
684static void update_curr(struct cfs_rq *cfs_rq)
685{
686 struct sched_entity *curr = cfs_rq->curr;
687 u64 now = rq_of(cfs_rq)->clock_task;
688 unsigned long delta_exec;
689
690 if (unlikely(!curr))
691 return;
692
693 /*
694 * Get the amount of time the current task was running
695 * since the last time we changed load (this cannot
696 * overflow on 32 bits):
697 */
698 delta_exec = (unsigned long)(now - curr->exec_start);
699 if (!delta_exec)
700 return;
701
702 __update_curr(cfs_rq, curr, delta_exec);
703 curr->exec_start = now;
704
705 if (entity_is_task(curr)) {
706 struct task_struct *curtask = task_of(curr);
707
708 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
709 cpuacct_charge(curtask, delta_exec);
710 account_group_exec_runtime(curtask, delta_exec);
711 }
712
713 account_cfs_rq_runtime(cfs_rq, delta_exec);
714}
715
716static inline void
717update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
718{
719 schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
720}
721
722/*
723 * Task is being enqueued - update stats:
724 */
725static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
726{
727 /*
728 * Are we enqueueing a waiting task? (for current tasks
729 * a dequeue/enqueue event is a NOP)
730 */
731 if (se != cfs_rq->curr)
732 update_stats_wait_start(cfs_rq, se);
733}
734
735static void
736update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
737{
738 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
739 rq_of(cfs_rq)->clock - se->statistics.wait_start));
740 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
741 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
742 rq_of(cfs_rq)->clock - se->statistics.wait_start);
743#ifdef CONFIG_SCHEDSTATS
744 if (entity_is_task(se)) {
745 trace_sched_stat_wait(task_of(se),
746 rq_of(cfs_rq)->clock - se->statistics.wait_start);
747 }
748#endif
749 schedstat_set(se->statistics.wait_start, 0);
750}
751
752static inline void
753update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
754{
755 /*
756 * Mark the end of the wait period if dequeueing a
757 * waiting task:
758 */
759 if (se != cfs_rq->curr)
760 update_stats_wait_end(cfs_rq, se);
761}
762
763/*
764 * We are picking a new current task - update its stats:
765 */
766static inline void
767update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
768{
769 /*
770 * We are starting a new run period:
771 */
772 se->exec_start = rq_of(cfs_rq)->clock_task;
773}
774
775/**************************************************
776 * Scheduling class queueing methods:
777 */
778
779#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
780static void
781add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
782{
783 cfs_rq->task_weight += weight;
784}
785#else
786static inline void
787add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
788{
789}
790#endif
791
792static void
793account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
794{
795 update_load_add(&cfs_rq->load, se->load.weight);
796 if (!parent_entity(se))
797 update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
798 if (entity_is_task(se)) {
799 add_cfs_task_weight(cfs_rq, se->load.weight);
800 list_add(&se->group_node, &cfs_rq->tasks);
801 }
802 cfs_rq->nr_running++;
803}
804
805static void
806account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
807{
808 update_load_sub(&cfs_rq->load, se->load.weight);
809 if (!parent_entity(se))
810 update_load_sub(&rq_of(cfs_rq)->load, se->load.weight);
811 if (entity_is_task(se)) {
812 add_cfs_task_weight(cfs_rq, -se->load.weight);
813 list_del_init(&se->group_node);
814 }
815 cfs_rq->nr_running--;
816}
817
818#ifdef CONFIG_FAIR_GROUP_SCHED
819/* we need this in update_cfs_load and load-balance functions below */
820static inline int throttled_hierarchy(struct cfs_rq *cfs_rq);
821# ifdef CONFIG_SMP
822static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
823 int global_update)
824{
825 struct task_group *tg = cfs_rq->tg;
826 long load_avg;
827
828 load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
829 load_avg -= cfs_rq->load_contribution;
830
831 if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
832 atomic_add(load_avg, &tg->load_weight);
833 cfs_rq->load_contribution += load_avg;
834 }
835}
836
837static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
838{
839 u64 period = sysctl_sched_shares_window;
840 u64 now, delta;
841 unsigned long load = cfs_rq->load.weight;
842
843 if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq))
844 return;
845
846 now = rq_of(cfs_rq)->clock_task;
847 delta = now - cfs_rq->load_stamp;
848
849 /* truncate load history at 4 idle periods */
850 if (cfs_rq->load_stamp > cfs_rq->load_last &&
851 now - cfs_rq->load_last > 4 * period) {
852 cfs_rq->load_period = 0;
853 cfs_rq->load_avg = 0;
854 delta = period - 1;
855 }
856
857 cfs_rq->load_stamp = now;
858 cfs_rq->load_unacc_exec_time = 0;
859 cfs_rq->load_period += delta;
860 if (load) {
861 cfs_rq->load_last = now;
862 cfs_rq->load_avg += delta * load;
863 }
864
865 /* consider updating load contribution on each fold or truncate */
866 if (global_update || cfs_rq->load_period > period
867 || !cfs_rq->load_period)
868 update_cfs_rq_load_contribution(cfs_rq, global_update);
869
870 while (cfs_rq->load_period > period) {
871 /*
872 * Inline assembly required to prevent the compiler
873 * optimising this loop into a divmod call.
874 * See __iter_div_u64_rem() for another example of this.
875 */
876 asm("" : "+rm" (cfs_rq->load_period));
877 cfs_rq->load_period /= 2;
878 cfs_rq->load_avg /= 2;
879 }
880
881 if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
882 list_del_leaf_cfs_rq(cfs_rq);
883}
884
885static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
886{
887 long tg_weight;
888
889 /*
890 * Use this CPU's actual weight instead of the last load_contribution
891 * to gain a more accurate current total weight. See
892 * update_cfs_rq_load_contribution().
893 */
894 tg_weight = atomic_read(&tg->load_weight);
895 tg_weight -= cfs_rq->load_contribution;
896 tg_weight += cfs_rq->load.weight;
897
898 return tg_weight;
899}
900
901static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
902{
903 long tg_weight, load, shares;
904
905 tg_weight = calc_tg_weight(tg, cfs_rq);
906 load = cfs_rq->load.weight;
907
908 shares = (tg->shares * load);
909 if (tg_weight)
910 shares /= tg_weight;
911
912 if (shares < MIN_SHARES)
913 shares = MIN_SHARES;
914 if (shares > tg->shares)
915 shares = tg->shares;
916
917 return shares;
918}
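/*
 * Worked example with hypothetical numbers: a group with tg->shares = 1024
 * has queues on two CPUs carrying load.weight 2048 and 1024. For the first
 * CPU tg_weight is roughly 3072, so
 *   shares = 1024 * 2048 / 3072 = 682
 * (clamped to [MIN_SHARES, tg->shares]); update_cfs_shares() below then
 * reweights that CPU's group entity to 682 via reweight_entity().
 */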
919
920static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
921{
922 if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
923 update_cfs_load(cfs_rq, 0);
924 update_cfs_shares(cfs_rq);
925 }
926}
927# else /* CONFIG_SMP */
928static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
929{
930}
931
932static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
933{
934 return tg->shares;
935}
936
937static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
938{
939}
940# endif /* CONFIG_SMP */
941static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
942 unsigned long weight)
943{
944 if (se->on_rq) {
945 /* commit outstanding execution time */
946 if (cfs_rq->curr == se)
947 update_curr(cfs_rq);
948 account_entity_dequeue(cfs_rq, se);
949 }
950
951 update_load_set(&se->load, weight);
952
953 if (se->on_rq)
954 account_entity_enqueue(cfs_rq, se);
955}
956
957static void update_cfs_shares(struct cfs_rq *cfs_rq)
958{
959 struct task_group *tg;
960 struct sched_entity *se;
961 long shares;
962
963 tg = cfs_rq->tg;
964 se = tg->se[cpu_of(rq_of(cfs_rq))];
965 if (!se || throttled_hierarchy(cfs_rq))
966 return;
967#ifndef CONFIG_SMP
968 if (likely(se->load.weight == tg->shares))
969 return;
970#endif
971 shares = calc_cfs_shares(cfs_rq, tg);
972
973 reweight_entity(cfs_rq_of(se), se, shares);
974}
975#else /* CONFIG_FAIR_GROUP_SCHED */
976static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
977{
978}
979
980static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
981{
982}
983
984static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
985{
986}
987#endif /* CONFIG_FAIR_GROUP_SCHED */
988
989static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
990{
991#ifdef CONFIG_SCHEDSTATS
992 struct task_struct *tsk = NULL;
993
994 if (entity_is_task(se))
995 tsk = task_of(se);
996
997 if (se->statistics.sleep_start) {
998 u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
999
1000 if ((s64)delta < 0)
1001 delta = 0;
1002
1003 if (unlikely(delta > se->statistics.sleep_max))
1004 se->statistics.sleep_max = delta;
1005
1006 se->statistics.sleep_start = 0;
1007 se->statistics.sum_sleep_runtime += delta;
1008
1009 if (tsk) {
1010 account_scheduler_latency(tsk, delta >> 10, 1);
1011 trace_sched_stat_sleep(tsk, delta);
1012 }
1013 }
1014 if (se->statistics.block_start) {
1015 u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
1016
1017 if ((s64)delta < 0)
1018 delta = 0;
1019
1020 if (unlikely(delta > se->statistics.block_max))
1021 se->statistics.block_max = delta;
1022
1023 se->statistics.block_start = 0;
1024 se->statistics.sum_sleep_runtime += delta;
1025
1026 if (tsk) {
1027 if (tsk->in_iowait) {
1028 se->statistics.iowait_sum += delta;
1029 se->statistics.iowait_count++;
1030 trace_sched_stat_iowait(tsk, delta);
1031 }
1032
1033 /*
1034 * Blocking time is in units of nanosecs, so shift by
1035 * 20 to get a milliseconds-range estimation of the
1036 * amount of time that the task spent sleeping:
1037 */
1038 if (unlikely(prof_on == SLEEP_PROFILING)) {
1039 profile_hits(SLEEP_PROFILING,
1040 (void *)get_wchan(tsk),
1041 delta >> 20);
1042 }
1043 account_scheduler_latency(tsk, delta >> 10, 0);
1044 }
1045 }
1046#endif
1047}
1048
1049static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
1050{
1051#ifdef CONFIG_SCHED_DEBUG
1052 s64 d = se->vruntime - cfs_rq->min_vruntime;
1053
1054 if (d < 0)
1055 d = -d;
1056
1057 if (d > 3*sysctl_sched_latency)
1058 schedstat_inc(cfs_rq, nr_spread_over);
1059#endif
1060}
1061
1062static void
1063place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
1064{
1065 u64 vruntime = cfs_rq->min_vruntime;
1066
1067 /*
1068 * The 'current' period is already promised to the current tasks,
1069 * however the extra weight of the new task will slow them down a
1070 * little, place the new task so that it fits in the slot that
1071 * stays open at the end.
1072 */
1073 if (initial && sched_feat(START_DEBIT))
1074 vruntime += sched_vslice(cfs_rq, se);
1075
1076 /* sleeps up to a single latency don't count. */
1077 if (!initial) {
1078 unsigned long thresh = sysctl_sched_latency;
1079
1080 /*
1081 * Halve their sleep time's effect, to allow
1082 * for a gentler effect of sleepers:
1083 */
1084 if (sched_feat(GENTLE_FAIR_SLEEPERS))
1085 thresh >>= 1;
1086
1087 vruntime -= thresh;
1088 }
1089
1090 /* ensure we never gain time by being placed backwards. */
1091 vruntime = max_vruntime(se->vruntime, vruntime);
1092
1093 se->vruntime = vruntime;
1094}
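/*
 * Net effect with the default feature bits (START_DEBIT and
 * GENTLE_FAIR_SLEEPERS set): a freshly forked entity (initial) is placed one
 * vslice past min_vruntime, so it waits for the current period to play out;
 * a waking sleeper is credited up to half a sched_latency, but the
 * max_vruntime() above guarantees its vruntime never moves backwards.
 */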
1095
1096static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
1097
1098static void
1099enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1100{
1101 /*
1102 * Update the normalized vruntime before updating min_vruntime
1103	 * through calling update_curr().
1104 */
1105 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
1106 se->vruntime += cfs_rq->min_vruntime;
1107
1108 /*
1109 * Update run-time statistics of the 'current'.
1110 */
1111 update_curr(cfs_rq);
1112 update_cfs_load(cfs_rq, 0);
1113 account_entity_enqueue(cfs_rq, se);
1114 update_cfs_shares(cfs_rq);
1115
1116 if (flags & ENQUEUE_WAKEUP) {
1117 place_entity(cfs_rq, se, 0);
1118 enqueue_sleeper(cfs_rq, se);
1119 }
1120
1121 update_stats_enqueue(cfs_rq, se);
1122 check_spread(cfs_rq, se);
1123 if (se != cfs_rq->curr)
1124 __enqueue_entity(cfs_rq, se);
1125 se->on_rq = 1;
1126
1127 if (cfs_rq->nr_running == 1) {
1128 list_add_leaf_cfs_rq(cfs_rq);
1129 check_enqueue_throttle(cfs_rq);
1130 }
1131}
1132
1133static void __clear_buddies_last(struct sched_entity *se)
1134{
1135 for_each_sched_entity(se) {
1136 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1137 if (cfs_rq->last == se)
1138 cfs_rq->last = NULL;
1139 else
1140 break;
1141 }
1142}
1143
1144static void __clear_buddies_next(struct sched_entity *se)
1145{
1146 for_each_sched_entity(se) {
1147 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1148 if (cfs_rq->next == se)
1149 cfs_rq->next = NULL;
1150 else
1151 break;
1152 }
1153}
1154
1155static void __clear_buddies_skip(struct sched_entity *se)
1156{
1157 for_each_sched_entity(se) {
1158 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1159 if (cfs_rq->skip == se)
1160 cfs_rq->skip = NULL;
1161 else
1162 break;
1163 }
1164}
1165
1166static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1167{
1168 if (cfs_rq->last == se)
1169 __clear_buddies_last(se);
1170
1171 if (cfs_rq->next == se)
1172 __clear_buddies_next(se);
1173
1174 if (cfs_rq->skip == se)
1175 __clear_buddies_skip(se);
1176}
1177
1178static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1179
1180static void
1181dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
1182{
1183 /*
1184 * Update run-time statistics of the 'current'.
1185 */
1186 update_curr(cfs_rq);
1187
1188 update_stats_dequeue(cfs_rq, se);
1189 if (flags & DEQUEUE_SLEEP) {
1190#ifdef CONFIG_SCHEDSTATS
1191 if (entity_is_task(se)) {
1192 struct task_struct *tsk = task_of(se);
1193
1194 if (tsk->state & TASK_INTERRUPTIBLE)
1195 se->statistics.sleep_start = rq_of(cfs_rq)->clock;
1196 if (tsk->state & TASK_UNINTERRUPTIBLE)
1197 se->statistics.block_start = rq_of(cfs_rq)->clock;
1198 }
1199#endif
1200 }
1201
1202 clear_buddies(cfs_rq, se);
1203
1204 if (se != cfs_rq->curr)
1205 __dequeue_entity(cfs_rq, se);
1206 se->on_rq = 0;
1207 update_cfs_load(cfs_rq, 0);
1208 account_entity_dequeue(cfs_rq, se);
1209
1210 /*
1211 * Normalize the entity after updating the min_vruntime because the
1212 * update can refer to the ->curr item and we need to reflect this
1213 * movement in our normalized position.
1214 */
1215 if (!(flags & DEQUEUE_SLEEP))
1216 se->vruntime -= cfs_rq->min_vruntime;
1217
1218 /* return excess runtime on last dequeue */
1219 return_cfs_rq_runtime(cfs_rq);
1220
1221 update_min_vruntime(cfs_rq);
1222 update_cfs_shares(cfs_rq);
1223}
1224
1225/*
1226 * Preempt the current task with a newly woken task if needed:
1227 */
1228static void
1229check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
1230{
1231 unsigned long ideal_runtime, delta_exec;
1232 struct sched_entity *se;
1233 s64 delta;
1234
1235 ideal_runtime = sched_slice(cfs_rq, curr);
1236 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1237 if (delta_exec > ideal_runtime) {
1238 resched_task(rq_of(cfs_rq)->curr);
1239 /*
1240 * The current task ran long enough, ensure it doesn't get
1241 * re-elected due to buddy favours.
1242 */
1243 clear_buddies(cfs_rq, curr);
1244 return;
1245 }
1246
1247 /*
1248 * Ensure that a task that missed wakeup preemption by a
1249 * narrow margin doesn't have to wait for a full slice.
1250 * This also mitigates buddy induced latencies under load.
1251 */
1252 if (delta_exec < sysctl_sched_min_granularity)
1253 return;
1254
1255 se = __pick_first_entity(cfs_rq);
1256 delta = curr->vruntime - se->vruntime;
1257
1258 if (delta < 0)
1259 return;
1260
1261 if (delta > ideal_runtime)
1262 resched_task(rq_of(cfs_rq)->curr);
1263}
1264
1265static void
1266set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
1267{
1268 /* 'current' is not kept within the tree. */
1269 if (se->on_rq) {
1270 /*
1271	 * Any task has to be enqueued before it gets to execute on
1272 * a CPU. So account for the time it spent waiting on the
1273 * runqueue.
1274 */
1275 update_stats_wait_end(cfs_rq, se);
1276 __dequeue_entity(cfs_rq, se);
1277 }
1278
1279 update_stats_curr_start(cfs_rq, se);
1280 cfs_rq->curr = se;
1281#ifdef CONFIG_SCHEDSTATS
1282 /*
1283 * Track our maximum slice length, if the CPU's load is at
1284	 * least twice that of our own weight (i.e. don't track it
1285 * when there are only lesser-weight tasks around):
1286 */
1287 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
1288 se->statistics.slice_max = max(se->statistics.slice_max,
1289 se->sum_exec_runtime - se->prev_sum_exec_runtime);
1290 }
1291#endif
1292 se->prev_sum_exec_runtime = se->sum_exec_runtime;
1293}
1294
1295static int
1296wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1297
1298/*
1299 * Pick the next process, keeping these things in mind, in this order:
1300 * 1) keep things fair between processes/task groups
1301 * 2) pick the "next" process, since someone really wants that to run
1302 * 3) pick the "last" process, for cache locality
1303 * 4) do not run the "skip" process, if something else is available
1304 */
1305static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
1306{
1307 struct sched_entity *se = __pick_first_entity(cfs_rq);
1308 struct sched_entity *left = se;
1309
1310 /*
1311 * Avoid running the skip buddy, if running something else can
1312 * be done without getting too unfair.
1313 */
1314 if (cfs_rq->skip == se) {
1315 struct sched_entity *second = __pick_next_entity(se);
1316 if (second && wakeup_preempt_entity(second, left) < 1)
1317 se = second;
1318 }
1319
1320 /*
1321 * Prefer last buddy, try to return the CPU to a preempted task.
1322 */
1323 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1324 se = cfs_rq->last;
1325
1326 /*
1327 * Someone really wants this to run. If it's not unfair, run it.
1328 */
1329 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1330 se = cfs_rq->next;
1331
1332 clear_buddies(cfs_rq, se);
1333
1334 return se;
1335}
1336
1337static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq);
1338
1339static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
1340{
1341 /*
1342 * If still on the runqueue then deactivate_task()
1343 * was not called and update_curr() has to be done:
1344 */
1345 if (prev->on_rq)
1346 update_curr(cfs_rq);
1347
1348 /* throttle cfs_rqs exceeding runtime */
1349 check_cfs_rq_runtime(cfs_rq);
1350
1351 check_spread(cfs_rq, prev);
1352 if (prev->on_rq) {
1353 update_stats_wait_start(cfs_rq, prev);
1354 /* Put 'current' back into the tree. */
1355 __enqueue_entity(cfs_rq, prev);
1356 }
1357 cfs_rq->curr = NULL;
1358}
1359
1360static void
1361entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
1362{
1363 /*
1364 * Update run-time statistics of the 'current'.
1365 */
1366 update_curr(cfs_rq);
1367
1368 /*
1369 * Update share accounting for long-running entities.
1370 */
1371 update_entity_shares_tick(cfs_rq);
1372
1373#ifdef CONFIG_SCHED_HRTICK
1374 /*
1375 * queued ticks are scheduled to match the slice, so don't bother
1376 * validating it and just reschedule.
1377 */
1378 if (queued) {
1379 resched_task(rq_of(cfs_rq)->curr);
1380 return;
1381 }
1382 /*
1383 * don't let the period tick interfere with the hrtick preemption
1384 */
1385 if (!sched_feat(DOUBLE_TICK) &&
1386 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
1387 return;
1388#endif
1389
1390 if (cfs_rq->nr_running > 1)
1391 check_preempt_tick(cfs_rq, curr);
1392}
1393
1394
1395/**************************************************
1396 * CFS bandwidth control machinery
1397 */
1398
1399#ifdef CONFIG_CFS_BANDWIDTH
1400
1401#ifdef HAVE_JUMP_LABEL
1402static struct jump_label_key __cfs_bandwidth_used;
1403
1404static inline bool cfs_bandwidth_used(void)
1405{
1406 return static_branch(&__cfs_bandwidth_used);
1407}
1408
1409void account_cfs_bandwidth_used(int enabled, int was_enabled)
1410{
1411 /* only need to count groups transitioning between enabled/!enabled */
1412 if (enabled && !was_enabled)
1413 jump_label_inc(&__cfs_bandwidth_used);
1414 else if (!enabled && was_enabled)
1415 jump_label_dec(&__cfs_bandwidth_used);
1416}
1417#else /* HAVE_JUMP_LABEL */
1418static bool cfs_bandwidth_used(void)
1419{
1420 return true;
1421}
1422
1423void account_cfs_bandwidth_used(int enabled, int was_enabled) {}
1424#endif /* HAVE_JUMP_LABEL */
1425
1426/*
1427 * default period for cfs group bandwidth.
1428 * default: 0.1s, units: nanoseconds
1429 */
1430static inline u64 default_cfs_period(void)
1431{
1432 return 100000000ULL;
1433}
1434
1435static inline u64 sched_cfs_bandwidth_slice(void)
1436{
1437 return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC;
1438}
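/*
 * Example, assuming the default sysctl_sched_cfs_bandwidth_slice of 5000us:
 * a group with quota = 20ms per 100ms period pulls runtime from the global
 * pool in ~5ms slices via assign_cfs_rq_runtime() below, so roughly four
 * slices exhaust the quota and any further cfs_rq in the group throttles
 * until the period timer refills the pool.
 */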
1439
1440/*
1441 * Replenish runtime according to assigned quota and update expiration time.
1442 * We use sched_clock_cpu directly instead of rq->clock to avoid adding
1443 * additional synchronization around rq->lock.
1444 *
1445 * requires cfs_b->lock
1446 */
1447void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
1448{
1449 u64 now;
1450
1451 if (cfs_b->quota == RUNTIME_INF)
1452 return;
1453
1454 now = sched_clock_cpu(smp_processor_id());
1455 cfs_b->runtime = cfs_b->quota;
1456 cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
1457}
1458
1459static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
1460{
1461 return &tg->cfs_bandwidth;
1462}
1463
1464/* returns 0 on failure to allocate runtime */
1465static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1466{
1467 struct task_group *tg = cfs_rq->tg;
1468 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
1469 u64 amount = 0, min_amount, expires;
1470
1471 /* note: this is a positive sum as runtime_remaining <= 0 */
1472 min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
1473
1474 raw_spin_lock(&cfs_b->lock);
1475 if (cfs_b->quota == RUNTIME_INF)
1476 amount = min_amount;
1477 else {
1478 /*
1479 * If the bandwidth pool has become inactive, then at least one
1480 * period must have elapsed since the last consumption.
1481 * Refresh the global state and ensure bandwidth timer becomes
1482 * active.
1483 */
1484 if (!cfs_b->timer_active) {
1485 __refill_cfs_bandwidth_runtime(cfs_b);
1486 __start_cfs_bandwidth(cfs_b);
1487 }
1488
1489 if (cfs_b->runtime > 0) {
1490 amount = min(cfs_b->runtime, min_amount);
1491 cfs_b->runtime -= amount;
1492 cfs_b->idle = 0;
1493 }
1494 }
1495 expires = cfs_b->runtime_expires;
1496 raw_spin_unlock(&cfs_b->lock);
1497
1498 cfs_rq->runtime_remaining += amount;
1499 /*
1500 * we may have advanced our local expiration to account for allowed
1501 * spread between our sched_clock and the one on which runtime was
1502 * issued.
1503 */
1504 if ((s64)(expires - cfs_rq->runtime_expires) > 0)
1505 cfs_rq->runtime_expires = expires;
1506
1507 return cfs_rq->runtime_remaining > 0;
1508}
1509
1510/*
1511 * Note: This depends on the synchronization provided by sched_clock and the
1512 * fact that rq->clock snapshots this value.
1513 */
1514static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1515{
1516 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1517 struct rq *rq = rq_of(cfs_rq);
1518
1519 /* if the deadline is ahead of our clock, nothing to do */
1520 if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0))
1521 return;
1522
1523 if (cfs_rq->runtime_remaining < 0)
1524 return;
1525
1526 /*
1527 * If the local deadline has passed we have to consider the
1528 * possibility that our sched_clock is 'fast' and the global deadline
1529 * has not truly expired.
1530 *
1531	 * Fortunately we can determine whether this is the case by checking
1532 * whether the global deadline has advanced.
1533 */
1534
1535 if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) {
1536 /* extend local deadline, drift is bounded above by 2 ticks */
1537 cfs_rq->runtime_expires += TICK_NSEC;
1538 } else {
1539 /* global deadline is ahead, expiration has passed */
1540 cfs_rq->runtime_remaining = 0;
1541 }
1542}
1543
1544static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
1545 unsigned long delta_exec)
1546{
1547 /* dock delta_exec before expiring quota (as it could span periods) */
1548 cfs_rq->runtime_remaining -= delta_exec;
1549 expire_cfs_rq_runtime(cfs_rq);
1550
1551 if (likely(cfs_rq->runtime_remaining > 0))
1552 return;
1553
1554 /*
1555 * if we're unable to extend our runtime we resched so that the active
1556 * hierarchy can be throttled
1557 */
1558 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
1559 resched_task(rq_of(cfs_rq)->curr);
1560}
1561
1562static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
1563 unsigned long delta_exec)
1564{
1565 if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
1566 return;
1567
1568 __account_cfs_rq_runtime(cfs_rq, delta_exec);
1569}
1570
1571static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
1572{
1573 return cfs_bandwidth_used() && cfs_rq->throttled;
1574}
1575
1576/* check whether cfs_rq, or any parent, is throttled */
1577static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
1578{
1579 return cfs_bandwidth_used() && cfs_rq->throttle_count;
1580}
1581
1582/*
1583 * Ensure that neither of the group entities corresponding to src_cpu or
1584 * dest_cpu are members of a throttled hierarchy when performing group
1585 * load-balance operations.
1586 */
1587static inline int throttled_lb_pair(struct task_group *tg,
1588 int src_cpu, int dest_cpu)
1589{
1590 struct cfs_rq *src_cfs_rq, *dest_cfs_rq;
1591
1592 src_cfs_rq = tg->cfs_rq[src_cpu];
1593 dest_cfs_rq = tg->cfs_rq[dest_cpu];
1594
1595 return throttled_hierarchy(src_cfs_rq) ||
1596 throttled_hierarchy(dest_cfs_rq);
1597}
1598
1599/* updated child weight may affect parent so we have to do this bottom up */
1600static int tg_unthrottle_up(struct task_group *tg, void *data)
1601{
1602 struct rq *rq = data;
1603 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
1604
1605 cfs_rq->throttle_count--;
1606#ifdef CONFIG_SMP
1607 if (!cfs_rq->throttle_count) {
1608 u64 delta = rq->clock_task - cfs_rq->load_stamp;
1609
1610 /* leaving throttled state, advance shares averaging windows */
1611 cfs_rq->load_stamp += delta;
1612 cfs_rq->load_last += delta;
1613
1614 /* update entity weight now that we are on_rq again */
1615 update_cfs_shares(cfs_rq);
1616 }
1617#endif
1618
1619 return 0;
1620}
1621
1622static int tg_throttle_down(struct task_group *tg, void *data)
1623{
1624 struct rq *rq = data;
1625 struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)];
1626
1627 /* group is entering throttled state, record last load */
1628 if (!cfs_rq->throttle_count)
1629 update_cfs_load(cfs_rq, 0);
1630 cfs_rq->throttle_count++;
1631
1632 return 0;
1633}
1634
1635static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
1636{
1637 struct rq *rq = rq_of(cfs_rq);
1638 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1639 struct sched_entity *se;
1640 long task_delta, dequeue = 1;
1641
1642 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
1643
1644 /* account load preceding throttle */
1645 rcu_read_lock();
1646 walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq);
1647 rcu_read_unlock();
1648
1649 task_delta = cfs_rq->h_nr_running;
1650 for_each_sched_entity(se) {
1651 struct cfs_rq *qcfs_rq = cfs_rq_of(se);
1652 /* throttled entity or throttle-on-deactivate */
1653 if (!se->on_rq)
1654 break;
1655
1656 if (dequeue)
1657 dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
1658 qcfs_rq->h_nr_running -= task_delta;
1659
1660 if (qcfs_rq->load.weight)
1661 dequeue = 0;
1662 }
1663
1664 if (!se)
1665 rq->nr_running -= task_delta;
1666
1667 cfs_rq->throttled = 1;
1668 cfs_rq->throttled_timestamp = rq->clock;
1669 raw_spin_lock(&cfs_b->lock);
1670 list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
1671 raw_spin_unlock(&cfs_b->lock);
1672}
1673
1674void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
1675{
1676 struct rq *rq = rq_of(cfs_rq);
1677 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1678 struct sched_entity *se;
1679 int enqueue = 1;
1680 long task_delta;
1681
1682 se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
1683
1684 cfs_rq->throttled = 0;
1685 raw_spin_lock(&cfs_b->lock);
1686 cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp;
1687 list_del_rcu(&cfs_rq->throttled_list);
1688 raw_spin_unlock(&cfs_b->lock);
1689 cfs_rq->throttled_timestamp = 0;
1690
1691 update_rq_clock(rq);
1692 /* update hierarchical throttle state */
1693 walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq);
1694
1695 if (!cfs_rq->load.weight)
1696 return;
1697
1698 task_delta = cfs_rq->h_nr_running;
1699 for_each_sched_entity(se) {
1700 if (se->on_rq)
1701 enqueue = 0;
1702
1703 cfs_rq = cfs_rq_of(se);
1704 if (enqueue)
1705 enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
1706 cfs_rq->h_nr_running += task_delta;
1707
1708 if (cfs_rq_throttled(cfs_rq))
1709 break;
1710 }
1711
1712 if (!se)
1713 rq->nr_running += task_delta;
1714
1715 /* determine whether we need to wake up potentially idle cpu */
1716 if (rq->curr == rq->idle && rq->cfs.nr_running)
1717 resched_task(rq->curr);
1718}
1719
1720static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
1721 u64 remaining, u64 expires)
1722{
1723 struct cfs_rq *cfs_rq;
1724 u64 runtime = remaining;
1725
1726 rcu_read_lock();
1727 list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
1728 throttled_list) {
1729 struct rq *rq = rq_of(cfs_rq);
1730
1731 raw_spin_lock(&rq->lock);
1732 if (!cfs_rq_throttled(cfs_rq))
1733 goto next;
1734
1735 runtime = -cfs_rq->runtime_remaining + 1;
1736 if (runtime > remaining)
1737 runtime = remaining;
1738 remaining -= runtime;
1739
1740 cfs_rq->runtime_remaining += runtime;
1741 cfs_rq->runtime_expires = expires;
1742
1743 /* we check whether we're throttled above */
1744 if (cfs_rq->runtime_remaining > 0)
1745 unthrottle_cfs_rq(cfs_rq);
1746
1747next:
1748 raw_spin_unlock(&rq->lock);
1749
1750 if (!remaining)
1751 break;
1752 }
1753 rcu_read_unlock();
1754
1755 return remaining;
1756}
1757
1758/*
1759 * Responsible for refilling a task_group's bandwidth and unthrottling its
1760 * cfs_rqs as appropriate. If there has been no activity within the last
1761 * period the timer is deactivated until scheduling resumes; cfs_b->idle is
1762 * used to track this state.
1763 */
1764static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
1765{
1766 u64 runtime, runtime_expires;
1767 int idle = 1, throttled;
1768
1769 raw_spin_lock(&cfs_b->lock);
1770 /* no need to continue the timer with no bandwidth constraint */
1771 if (cfs_b->quota == RUNTIME_INF)
1772 goto out_unlock;
1773
1774 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
1775 /* idle depends on !throttled (for the case of a large deficit) */
1776 idle = cfs_b->idle && !throttled;
1777 cfs_b->nr_periods += overrun;
1778
1779 /* if we're going inactive then everything else can be deferred */
1780 if (idle)
1781 goto out_unlock;
1782
1783 __refill_cfs_bandwidth_runtime(cfs_b);
1784
1785 if (!throttled) {
1786 /* mark as potentially idle for the upcoming period */
1787 cfs_b->idle = 1;
1788 goto out_unlock;
1789 }
1790
1791 /* account preceding periods in which throttling occurred */
1792 cfs_b->nr_throttled += overrun;
1793
1794 /*
1795 * There are throttled entities so we must first use the new bandwidth
1796 * to unthrottle them before making it generally available. This
1797 * ensures that all existing debts will be paid before a new cfs_rq is
1798 * allowed to run.
1799 */
1800 runtime = cfs_b->runtime;
1801 runtime_expires = cfs_b->runtime_expires;
1802 cfs_b->runtime = 0;
1803
1804 /*
1805 * This check is repeated as we are holding onto the new bandwidth
1806 * while we unthrottle. This can potentially race with an unthrottled
1807 * group trying to acquire new bandwidth from the global pool.
1808 */
1809 while (throttled && runtime > 0) {
1810 raw_spin_unlock(&cfs_b->lock);
1811 /* we can't nest cfs_b->lock while distributing bandwidth */
1812 runtime = distribute_cfs_runtime(cfs_b, runtime,
1813 runtime_expires);
1814 raw_spin_lock(&cfs_b->lock);
1815
1816 throttled = !list_empty(&cfs_b->throttled_cfs_rq);
1817 }
1818
1819 /* return (any) remaining runtime */
1820 cfs_b->runtime = runtime;
1821 /*
1822 * While we are ensured activity in the period following an
1823 * unthrottle, this also covers the case in which the new bandwidth is
1824 * insufficient to cover the existing bandwidth deficit. (Forcing the
1825 * timer to remain active while there are any throttled entities.)
1826 */
1827 cfs_b->idle = 0;
1828out_unlock:
1829 if (idle)
1830 cfs_b->timer_active = 0;
1831 raw_spin_unlock(&cfs_b->lock);
1832
1833 return idle;
1834}
1835
1836/* a cfs_rq won't donate quota below this amount */
1837static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC;
1838/* minimum remaining period time to redistribute slack quota */
1839static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC;
1840/* how long we wait to gather additional slack before distributing */
1841static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
1842
1843/* are we near the end of the current quota period? */
1844static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
1845{
1846 struct hrtimer *refresh_timer = &cfs_b->period_timer;
1847 u64 remaining;
1848
1849 /* if the call-back is running a quota refresh is already occurring */
1850 if (hrtimer_callback_running(refresh_timer))
1851 return 1;
1852
1853 /* is a quota refresh about to occur? */
1854 remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer));
1855 if (remaining < min_expire)
1856 return 1;
1857
1858 return 0;
1859}
1860
1861static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
1862{
1863 u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration;
1864
1865 /* if there's a quota refresh soon don't bother with slack */
1866 if (runtime_refresh_within(cfs_b, min_left))
1867 return;
1868
1869 start_bandwidth_timer(&cfs_b->slack_timer,
1870 ns_to_ktime(cfs_bandwidth_slack_period));
1871}
1872
1873/* we know any runtime found here is valid as update_curr() precedes return */
1874static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1875{
1876 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
1877 s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime;
1878
1879 if (slack_runtime <= 0)
1880 return;
1881
1882 raw_spin_lock(&cfs_b->lock);
1883 if (cfs_b->quota != RUNTIME_INF &&
1884 cfs_rq->runtime_expires == cfs_b->runtime_expires) {
1885 cfs_b->runtime += slack_runtime;
1886
1887 /* we are under rq->lock, defer unthrottling using a timer */
1888 if (cfs_b->runtime > sched_cfs_bandwidth_slice() &&
1889 !list_empty(&cfs_b->throttled_cfs_rq))
1890 start_cfs_slack_bandwidth(cfs_b);
1891 }
1892 raw_spin_unlock(&cfs_b->lock);
1893
1894 /* even if it's not valid for return we don't want to try again */
1895 cfs_rq->runtime_remaining -= slack_runtime;
1896}
1897
1898static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1899{
1900 if (!cfs_bandwidth_used())
1901 return;
1902
1903 if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
1904 return;
1905
1906 __return_cfs_rq_runtime(cfs_rq);
1907}
1908
1909/*
1910 * This is done with a timer (instead of inline with bandwidth return) since
1911 * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs.
1912 */
1913static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
1914{
1915 u64 runtime = 0, slice = sched_cfs_bandwidth_slice();
1916 u64 expires;
1917
1918 /* confirm we're still not at a refresh boundary */
1919 if (runtime_refresh_within(cfs_b, min_bandwidth_expiration))
1920 return;
1921
1922 raw_spin_lock(&cfs_b->lock);
1923 if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
1924 runtime = cfs_b->runtime;
1925 cfs_b->runtime = 0;
1926 }
1927 expires = cfs_b->runtime_expires;
1928 raw_spin_unlock(&cfs_b->lock);
1929
1930 if (!runtime)
1931 return;
1932
1933 runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
1934
1935 raw_spin_lock(&cfs_b->lock);
1936 if (expires == cfs_b->runtime_expires)
1937 cfs_b->runtime = runtime;
1938 raw_spin_unlock(&cfs_b->lock);
1939}
1940
1941/*
1942 * When a group wakes up we want to make sure that its quota is not already
1943 * expired/exceeded, otherwise it may be allowed to steal additional ticks of
1944	 * runtime as update_curr() throttling can not trigger until it's on-rq.
1945 */
1946static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
1947{
1948 if (!cfs_bandwidth_used())
1949 return;
1950
1951 /* an active group must be handled by the update_curr()->put() path */
1952 if (!cfs_rq->runtime_enabled || cfs_rq->curr)
1953 return;
1954
1955 /* ensure the group is not already throttled */
1956 if (cfs_rq_throttled(cfs_rq))
1957 return;
1958
1959 /* update runtime allocation */
1960 account_cfs_rq_runtime(cfs_rq, 0);
1961 if (cfs_rq->runtime_remaining <= 0)
1962 throttle_cfs_rq(cfs_rq);
1963}
1964
1965/* conditionally throttle active cfs_rq's from put_prev_entity() */
1966static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq)
1967{
1968 if (!cfs_bandwidth_used())
1969 return;
1970
1971 if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0))
1972 return;
1973
1974 /*
1975 * it's possible for a throttled entity to be forced into a running
1976 * state (e.g. set_curr_task), in this case we're finished.
1977 */
1978 if (cfs_rq_throttled(cfs_rq))
1979 return;
1980
1981 throttle_cfs_rq(cfs_rq);
1982}
1983
1984static inline u64 default_cfs_period(void);
1985static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun);
1986static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b);
1987
1988static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
1989{
1990 struct cfs_bandwidth *cfs_b =
1991 container_of(timer, struct cfs_bandwidth, slack_timer);
1992 do_sched_cfs_slack_timer(cfs_b);
1993
1994 return HRTIMER_NORESTART;
1995}
1996
1997static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
1998{
1999 struct cfs_bandwidth *cfs_b =
2000 container_of(timer, struct cfs_bandwidth, period_timer);
2001 ktime_t now;
2002 int overrun;
2003 int idle = 0;
2004
2005 for (;;) {
2006 now = hrtimer_cb_get_time(timer);
2007 overrun = hrtimer_forward(timer, now, cfs_b->period);
2008
2009 if (!overrun)
2010 break;
2011
2012 idle = do_sched_cfs_period_timer(cfs_b, overrun);
2013 }
2014
2015 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
2016}
2017
2018void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2019{
2020 raw_spin_lock_init(&cfs_b->lock);
2021 cfs_b->runtime = 0;
2022 cfs_b->quota = RUNTIME_INF;
2023 cfs_b->period = ns_to_ktime(default_cfs_period());
2024
2025 INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
2026 hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2027 cfs_b->period_timer.function = sched_cfs_period_timer;
2028 hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2029 cfs_b->slack_timer.function = sched_cfs_slack_timer;
2030}
2031
2032static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
2033{
2034 cfs_rq->runtime_enabled = 0;
2035 INIT_LIST_HEAD(&cfs_rq->throttled_list);
2036}
2037
2038/* requires cfs_b->lock, may release to reprogram timer */
2039void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2040{
2041 /*
2042 * The timer may be active because we're trying to set a new bandwidth
2043 * period or because we're racing with the tear-down path
2044 * (timer_active==0 becomes visible before the hrtimer call-back
2045 * terminates). In either case we ensure that it's re-programmed
2046 */
2047 while (unlikely(hrtimer_active(&cfs_b->period_timer))) {
2048 raw_spin_unlock(&cfs_b->lock);
2049 /* ensure cfs_b->lock is available while we wait */
2050 hrtimer_cancel(&cfs_b->period_timer);
2051
2052 raw_spin_lock(&cfs_b->lock);
2053 /* if someone else restarted the timer then we're done */
2054 if (cfs_b->timer_active)
2055 return;
2056 }
2057
2058 cfs_b->timer_active = 1;
2059 start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
2060}
2061
2062static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
2063{
2064 hrtimer_cancel(&cfs_b->period_timer);
2065 hrtimer_cancel(&cfs_b->slack_timer);
2066}
2067
2068void unthrottle_offline_cfs_rqs(struct rq *rq)
2069{
2070 struct cfs_rq *cfs_rq;
2071
2072 for_each_leaf_cfs_rq(rq, cfs_rq) {
2073 struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
2074
2075 if (!cfs_rq->runtime_enabled)
2076 continue;
2077
2078 /*
2079 * clock_task is not advancing so we just need to make sure
2080 * there's some valid quota amount
2081 */
2082 cfs_rq->runtime_remaining = cfs_b->quota;
2083 if (cfs_rq_throttled(cfs_rq))
2084 unthrottle_cfs_rq(cfs_rq);
2085 }
2086}
2087
2088#else /* CONFIG_CFS_BANDWIDTH */
2089static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
2090 unsigned long delta_exec) {}
2091static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2092static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
2093static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2094
2095static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq)
2096{
2097 return 0;
2098}
2099
2100static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
2101{
2102 return 0;
2103}
2104
2105static inline int throttled_lb_pair(struct task_group *tg,
2106 int src_cpu, int dest_cpu)
2107{
2108 return 0;
2109}
2110
2111void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2112
2113#ifdef CONFIG_FAIR_GROUP_SCHED
2114static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
2115#endif
2116
2117static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
2118{
2119 return NULL;
2120}
2121static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
2122void unthrottle_offline_cfs_rqs(struct rq *rq) {}
2123
2124#endif /* CONFIG_CFS_BANDWIDTH */
2125
2126/**************************************************
2127 * CFS operations on tasks:
2128 */
2129
2130#ifdef CONFIG_SCHED_HRTICK
2131static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
2132{
2133 struct sched_entity *se = &p->se;
2134 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2135
2136 WARN_ON(task_rq(p) != rq);
2137
2138 if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
2139 u64 slice = sched_slice(cfs_rq, se);
2140 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
2141 s64 delta = slice - ran;
2142
2143 if (delta < 0) {
2144 if (rq->curr == p)
2145 resched_task(p);
2146 return;
2147 }
2148
2149 /*
2150 * Don't schedule slices shorter than 10000ns, that just
2151 * doesn't make sense. Rely on vruntime for fairness.
2152 */
2153 if (rq->curr != p)
2154 delta = max_t(s64, 10000LL, delta);
2155
2156 hrtick_start(rq, delta);
2157 }
2158}
2159
2160/*
2161 * called from enqueue/dequeue and updates the hrtick when the
2162 * current task is from our class and nr_running is low enough
2163 * to matter.
2164 */
2165static void hrtick_update(struct rq *rq)
2166{
2167 struct task_struct *curr = rq->curr;
2168
2169 if (curr->sched_class != &fair_sched_class)
2170 return;
2171
2172 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
2173 hrtick_start_fair(rq, curr);
2174}
2175#else /* !CONFIG_SCHED_HRTICK */
2176static inline void
2177hrtick_start_fair(struct rq *rq, struct task_struct *p)
2178{
2179}
2180
2181static inline void hrtick_update(struct rq *rq)
2182{
2183}
2184#endif
2185
2186/*
2187 * The enqueue_task method is called before nr_running is
2188 * increased. Here we update the fair scheduling stats and
2189 * then put the task into the rbtree:
2190 */
2191static void
2192enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2193{
2194 struct cfs_rq *cfs_rq;
2195 struct sched_entity *se = &p->se;
2196
2197 for_each_sched_entity(se) {
2198 if (se->on_rq)
2199 break;
2200 cfs_rq = cfs_rq_of(se);
2201 enqueue_entity(cfs_rq, se, flags);
2202
2203 /*
2204 * end evaluation on encountering a throttled cfs_rq
2205 *
2206 * note: in the case of encountering a throttled cfs_rq we will
2207 * post the final h_nr_running increment below.
2208 */
2209 if (cfs_rq_throttled(cfs_rq))
2210 break;
2211 cfs_rq->h_nr_running++;
2212
2213 flags = ENQUEUE_WAKEUP;
2214 }
2215
2216 for_each_sched_entity(se) {
2217 cfs_rq = cfs_rq_of(se);
2218 cfs_rq->h_nr_running++;
2219
2220 if (cfs_rq_throttled(cfs_rq))
2221 break;
2222
2223 update_cfs_load(cfs_rq, 0);
2224 update_cfs_shares(cfs_rq);
2225 }
2226
2227 if (!se)
2228 inc_nr_running(rq);
2229 hrtick_update(rq);
2230}
2231
2232static void set_next_buddy(struct sched_entity *se);
2233
2234/*
2235 * The dequeue_task method is called before nr_running is
2236 * decreased. We remove the task from the rbtree and
2237 * update the fair scheduling stats:
2238 */
2239static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
2240{
2241 struct cfs_rq *cfs_rq;
2242 struct sched_entity *se = &p->se;
2243 int task_sleep = flags & DEQUEUE_SLEEP;
2244
2245 for_each_sched_entity(se) {
2246 cfs_rq = cfs_rq_of(se);
2247 dequeue_entity(cfs_rq, se, flags);
2248
2249 /*
2250 * end evaluation on encountering a throttled cfs_rq
2251 *
2252 * note: in the case of encountering a throttled cfs_rq we will
2253 * post the final h_nr_running decrement below.
2254 */
2255 if (cfs_rq_throttled(cfs_rq))
2256 break;
2257 cfs_rq->h_nr_running--;
2258
2259 /* Don't dequeue parent if it has other entities besides us */
2260 if (cfs_rq->load.weight) {
2261 /*
2262 * Bias pick_next to pick a task from this cfs_rq, as
2263 * p is sleeping when it is within its sched_slice.
2264 */
2265 if (task_sleep && parent_entity(se))
2266 set_next_buddy(parent_entity(se));
2267
2268 /* avoid re-evaluating load for this entity */
2269 se = parent_entity(se);
2270 break;
2271 }
2272 flags |= DEQUEUE_SLEEP;
2273 }
2274
2275 for_each_sched_entity(se) {
2276 cfs_rq = cfs_rq_of(se);
2277 cfs_rq->h_nr_running--;
2278
2279 if (cfs_rq_throttled(cfs_rq))
2280 break;
2281
2282 update_cfs_load(cfs_rq, 0);
2283 update_cfs_shares(cfs_rq);
2284 }
2285
2286 if (!se)
2287 dec_nr_running(rq);
2288 hrtick_update(rq);
2289}
2290
2291#ifdef CONFIG_SMP
2292/* Used instead of source_load when we know the type == 0 */
2293static unsigned long weighted_cpuload(const int cpu)
2294{
2295 return cpu_rq(cpu)->load.weight;
2296}
2297
2298/*
2299 * Return a low guess at the load of a migration-source cpu weighted
2300 * according to the scheduling class and "nice" value.
2301 *
2302 * We want to under-estimate the load of migration sources, to
2303 * balance conservatively.
2304 */
2305static unsigned long source_load(int cpu, int type)
2306{
2307 struct rq *rq = cpu_rq(cpu);
2308 unsigned long total = weighted_cpuload(cpu);
2309
2310 if (type == 0 || !sched_feat(LB_BIAS))
2311 return total;
2312
2313 return min(rq->cpu_load[type-1], total);
2314}
2315
2316/*
2317 * Return a high guess at the load of a migration-target cpu weighted
2318 * according to the scheduling class and "nice" value.
2319 */
2320static unsigned long target_load(int cpu, int type)
2321{
2322 struct rq *rq = cpu_rq(cpu);
2323 unsigned long total = weighted_cpuload(cpu);
2324
2325 if (type == 0 || !sched_feat(LB_BIAS))
2326 return total;
2327
2328 return max(rq->cpu_load[type-1], total);
2329}
2330
2331static unsigned long power_of(int cpu)
2332{
2333 return cpu_rq(cpu)->cpu_power;
2334}
2335
2336static unsigned long cpu_avg_load_per_task(int cpu)
2337{
2338 struct rq *rq = cpu_rq(cpu);
2339 unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
2340
2341 if (nr_running)
2342 return rq->load.weight / nr_running;
2343
2344 return 0;
2345}
2346
2347
2348static void task_waking_fair(struct task_struct *p)
2349{
2350 struct sched_entity *se = &p->se;
2351 struct cfs_rq *cfs_rq = cfs_rq_of(se);
2352 u64 min_vruntime;
2353
2354#ifndef CONFIG_64BIT
2355 u64 min_vruntime_copy;
2356
2357 do {
2358 min_vruntime_copy = cfs_rq->min_vruntime_copy;
2359 smp_rmb();
2360 min_vruntime = cfs_rq->min_vruntime;
2361 } while (min_vruntime != min_vruntime_copy);
2362#else
2363 min_vruntime = cfs_rq->min_vruntime;
2364#endif
2365
2366 se->vruntime -= min_vruntime;
2367}
2368
2369#ifdef CONFIG_FAIR_GROUP_SCHED
2370/*
2371 * effective_load() calculates the load change as seen from the root_task_group
2372 *
2373 * Adding load to a group doesn't make a group heavier, but can cause movement
2374 * of group shares between cpus. Assuming the shares were perfectly aligned one
2375 * can calculate the shift in shares.
2376 *
2377 * Calculate the effective load difference if @wl is added (subtracted) to @tg
2378 * on this @cpu and results in a total addition (subtraction) of @wg to the
2379 * total group weight.
2380 *
2381 * Given a runqueue weight distribution (rw_i) we can compute a shares
2382 * distribution (s_i) using:
2383 *
2384 * s_i = rw_i / \Sum rw_j (1)
2385 *
2386 * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
2387 * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
2388 * shares distribution (s_i):
2389 *
2390 * rw_i = { 2, 4, 1, 0 }
2391 * s_i = { 2/7, 4/7, 1/7, 0 }
2392 *
2393 * As per wake_affine() we're interested in the load of two CPUs (the CPU the
2394 * task used to run on and the CPU the waker is running on), we need to
2395 * compute the effect of waking a task on either CPU and, in case of a sync
2396 * wakeup, compute the effect of the current task going to sleep.
2397 *
2398 * So for a change of @wl to the local @cpu with an overall group weight change
2399 * of @wl we can compute the new shares distribution (s'_i) using:
2400 *
2401 * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
2402 *
2403 * Suppose we're interested in CPUs 0 and 1, and want to compute the load
2404 * differences in waking a task to CPU 0. The additional task changes the
2405 * weight and shares distributions like:
2406 *
2407 * rw'_i = { 3, 4, 1, 0 }
2408 * s'_i = { 3/8, 4/8, 1/8, 0 }
2409 *
2410 * We can then compute the difference in effective weight by using:
2411 *
2412 * dw_i = S * (s'_i - s_i) (3)
2413 *
2414 * Where 'S' is the group weight as seen by its parent.
2415 *
2416 * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
2417 * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
2418 * 4/7) times the weight of the group.
2419 */
2420static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
2421{
2422 struct sched_entity *se = tg->se[cpu];
2423
2424 if (!tg->parent) /* the trivial, non-cgroup case */
2425 return wl;
2426
2427 for_each_sched_entity(se) {
2428 long w, W;
2429
2430 tg = se->my_q->tg;
2431
2432 /*
2433 * W = @wg + \Sum rw_j
2434 */
2435 W = wg + calc_tg_weight(tg, se->my_q);
2436
2437 /*
2438 * w = rw_i + @wl
2439 */
2440 w = se->my_q->load.weight + wl;
2441
2442 /*
2443 * wl = S * s'_i; see (2)
2444 */
2445 if (W > 0 && w < W)
2446 wl = (w * tg->shares) / W;
2447 else
2448 wl = tg->shares;
2449
2450 /*
2451 * Per the above, wl is the new se->load.weight value; since
2452 * those are clipped to [MIN_SHARES, ...) do so now. See
2453 * calc_cfs_shares().
2454 */
2455 if (wl < MIN_SHARES)
2456 wl = MIN_SHARES;
2457
2458 /*
2459 * wl = dw_i = S * (s'_i - s_i); see (3)
2460 */
2461 wl -= se->load.weight;
2462
2463 /*
2464 * Recursively apply this logic to all parent groups to compute
2465 * the final effective load change on the root group. Since
2466 * only the @tg group gets extra weight, all parent groups can
2467 * only redistribute existing shares. @wl is the shift in shares
2468 * resulting from this level per the above.
2469 */
2470 wg = 0;
2471 }
2472
2473 return wl;
2474}
2475#else
2476
2477static inline unsigned long effective_load(struct task_group *tg, int cpu,
2478 unsigned long wl, unsigned long wg)
2479{
2480 return wl;
2481}
2482
2483#endif
2484
2485static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
2486{
2487 s64 this_load, load;
2488 int idx, this_cpu, prev_cpu;
2489 unsigned long tl_per_task;
2490 struct task_group *tg;
2491 unsigned long weight;
2492 int balanced;
2493
2494 idx = sd->wake_idx;
2495 this_cpu = smp_processor_id();
2496 prev_cpu = task_cpu(p);
2497 load = source_load(prev_cpu, idx);
2498 this_load = target_load(this_cpu, idx);
2499
2500 /*
2501 * If sync wakeup then subtract the (maximum possible)
2502 * effect of the currently running task from the load
2503 * of the current CPU:
2504 */
2505 if (sync) {
2506 tg = task_group(current);
2507 weight = current->se.load.weight;
2508
2509 this_load += effective_load(tg, this_cpu, -weight, -weight);
2510 load += effective_load(tg, prev_cpu, 0, -weight);
2511 }
2512
2513 tg = task_group(p);
2514 weight = p->se.load.weight;
2515
2516 /*
2517 * In low-load situations, where prev_cpu is idle and this_cpu is idle
2518 * due to the sync cause above having dropped this_load to 0, we'll
2519 * always have an imbalance, but there's really nothing you can do
2520 * about that, so that's good too.
2521 *
2522 * Otherwise check if either cpus are near enough in load to allow this
2523 * task to be woken on this_cpu.
2524 */
2525 if (this_load > 0) {
2526 s64 this_eff_load, prev_eff_load;
2527
2528 this_eff_load = 100;
2529 this_eff_load *= power_of(prev_cpu);
2530 this_eff_load *= this_load +
2531 effective_load(tg, this_cpu, weight, weight);
2532
2533 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
2534 prev_eff_load *= power_of(this_cpu);
2535 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
2536
2537 balanced = this_eff_load <= prev_eff_load;
2538 } else
2539 balanced = true;
2540
2541 /*
2542 * If the currently running task will sleep within
2543 * a reasonable amount of time then attract this newly
2544 * woken task:
2545 */
2546 if (sync && balanced)
2547 return 1;
2548
2549 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
2550 tl_per_task = cpu_avg_load_per_task(this_cpu);
2551
2552 if (balanced ||
2553 (this_load <= load &&
2554 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
2555 /*
2556 * This domain has SD_WAKE_AFFINE and
2557 * p is cache cold in this domain, and
2558 * there is no bad imbalance.
2559 */
2560 schedstat_inc(sd, ttwu_move_affine);
2561 schedstat_inc(p, se.statistics.nr_wakeups_affine);
2562
2563 return 1;
2564 }
2565 return 0;
2566}
2567
2568/*
2569 * find_idlest_group finds and returns the least busy CPU group within the
2570 * domain.
2571 */
2572static struct sched_group *
2573find_idlest_group(struct sched_domain *sd, struct task_struct *p,
2574 int this_cpu, int load_idx)
2575{
2576 struct sched_group *idlest = NULL, *group = sd->groups;
2577 unsigned long min_load = ULONG_MAX, this_load = 0;
2578 int imbalance = 100 + (sd->imbalance_pct-100)/2;
2579
2580 do {
2581 unsigned long load, avg_load;
2582 int local_group;
2583 int i;
2584
2585 /* Skip over this group if it has no CPUs allowed */
2586 if (!cpumask_intersects(sched_group_cpus(group),
2587 tsk_cpus_allowed(p)))
2588 continue;
2589
2590 local_group = cpumask_test_cpu(this_cpu,
2591 sched_group_cpus(group));
2592
2593 /* Tally up the load of all CPUs in the group */
2594 avg_load = 0;
2595
2596 for_each_cpu(i, sched_group_cpus(group)) {
2597 /* Bias balancing toward cpus of our domain */
2598 if (local_group)
2599 load = source_load(i, load_idx);
2600 else
2601 load = target_load(i, load_idx);
2602
2603 avg_load += load;
2604 }
2605
2606 /* Adjust by relative CPU power of the group */
2607 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
2608
2609 if (local_group) {
2610 this_load = avg_load;
2611 } else if (avg_load < min_load) {
2612 min_load = avg_load;
2613 idlest = group;
2614 }
2615 } while (group = group->next, group != sd->groups);
2616
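	/*
	 * Only return a remote group when it is clearly less loaded than
	 * the local one: e.g. with an imbalance_pct of 125 the threshold
	 * computed above is 100 + (125 - 100) / 2 = 112, so NULL is
	 * returned (keeping the local group) unless the local load exceeds
	 * the idlest group's load by at least roughly 12%.
	 */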
2617 if (!idlest || 100*this_load < imbalance*min_load)
2618 return NULL;
2619 return idlest;
2620}
2621
2622/*
2623 * find_idlest_cpu - find the idlest cpu among the cpus in group.
2624 */
2625static int
2626find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
2627{
2628 unsigned long load, min_load = ULONG_MAX;
2629 int idlest = -1;
2630 int i;
2631
2632 /* Traverse only the allowed CPUs */
2633 for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
2634 load = weighted_cpuload(i);
2635
2636 if (load < min_load || (load == min_load && i == this_cpu)) {
2637 min_load = load;
2638 idlest = i;
2639 }
2640 }
2641
2642 return idlest;
2643}
2644
2645/*
2646 * Try and locate an idle CPU in the sched_domain.
2647 */
2648static int select_idle_sibling(struct task_struct *p, int target)
2649{
2650 int cpu = smp_processor_id();
2651 int prev_cpu = task_cpu(p);
2652 struct sched_domain *sd;
2653 struct sched_group *sg;
2654 int i, smt = 0;
2655
2656 /*
2657 * If the task is going to be woken-up on this cpu and if it is
2658 * already idle, then it is the right target.
2659 */
2660 if (target == cpu && idle_cpu(cpu))
2661 return cpu;
2662
2663 /*
2664 * If the task is going to be woken-up on the cpu where it previously
2665	 * ran and if it is currently idle, then it is the right target.
2666 */
2667 if (target == prev_cpu && idle_cpu(prev_cpu))
2668 return prev_cpu;
2669
2670 /*
2671	 * Otherwise, iterate the domains and find an eligible idle cpu.
2672 */
2673 rcu_read_lock();
2674again:
2675 for_each_domain(target, sd) {
2676 if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
2677 continue;
2678
2679 if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
2680 if (!smt) {
2681 smt = 1;
2682 goto again;
2683 }
2684 break;
2685 }
2686
2687 sg = sd->groups;
2688 do {
2689 if (!cpumask_intersects(sched_group_cpus(sg),
2690 tsk_cpus_allowed(p)))
2691 goto next;
2692
2693 for_each_cpu(i, sched_group_cpus(sg)) {
2694 if (!idle_cpu(i))
2695 goto next;
2696 }
2697
2698 target = cpumask_first_and(sched_group_cpus(sg),
2699 tsk_cpus_allowed(p));
2700 goto done;
2701next:
2702 sg = sg->next;
2703 } while (sg != sd->groups);
2704 }
2705done:
2706 rcu_read_unlock();
2707
2708 return target;
2709}
2710
2711/*
2712 * select_task_rq_fair: balance the current task (running on this cpu) in
2713 * domains that have the relevant sd_flag set. In practice this is
2714 * SD_BALANCE_WAKE, SD_BALANCE_FORK and SD_BALANCE_EXEC.
2715 *
2716 * Balance, i.e. select the least loaded group.
2717 *
2718 * Returns the target CPU number, or the same CPU if no balancing is needed.
2719 *
2720 * preempt must be disabled.
2721 */
2722static int
2723select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
2724{
2725 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
2726 int cpu = smp_processor_id();
2727 int prev_cpu = task_cpu(p);
2728 int new_cpu = cpu;
2729 int want_affine = 0;
2730 int want_sd = 1;
2731 int sync = wake_flags & WF_SYNC;
2732
2733 if (sd_flag & SD_BALANCE_WAKE) {
2734 if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
2735 want_affine = 1;
2736 new_cpu = prev_cpu;
2737 }
2738
2739 rcu_read_lock();
2740 for_each_domain(cpu, tmp) {
2741 if (!(tmp->flags & SD_LOAD_BALANCE))
2742 continue;
2743
2744 /*
2745		 * If power savings logic is enabled for a domain, check whether
2746		 * we are not overloaded; if so, don't balance any wider.
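		 *
		 * The capacity estimate below is the domain's total cpu
		 * power divided by SCHED_POWER_SCALE, i.e. roughly the
		 * number of full CPUs; e.g. a 4-CPU domain running fewer
		 * than 4 tasks (fewer than 8 once nr_running is halved for
		 * SD_POWERSAVINGS_BALANCE) is treated as not overloaded.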
2747 */
2748 if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
2749 unsigned long power = 0;
2750 unsigned long nr_running = 0;
2751 unsigned long capacity;
2752 int i;
2753
2754 for_each_cpu(i, sched_domain_span(tmp)) {
2755 power += power_of(i);
2756 nr_running += cpu_rq(i)->cfs.nr_running;
2757 }
2758
2759 capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
2760
2761 if (tmp->flags & SD_POWERSAVINGS_BALANCE)
2762 nr_running /= 2;
2763
2764 if (nr_running < capacity)
2765 want_sd = 0;
2766 }
2767
2768 /*
2769 * If both cpu and prev_cpu are part of this domain,
2770 * cpu is a valid SD_WAKE_AFFINE target.
2771 */
2772 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
2773 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
2774 affine_sd = tmp;
2775 want_affine = 0;
2776 }
2777
2778 if (!want_sd && !want_affine)
2779 break;
2780
2781 if (!(tmp->flags & sd_flag))
2782 continue;
2783
2784 if (want_sd)
2785 sd = tmp;
2786 }
2787
2788 if (affine_sd) {
2789 if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
2790 prev_cpu = cpu;
2791
2792 new_cpu = select_idle_sibling(p, prev_cpu);
2793 goto unlock;
2794 }
2795
2796 while (sd) {
2797 int load_idx = sd->forkexec_idx;
2798 struct sched_group *group;
2799 int weight;
2800
2801 if (!(sd->flags & sd_flag)) {
2802 sd = sd->child;
2803 continue;
2804 }
2805
2806 if (sd_flag & SD_BALANCE_WAKE)
2807 load_idx = sd->wake_idx;
2808
2809 group = find_idlest_group(sd, p, cpu, load_idx);
2810 if (!group) {
2811 sd = sd->child;
2812 continue;
2813 }
2814
2815 new_cpu = find_idlest_cpu(group, p, cpu);
2816 if (new_cpu == -1 || new_cpu == cpu) {
2817 /* Now try balancing at a lower domain level of cpu */
2818 sd = sd->child;
2819 continue;
2820 }
2821
2822 /* Now try balancing at a lower domain level of new_cpu */
2823 cpu = new_cpu;
2824 weight = sd->span_weight;
2825 sd = NULL;
2826 for_each_domain(cpu, tmp) {
2827 if (weight <= tmp->span_weight)
2828 break;
2829 if (tmp->flags & sd_flag)
2830 sd = tmp;
2831 }
2832 /* while loop will break here if sd == NULL */
2833 }
2834unlock:
2835 rcu_read_unlock();
2836
2837 return new_cpu;
2838}
2839#endif /* CONFIG_SMP */
2840
2841static unsigned long
2842wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
2843{
2844 unsigned long gran = sysctl_sched_wakeup_granularity;
2845
2846 /*
2847	 * Since it is curr that is running now, convert the gran from
2848	 * real-time to virtual-time in se's units.
2849	 *
2850	 * By using 'se' instead of 'curr' we penalize light tasks, so
2851	 * they get preempted more easily. That is, if 'se' < 'curr' then
2852	 * the resulting gran will be larger, therefore penalizing the
2853	 * lighter task; if, on the other hand, 'se' > 'curr' then the
2854	 * resulting gran will be smaller, again penalizing the lighter task.
2855 *
2856 * This is especially important for buddies when the leftmost
2857 * task is higher priority than the buddy.
2858 */
2859 return calc_delta_fair(gran, se);
2860}
2861
2862/*
2863 * Should 'se' preempt 'curr'.
2864 *
2865 * |s1
2866 * |s2
2867 * |s3
2868 * g
2869 * |<--->|c
2870 *
2871 * w(c, s1) = -1
2872 * w(c, s2) = 0
2873 * w(c, s3) = 1
2874 *
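 * That is: an se whose vruntime is already at or beyond curr's (s1)
 * does not preempt (-1); an se that lags curr by no more than the
 * weight-scaled granularity g (s2) does not preempt either (0); only
 * an se that lags curr by more than g (s3) preempts (1). For example,
 * if curr->vruntime leads se->vruntime by 3ms of virtual time while
 * the scaled granularity is 1ms, vdiff > gran and 1 is returned.
 *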
2875 */
2876static int
2877wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
2878{
2879 s64 gran, vdiff = curr->vruntime - se->vruntime;
2880
2881 if (vdiff <= 0)
2882 return -1;
2883
2884 gran = wakeup_gran(curr, se);
2885 if (vdiff > gran)
2886 return 1;
2887
2888 return 0;
2889}
2890
2891static void set_last_buddy(struct sched_entity *se)
2892{
2893 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
2894 return;
2895
2896 for_each_sched_entity(se)
2897 cfs_rq_of(se)->last = se;
2898}
2899
2900static void set_next_buddy(struct sched_entity *se)
2901{
2902 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
2903 return;
2904
2905 for_each_sched_entity(se)
2906 cfs_rq_of(se)->next = se;
2907}
2908
2909static void set_skip_buddy(struct sched_entity *se)
2910{
2911 for_each_sched_entity(se)
2912 cfs_rq_of(se)->skip = se;
2913}
2914
2915/*
2916 * Preempt the current task with a newly woken task if needed:
2917 */
2918static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
2919{
2920 struct task_struct *curr = rq->curr;
2921 struct sched_entity *se = &curr->se, *pse = &p->se;
2922 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
2923 int scale = cfs_rq->nr_running >= sched_nr_latency;
2924 int next_buddy_marked = 0;
2925
2926 if (unlikely(se == pse))
2927 return;
2928
2929 /*
2930 * This is possible from callers such as pull_task(), in which we
2931	 * unconditionally check_preempt_curr() after an enqueue (which may have
2932	 * led to a throttle). This both saves work and prevents false
2933 * next-buddy nomination below.
2934 */
2935 if (unlikely(throttled_hierarchy(cfs_rq_of(pse))))
2936 return;
2937
2938 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
2939 set_next_buddy(pse);
2940 next_buddy_marked = 1;
2941 }
2942
2943 /*
2944 * We can come here with TIF_NEED_RESCHED already set from new task
2945 * wake up path.
2946 *
2947 * Note: this also catches the edge-case of curr being in a throttled
2948 * group (e.g. via set_curr_task), since update_curr() (in the
2949 * enqueue of curr) will have resulted in resched being set. This
2950 * prevents us from potentially nominating it as a false LAST_BUDDY
2951 * below.
2952 */
2953 if (test_tsk_need_resched(curr))
2954 return;
2955
2956 /* Idle tasks are by definition preempted by non-idle tasks. */
2957 if (unlikely(curr->policy == SCHED_IDLE) &&
2958 likely(p->policy != SCHED_IDLE))
2959 goto preempt;
2960
2961 /*
2962 * Batch and idle tasks do not preempt non-idle tasks (their preemption
2963 * is driven by the tick):
2964 */
2965 if (unlikely(p->policy != SCHED_NORMAL))
2966 return;
2967
2968 find_matching_se(&se, &pse);
2969 update_curr(cfs_rq_of(se));
2970 BUG_ON(!pse);
2971 if (wakeup_preempt_entity(se, pse) == 1) {
2972 /*
2973 * Bias pick_next to pick the sched entity that is
2974 * triggering this preemption.
2975 */
2976 if (!next_buddy_marked)
2977 set_next_buddy(pse);
2978 goto preempt;
2979 }
2980
2981 return;
2982
2983preempt:
2984 resched_task(curr);
2985 /*
2986 * Only set the backward buddy when the current task is still
2987 * on the rq. This can happen when a wakeup gets interleaved
2988 * with schedule on the ->pre_schedule() or idle_balance()
2989	 * point, either of which can drop the rq lock.
2990 *
2991	 * Also, during early boot the idle thread is in the fair class;
2992	 * for obvious reasons it's a bad idea to schedule back to it.
2993 */
2994 if (unlikely(!se->on_rq || curr == rq->idle))
2995 return;
2996
2997 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
2998 set_last_buddy(se);
2999}
3000
3001static struct task_struct *pick_next_task_fair(struct rq *rq)
3002{
3003 struct task_struct *p;
3004 struct cfs_rq *cfs_rq = &rq->cfs;
3005 struct sched_entity *se;
3006
3007 if (!cfs_rq->nr_running)
3008 return NULL;
3009
3010 do {
3011 se = pick_next_entity(cfs_rq);
3012 set_next_entity(cfs_rq, se);
3013 cfs_rq = group_cfs_rq(se);
3014 } while (cfs_rq);
3015
3016 p = task_of(se);
3017 hrtick_start_fair(rq, p);
3018
3019 return p;
3020}
3021
3022/*
3023 * Account for a descheduled task:
3024 */
3025static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
3026{
3027 struct sched_entity *se = &prev->se;
3028 struct cfs_rq *cfs_rq;
3029
3030 for_each_sched_entity(se) {
3031 cfs_rq = cfs_rq_of(se);
3032 put_prev_entity(cfs_rq, se);
3033 }
3034}
3035
3036/*
3037 * sched_yield() is very simple
3038 *
3039 * The magic of dealing with the ->skip buddy is in pick_next_entity.
3040 */
3041static void yield_task_fair(struct rq *rq)
3042{
3043 struct task_struct *curr = rq->curr;
3044 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
3045 struct sched_entity *se = &curr->se;
3046
3047 /*
3048 * Are we the only task in the tree?
3049 */
3050 if (unlikely(rq->nr_running == 1))
3051 return;
3052
3053 clear_buddies(cfs_rq, se);
3054
3055 if (curr->policy != SCHED_BATCH) {
3056 update_rq_clock(rq);
3057 /*
3058 * Update run-time statistics of the 'current'.
3059 */
3060 update_curr(cfs_rq);
3061 }
3062
3063 set_skip_buddy(se);
3064}
3065
3066static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
3067{
3068 struct sched_entity *se = &p->se;
3069
3070 /* throttled hierarchies are not runnable */
3071 if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se)))
3072 return false;
3073
3074	/* Tell the scheduler that we'd really like this se to run next. */
3075 set_next_buddy(se);
3076
3077 yield_task_fair(rq);
3078
3079 return true;
3080}
3081
3082#ifdef CONFIG_SMP
3083/**************************************************
3084 * Fair scheduling class load-balancing methods:
3085 */
3086
3087/*
3088 * pull_task - move a task from a remote runqueue to the local runqueue.
3089 * Both runqueues must be locked.
3090 */
3091static void pull_task(struct rq *src_rq, struct task_struct *p,
3092 struct rq *this_rq, int this_cpu)
3093{
3094 deactivate_task(src_rq, p, 0);
3095 set_task_cpu(p, this_cpu);
3096 activate_task(this_rq, p, 0);
3097 check_preempt_curr(this_rq, p, 0);
3098}
3099
3100/*
3101 * Is this task likely cache-hot:
3102 */
3103static int
3104task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
3105{
3106 s64 delta;
3107
3108 if (p->sched_class != &fair_sched_class)
3109 return 0;
3110
3111 if (unlikely(p->policy == SCHED_IDLE))
3112 return 0;
3113
3114 /*
3115 * Buddy candidates are cache hot:
3116 */
3117 if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
3118 (&p->se == cfs_rq_of(&p->se)->next ||
3119 &p->se == cfs_rq_of(&p->se)->last))
3120 return 1;
3121
3122 if (sysctl_sched_migration_cost == -1)
3123 return 1;
3124 if (sysctl_sched_migration_cost == 0)
3125 return 0;
3126
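	/*
	 * A task that last ran within sysctl_sched_migration_cost
	 * nanoseconds (0.5ms by default) is still considered cache hot.
	 */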
3127 delta = now - p->se.exec_start;
3128
3129 return delta < (s64)sysctl_sched_migration_cost;
3130}
3131
3132/*
3133 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
3134 */
3135static
3136int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
3137 struct sched_domain *sd, enum cpu_idle_type idle,
3138 int *all_pinned)
3139{
3140 int tsk_cache_hot = 0;
3141 /*
3142 * We do not migrate tasks that are:
3143 * 1) running (obviously), or
3144 * 2) cannot be migrated to this CPU due to cpus_allowed, or
3145 * 3) are cache-hot on their current CPU.
3146 */
3147 if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) {
3148 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
3149 return 0;
3150 }
3151 *all_pinned = 0;
3152
3153 if (task_running(rq, p)) {
3154 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
3155 return 0;
3156 }
3157
3158 /*
3159 * Aggressive migration if:
3160 * 1) task is cache cold, or
3161 * 2) too many balance attempts have failed.
3162 */
3163
3164 tsk_cache_hot = task_hot(p, rq->clock_task, sd);
3165 if (!tsk_cache_hot ||
3166 sd->nr_balance_failed > sd->cache_nice_tries) {
3167#ifdef CONFIG_SCHEDSTATS
3168 if (tsk_cache_hot) {
3169 schedstat_inc(sd, lb_hot_gained[idle]);
3170 schedstat_inc(p, se.statistics.nr_forced_migrations);
3171 }
3172#endif
3173 return 1;
3174 }
3175
3176 if (tsk_cache_hot) {
3177 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
3178 return 0;
3179 }
3180 return 1;
3181}
3182
3183/*
3184 * move_one_task tries to move exactly one task from busiest to this_rq, as
3185 * part of active balancing operations within "domain".
3186 * Returns 1 if successful and 0 otherwise.
3187 *
3188 * Called with both runqueues locked.
3189 */
3190static int
3191move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3192 struct sched_domain *sd, enum cpu_idle_type idle)
3193{
3194 struct task_struct *p, *n;
3195 struct cfs_rq *cfs_rq;
3196 int pinned = 0;
3197
3198 for_each_leaf_cfs_rq(busiest, cfs_rq) {
3199 list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
3200 if (throttled_lb_pair(task_group(p),
3201 busiest->cpu, this_cpu))
3202 break;
3203
3204 if (!can_migrate_task(p, busiest, this_cpu,
3205 sd, idle, &pinned))
3206 continue;
3207
3208 pull_task(busiest, p, this_rq, this_cpu);
3209 /*
3210 * Right now, this is only the second place pull_task()
3211 * is called, so we can safely collect pull_task()
3212 * stats here rather than inside pull_task().
3213 */
3214 schedstat_inc(sd, lb_gained[idle]);
3215 return 1;
3216 }
3217 }
3218
3219 return 0;
3220}
3221
3222static unsigned long
3223balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3224 unsigned long max_load_move, struct sched_domain *sd,
3225 enum cpu_idle_type idle, int *all_pinned,
3226 struct cfs_rq *busiest_cfs_rq)
3227{
3228 int loops = 0, pulled = 0;
3229 long rem_load_move = max_load_move;
3230 struct task_struct *p, *n;
3231
3232 if (max_load_move == 0)
3233 goto out;
3234
3235 list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
3236 if (loops++ > sysctl_sched_nr_migrate)
3237 break;
3238
3239 if ((p->se.load.weight >> 1) > rem_load_move ||
3240 !can_migrate_task(p, busiest, this_cpu, sd, idle,
3241 all_pinned))
3242 continue;
3243
3244 pull_task(busiest, p, this_rq, this_cpu);
3245 pulled++;
3246 rem_load_move -= p->se.load.weight;
3247
3248#ifdef CONFIG_PREEMPT
3249 /*
3250 * NEWIDLE balancing is a source of latency, so preemptible
3251 * kernels will stop after the first task is pulled to minimize
3252 * the critical section.
3253 */
3254 if (idle == CPU_NEWLY_IDLE)
3255 break;
3256#endif
3257
3258 /*
3259 * We only want to steal up to the prescribed amount of
3260 * weighted load.
3261 */
3262 if (rem_load_move <= 0)
3263 break;
3264 }
3265out:
3266 /*
3267 * Right now, this is one of only two places pull_task() is called,
3268 * so we can safely collect pull_task() stats here rather than
3269 * inside pull_task().
3270 */
3271 schedstat_add(sd, lb_gained[idle], pulled);
3272
3273 return max_load_move - rem_load_move;
3274}
3275
3276#ifdef CONFIG_FAIR_GROUP_SCHED
3277/*
3278 * update tg->load_weight by folding this cpu's load_avg
3279 */
3280static int update_shares_cpu(struct task_group *tg, int cpu)
3281{
3282 struct cfs_rq *cfs_rq;
3283 unsigned long flags;
3284 struct rq *rq;
3285
3286 if (!tg->se[cpu])
3287 return 0;
3288
3289 rq = cpu_rq(cpu);
3290 cfs_rq = tg->cfs_rq[cpu];
3291
3292 raw_spin_lock_irqsave(&rq->lock, flags);
3293
3294 update_rq_clock(rq);
3295 update_cfs_load(cfs_rq, 1);
3296
3297 /*
3298 * We need to update shares after updating tg->load_weight in
3299 * order to adjust the weight of groups with long running tasks.
3300 */
3301 update_cfs_shares(cfs_rq);
3302
3303 raw_spin_unlock_irqrestore(&rq->lock, flags);
3304
3305 return 0;
3306}
3307
3308static void update_shares(int cpu)
3309{
3310 struct cfs_rq *cfs_rq;
3311 struct rq *rq = cpu_rq(cpu);
3312
3313 rcu_read_lock();
3314 /*
3315 * Iterates the task_group tree in a bottom up fashion, see
3316 * list_add_leaf_cfs_rq() for details.
3317 */
3318 for_each_leaf_cfs_rq(rq, cfs_rq) {
3319 /* throttled entities do not contribute to load */
3320 if (throttled_hierarchy(cfs_rq))
3321 continue;
3322
3323 update_shares_cpu(cfs_rq->tg, cpu);
3324 }
3325 rcu_read_unlock();
3326}
3327
3328/*
3329 * Compute the cpu's hierarchical load factor for each task group.
3330 * This needs to be done in a top-down fashion because the load of a child
3331 * group is a fraction of its parent's load.
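 *
 * At each level the parent's h_load is scaled by the fraction of the
 * parent's queue weight contributed by this group's sched_entity (the
 * "+ 1" in the divisor below mainly guards against division by zero).
 * For example, if a group's entity carries half of the root cfs_rq's
 * weight on a cpu, its h_load comes out at roughly half of that cpu's
 * total load.weight.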
3332 */
3333static int tg_load_down(struct task_group *tg, void *data)
3334{
3335 unsigned long load;
3336 long cpu = (long)data;
3337
3338 if (!tg->parent) {
3339 load = cpu_rq(cpu)->load.weight;
3340 } else {
3341 load = tg->parent->cfs_rq[cpu]->h_load;
3342 load *= tg->se[cpu]->load.weight;
3343 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
3344 }
3345
3346 tg->cfs_rq[cpu]->h_load = load;
3347
3348 return 0;
3349}
3350
3351static void update_h_load(long cpu)
3352{
3353 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
3354}
3355
3356static unsigned long
3357load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
3358 unsigned long max_load_move,
3359 struct sched_domain *sd, enum cpu_idle_type idle,
3360 int *all_pinned)
3361{
3362 long rem_load_move = max_load_move;
3363 struct cfs_rq *busiest_cfs_rq;
3364
3365 rcu_read_lock();
3366 update_h_load(cpu_of(busiest));
3367
3368 for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
3369 unsigned long busiest_h_load = busiest_cfs_rq->h_load;
3370 unsigned long busiest_weight = busiest_cfs_rq->load.weight;
3371 u64 rem_load, moved_load;
3372
3373 /*
3374 * empty group or part of a throttled hierarchy
3375 */
3376 if (!busiest_cfs_rq->task_weight ||
3377 throttled_lb_pair(busiest_cfs_rq->tg, cpu_of(busiest), this_cpu))
3378 continue;
3379
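		/*
		 * Scale the remaining load target from hierarchical h_load
		 * units into this cfs_rq's local weight units; the load
		 * actually moved is scaled back below before being
		 * subtracted from rem_load_move.
		 */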
3380 rem_load = (u64)rem_load_move * busiest_weight;
3381 rem_load = div_u64(rem_load, busiest_h_load + 1);
3382
3383 moved_load = balance_tasks(this_rq, this_cpu, busiest,
3384 rem_load, sd, idle, all_pinned,
3385 busiest_cfs_rq);
3386
3387 if (!moved_load)
3388 continue;
3389
3390 moved_load *= busiest_h_load;
3391 moved_load = div_u64(moved_load, busiest_weight + 1);
3392
3393 rem_load_move -= moved_load;
3394 if (rem_load_move < 0)
3395 break;
3396 }
3397 rcu_read_unlock();
3398
3399 return max_load_move - rem_load_move;
3400}
3401#else
3402static inline void update_shares(int cpu)
3403{
3404}
3405
3406static unsigned long
3407load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
3408 unsigned long max_load_move,
3409 struct sched_domain *sd, enum cpu_idle_type idle,
3410 int *all_pinned)
3411{
3412 return balance_tasks(this_rq, this_cpu, busiest,
3413 max_load_move, sd, idle, all_pinned,
3414 &busiest->cfs);
3415}
3416#endif
3417
3418/*
3419 * move_tasks tries to move up to max_load_move weighted load from busiest to
3420 * this_rq, as part of a balancing operation within domain "sd".
3421 * Returns 1 if successful and 0 otherwise.
3422 *
3423 * Called with both runqueues locked.
3424 */
3425static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3426 unsigned long max_load_move,
3427 struct sched_domain *sd, enum cpu_idle_type idle,
3428 int *all_pinned)
3429{
3430 unsigned long total_load_moved = 0, load_moved;
3431
3432 do {
3433 load_moved = load_balance_fair(this_rq, this_cpu, busiest,
3434 max_load_move - total_load_moved,
3435 sd, idle, all_pinned);
3436
3437 total_load_moved += load_moved;
3438
3439#ifdef CONFIG_PREEMPT
3440 /*
3441 * NEWIDLE balancing is a source of latency, so preemptible
3442 * kernels will stop after the first task is pulled to minimize
3443 * the critical section.
3444 */
3445 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
3446 break;
3447
3448 if (raw_spin_is_contended(&this_rq->lock) ||
3449 raw_spin_is_contended(&busiest->lock))
3450 break;
3451#endif
3452 } while (load_moved && max_load_move > total_load_moved);
3453
3454 return total_load_moved > 0;
3455}
3456
3457/********** Helpers for find_busiest_group ************************/
3458/*
3459 * sd_lb_stats - Structure to store the statistics of a sched_domain
3460 * during load balancing.
3461 */
3462struct sd_lb_stats {
3463 struct sched_group *busiest; /* Busiest group in this sd */
3464 struct sched_group *this; /* Local group in this sd */
3465 unsigned long total_load; /* Total load of all groups in sd */
3466 unsigned long total_pwr; /* Total power of all groups in sd */
3467 unsigned long avg_load; /* Average load across all groups in sd */
3468
3469 /** Statistics of this group */
3470 unsigned long this_load;
3471 unsigned long this_load_per_task;
3472 unsigned long this_nr_running;
3473 unsigned long this_has_capacity;
3474 unsigned int this_idle_cpus;
3475
3476 /* Statistics of the busiest group */
3477 unsigned int busiest_idle_cpus;
3478 unsigned long max_load;
3479 unsigned long busiest_load_per_task;
3480 unsigned long busiest_nr_running;
3481 unsigned long busiest_group_capacity;
3482 unsigned long busiest_has_capacity;
3483 unsigned int busiest_group_weight;
3484
3485 int group_imb; /* Is there imbalance in this sd */
3486#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3487 int power_savings_balance; /* Is powersave balance needed for this sd */
3488 struct sched_group *group_min; /* Least loaded group in sd */
3489 struct sched_group *group_leader; /* Group which relieves group_min */
3490 unsigned long min_load_per_task; /* load_per_task in group_min */
3491 unsigned long leader_nr_running; /* Nr running of group_leader */
3492 unsigned long min_nr_running; /* Nr running of group_min */
3493#endif
3494};
3495
3496/*
3497 * sg_lb_stats - stats of a sched_group required for load_balancing
3498 */
3499struct sg_lb_stats {
3500	unsigned long avg_load; /* Avg load across the CPUs of the group */
3501 unsigned long group_load; /* Total load over the CPUs of the group */
3502 unsigned long sum_nr_running; /* Nr tasks running in the group */
3503 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
3504 unsigned long group_capacity;
3505 unsigned long idle_cpus;
3506 unsigned long group_weight;
3507 int group_imb; /* Is there an imbalance in the group ? */
3508 int group_has_capacity; /* Is there extra capacity in the group? */
3509};
3510
3511/**
3512 * get_sd_load_idx - Obtain the load index for a given sched domain.
3513 * @sd: The sched_domain whose load_idx is to be obtained.
3514 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
3515 */
3516static inline int get_sd_load_idx(struct sched_domain *sd,
3517 enum cpu_idle_type idle)
3518{
3519 int load_idx;
3520
3521 switch (idle) {
3522 case CPU_NOT_IDLE:
3523 load_idx = sd->busy_idx;
3524 break;
3525
3526 case CPU_NEWLY_IDLE:
3527 load_idx = sd->newidle_idx;
3528 break;
3529 default:
3530 load_idx = sd->idle_idx;
3531 break;
3532 }
3533
3534 return load_idx;
3535}
3536
3537
3538#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3539/**
3540 * init_sd_power_savings_stats - Initialize power savings statistics for
3541 * the given sched_domain, during load balancing.
3542 *
3543 * @sd: Sched domain whose power-savings statistics are to be initialized.
3544 * @sds: Variable containing the statistics for sd.
3545 * @idle: Idle status of the CPU at which we're performing load-balancing.
3546 */
3547static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3548 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3549{
3550 /*
3551 * Busy processors will not participate in power savings
3552 * balance.
3553 */
3554 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3555 sds->power_savings_balance = 0;
3556 else {
3557 sds->power_savings_balance = 1;
3558 sds->min_nr_running = ULONG_MAX;
3559 sds->leader_nr_running = 0;
3560 }
3561}
3562
3563/**
3564 * update_sd_power_savings_stats - Update the power saving stats for a
3565 * sched_domain while performing load balancing.
3566 *
3567 * @group: sched_group belonging to the sched_domain under consideration.
3568 * @sds: Variable containing the statistics of the sched_domain
3569 * @local_group: Does group contain the CPU for which we're performing
3570 * load balancing ?
3571 * @sgs: Variable containing the statistics of the group.
3572 */
3573static inline void update_sd_power_savings_stats(struct sched_group *group,
3574 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3575{
3576
3577 if (!sds->power_savings_balance)
3578 return;
3579
3580 /*
3581	 * If the local group is idle or completely loaded, there is
3582	 * no need to do power savings balance at this domain
3583 */
3584 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
3585 !sds->this_nr_running))
3586 sds->power_savings_balance = 0;
3587
3588 /*
3589 * If a group is already running at full capacity or idle,
3590 * don't include that group in power savings calculations
3591 */
3592 if (!sds->power_savings_balance ||
3593 sgs->sum_nr_running >= sgs->group_capacity ||
3594 !sgs->sum_nr_running)
3595 return;
3596
3597 /*
3598 * Calculate the group which has the least non-idle load.
3599 * This is the group from where we need to pick up the load
3600 * for saving power
3601 */
3602 if ((sgs->sum_nr_running < sds->min_nr_running) ||
3603 (sgs->sum_nr_running == sds->min_nr_running &&
3604 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
3605 sds->group_min = group;
3606 sds->min_nr_running = sgs->sum_nr_running;
3607 sds->min_load_per_task = sgs->sum_weighted_load /
3608 sgs->sum_nr_running;
3609 }
3610
3611 /*
3612	 * Calculate the group which is nearly at its
3613	 * capacity but still has some space to pick up some load
3614	 * from another group and save more power
3615 */
3616 if (sgs->sum_nr_running + 1 > sgs->group_capacity)
3617 return;
3618
3619 if (sgs->sum_nr_running > sds->leader_nr_running ||
3620 (sgs->sum_nr_running == sds->leader_nr_running &&
3621 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
3622 sds->group_leader = group;
3623 sds->leader_nr_running = sgs->sum_nr_running;
3624 }
3625}
3626
3627/**
3628 * check_power_save_busiest_group - see if there is potential for some power-savings balance
3629 * @sds: Variable containing the statistics of the sched_domain
3630 * under consideration.
3631 * @this_cpu: Cpu at which we're currently performing load-balancing.
3632 * @imbalance: Variable to store the imbalance.
3633 *
3634 * Description:
3635 * Check if we have potential to perform some power-savings balance.
3636 * If yes, set the busiest group to be the least loaded group in the
3637 * sched_domain, so that its CPUs can be put to idle.
3638 *
3639 * Returns 1 if there is potential to perform power-savings balance.
3640 * Else returns 0.
3641 */
3642static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3643 int this_cpu, unsigned long *imbalance)
3644{
3645 if (!sds->power_savings_balance)
3646 return 0;
3647
3648 if (sds->this != sds->group_leader ||
3649 sds->group_leader == sds->group_min)
3650 return 0;
3651
3652 *imbalance = sds->min_load_per_task;
3653 sds->busiest = sds->group_min;
3654
3655 return 1;
3656
3657}
3658#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3659static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3660 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3661{
3662 return;
3663}
3664
3665static inline void update_sd_power_savings_stats(struct sched_group *group,
3666 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3667{
3668 return;
3669}
3670
3671static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3672 int this_cpu, unsigned long *imbalance)
3673{
3674 return 0;
3675}
3676#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3677
3678
3679unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
3680{
3681 return SCHED_POWER_SCALE;
3682}
3683
3684unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
3685{
3686 return default_scale_freq_power(sd, cpu);
3687}
3688
3689unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
3690{
3691 unsigned long weight = sd->span_weight;
3692 unsigned long smt_gain = sd->smt_gain;
3693
3694 smt_gain /= weight;
3695
3696 return smt_gain;
3697}
3698
3699unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
3700{
3701 return default_scale_smt_power(sd, cpu);
3702}
3703
3704unsigned long scale_rt_power(int cpu)
3705{
3706 struct rq *rq = cpu_rq(cpu);
3707 u64 total, available;
3708
3709 total = sched_avg_period() + (rq->clock - rq->age_stamp);
3710
3711 if (unlikely(total < rq->rt_avg)) {
3712 /* Ensures that power won't end up being negative */
3713 available = 0;
3714 } else {
3715 available = total - rq->rt_avg;
3716 }
3717
3718 if (unlikely((s64)total < SCHED_POWER_SCALE))
3719 total = SCHED_POWER_SCALE;
3720
3721 total >>= SCHED_POWER_SHIFT;
3722
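	/*
	 * The result is the fraction of the period left for CFS use,
	 * expressed on the SCHED_POWER_SCALE scale: e.g. if RT activity
	 * consumed about a quarter of the averaging period, roughly three
	 * quarters of SCHED_POWER_SCALE is returned.
	 */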
3723 return div_u64(available, total);
3724}
3725
3726static void update_cpu_power(struct sched_domain *sd, int cpu)
3727{
3728 unsigned long weight = sd->span_weight;
3729 unsigned long power = SCHED_POWER_SCALE;
3730 struct sched_group *sdg = sd->groups;
3731
3732 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
3733 if (sched_feat(ARCH_POWER))
3734 power *= arch_scale_smt_power(sd, cpu);
3735 else
3736 power *= default_scale_smt_power(sd, cpu);
3737
3738 power >>= SCHED_POWER_SHIFT;
3739 }
3740
3741 sdg->sgp->power_orig = power;
3742
3743 if (sched_feat(ARCH_POWER))
3744 power *= arch_scale_freq_power(sd, cpu);
3745 else
3746 power *= default_scale_freq_power(sd, cpu);
3747
3748 power >>= SCHED_POWER_SHIFT;
3749
3750 power *= scale_rt_power(cpu);
3751 power >>= SCHED_POWER_SHIFT;
3752
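	/*
	 * At this point 'power' is SCHED_POWER_SCALE scaled down in turn
	 * by the SMT, frequency and RT factors above; e.g. with no SMT or
	 * frequency scaling but RT work consuming a quarter of the period,
	 * a cpu ends up with roughly 3/4 of SCHED_POWER_SCALE.
	 */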
3753 if (!power)
3754 power = 1;
3755
3756 cpu_rq(cpu)->cpu_power = power;
3757 sdg->sgp->power = power;
3758}
3759
3760void update_group_power(struct sched_domain *sd, int cpu)
3761{
3762 struct sched_domain *child = sd->child;
3763 struct sched_group *group, *sdg = sd->groups;
3764 unsigned long power;
3765
3766 if (!child) {
3767 update_cpu_power(sd, cpu);
3768 return;
3769 }
3770
3771 power = 0;
3772
3773 group = child->groups;
3774 do {
3775 power += group->sgp->power;
3776 group = group->next;
3777 } while (group != child->groups);
3778
3779 sdg->sgp->power = power;
3780}
3781
3782/*
3783 * Try and fix up capacity for tiny siblings; this is needed when
3784 * things like SD_ASYM_PACKING need f_b_g to select another sibling
3785 * which on its own isn't powerful enough.
3786 *
3787 * See update_sd_pick_busiest() and check_asym_packing().
3788 */
3789static inline int
3790fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
3791{
3792 /*
3793 * Only siblings can have significantly less than SCHED_POWER_SCALE
3794 */
3795 if (!(sd->flags & SD_SHARE_CPUPOWER))
3796 return 0;
3797
3798 /*
3799 * If ~90% of the cpu_power is still there, we're good.
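	 * (the 32 vs 29 comparison below keeps groups above 29/32, i.e.
	 * about 90.6%, of their original power)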
3800 */
3801 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
3802 return 1;
3803
3804 return 0;
3805}
3806
3807/**
3808 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3809 * @sd: The sched_domain whose statistics are to be updated.
3810 * @group: sched_group whose statistics are to be updated.
3811 * @this_cpu: Cpu for which load balance is currently performed.
3812 * @idle: Idle status of this_cpu
3813 * @load_idx: Load index of sched_domain of this_cpu for load calc.
3814 * @local_group: Does group contain this_cpu.
3815 * @cpus: Set of cpus considered for load balancing.
3816 * @balance: Should we balance.
3817 * @sgs: variable to hold the statistics for this group.
3818 */
3819static inline void update_sg_lb_stats(struct sched_domain *sd,
3820 struct sched_group *group, int this_cpu,
3821 enum cpu_idle_type idle, int load_idx,
3822 int local_group, const struct cpumask *cpus,
3823 int *balance, struct sg_lb_stats *sgs)
3824{
3825 unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
3826 int i;
3827 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3828 unsigned long avg_load_per_task = 0;
3829
3830 if (local_group)
3831 balance_cpu = group_first_cpu(group);
3832
3833 /* Tally up the load of all CPUs in the group */
3834 max_cpu_load = 0;
3835 min_cpu_load = ~0UL;
3836 max_nr_running = 0;
3837
3838 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3839 struct rq *rq = cpu_rq(i);
3840
3841 /* Bias balancing toward cpus of our domain */
3842 if (local_group) {
3843 if (idle_cpu(i) && !first_idle_cpu) {
3844 first_idle_cpu = 1;
3845 balance_cpu = i;
3846 }
3847
3848 load = target_load(i, load_idx);
3849 } else {
3850 load = source_load(i, load_idx);
3851 if (load > max_cpu_load) {
3852 max_cpu_load = load;
3853 max_nr_running = rq->nr_running;
3854 }
3855 if (min_cpu_load > load)
3856 min_cpu_load = load;
3857 }
3858
3859 sgs->group_load += load;
3860 sgs->sum_nr_running += rq->nr_running;
3861 sgs->sum_weighted_load += weighted_cpuload(i);
3862 if (idle_cpu(i))
3863 sgs->idle_cpus++;
3864 }
3865
3866 /*
3867	 * The first idle cpu or the first cpu (busiest) in this sched group
3868	 * is eligible for doing load balancing at this and higher
3869	 * domains. In the newly idle case, we will allow all the cpus
3870	 * to do the newly idle load balance.
3871 */
3872 if (idle != CPU_NEWLY_IDLE && local_group) {
3873 if (balance_cpu != this_cpu) {
3874 *balance = 0;
3875 return;
3876 }
3877 update_group_power(sd, this_cpu);
3878 }
3879
3880 /* Adjust by relative CPU power of the group */
3881 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
3882
3883 /*
3884 * Consider the group unbalanced when the imbalance is larger
3885 * than the average weight of a task.
3886 *
3887 * APZ: with cgroup the avg task weight can vary wildly and
3888 * might not be a suitable number - should we keep a
3889 * normalized nr_running number somewhere that negates
3890 * the hierarchy?
3891 */
3892 if (sgs->sum_nr_running)
3893 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
3894
3895 if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
3896 sgs->group_imb = 1;
3897
3898 sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
3899 SCHED_POWER_SCALE);
3900 if (!sgs->group_capacity)
3901 sgs->group_capacity = fix_small_capacity(sd, group);
3902 sgs->group_weight = group->group_weight;
3903
3904 if (sgs->group_capacity > sgs->sum_nr_running)
3905 sgs->group_has_capacity = 1;
3906}
3907
3908/**
3909 * update_sd_pick_busiest - return 1 on busiest group
3910 * @sd: sched_domain whose statistics are to be checked
3911 * @sds: sched_domain statistics
3912 * @sg: sched_group candidate to be checked for being the busiest
3913 * @sgs: sched_group statistics
3914 * @this_cpu: the current cpu
3915 *
3916 * Determine if @sg is a busier group than the previously selected
3917 * busiest group.
3918 */
3919static bool update_sd_pick_busiest(struct sched_domain *sd,
3920 struct sd_lb_stats *sds,
3921 struct sched_group *sg,
3922 struct sg_lb_stats *sgs,
3923 int this_cpu)
3924{
3925 if (sgs->avg_load <= sds->max_load)
3926 return false;
3927
3928 if (sgs->sum_nr_running > sgs->group_capacity)
3929 return true;
3930
3931 if (sgs->group_imb)
3932 return true;
3933
3934 /*
3935 * ASYM_PACKING needs to move all the work to the lowest
3936 * numbered CPUs in the group, therefore mark all groups
3937	 * higher than ourselves as busy.
3938 */
3939 if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
3940 this_cpu < group_first_cpu(sg)) {
3941 if (!sds->busiest)
3942 return true;
3943
3944 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
3945 return true;
3946 }
3947
3948 return false;
3949}
3950
3951/**
3952 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
3953 * @sd: sched_domain whose statistics are to be updated.
3954 * @this_cpu: Cpu for which load balance is currently performed.
3955 * @idle: Idle status of this_cpu
3956 * @cpus: Set of cpus considered for load balancing.
3957 * @balance: Should we balance.
3958 * @sds: variable to hold the statistics for this sched_domain.
3959 */
3960static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3961 enum cpu_idle_type idle, const struct cpumask *cpus,
3962 int *balance, struct sd_lb_stats *sds)
3963{
3964 struct sched_domain *child = sd->child;
3965 struct sched_group *sg = sd->groups;
3966 struct sg_lb_stats sgs;
3967 int load_idx, prefer_sibling = 0;
3968
3969 if (child && child->flags & SD_PREFER_SIBLING)
3970 prefer_sibling = 1;
3971
3972 init_sd_power_savings_stats(sd, sds, idle);
3973 load_idx = get_sd_load_idx(sd, idle);
3974
3975 do {
3976 int local_group;
3977
3978 local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
3979 memset(&sgs, 0, sizeof(sgs));
3980 update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
3981 local_group, cpus, balance, &sgs);
3982
3983 if (local_group && !(*balance))
3984 return;
3985
3986 sds->total_load += sgs.group_load;
3987 sds->total_pwr += sg->sgp->power;
3988
3989 /*
3990 * In case the child domain prefers tasks go to siblings
3991 * first, lower the sg capacity to one so that we'll try
3992 * and move all the excess tasks away. We lower the capacity
3993 * of a group only if the local group has the capacity to fit
3994 * these excess tasks, i.e. nr_running < group_capacity. The
3995 * extra check prevents the case where you always pull from the
3996 * heaviest group when it is already under-utilized (possible
3997		 * heaviest group when it is already under-utilized (possible when
3998		 * a single large-weight task outweighs the tasks on the system).
3999 if (prefer_sibling && !local_group && sds->this_has_capacity)
4000 sgs.group_capacity = min(sgs.group_capacity, 1UL);
4001
4002 if (local_group) {
4003 sds->this_load = sgs.avg_load;
4004 sds->this = sg;
4005 sds->this_nr_running = sgs.sum_nr_running;
4006 sds->this_load_per_task = sgs.sum_weighted_load;
4007 sds->this_has_capacity = sgs.group_has_capacity;
4008 sds->this_idle_cpus = sgs.idle_cpus;
4009 } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
4010 sds->max_load = sgs.avg_load;
4011 sds->busiest = sg;
4012 sds->busiest_nr_running = sgs.sum_nr_running;
4013 sds->busiest_idle_cpus = sgs.idle_cpus;
4014 sds->busiest_group_capacity = sgs.group_capacity;
4015 sds->busiest_load_per_task = sgs.sum_weighted_load;
4016 sds->busiest_has_capacity = sgs.group_has_capacity;
4017 sds->busiest_group_weight = sgs.group_weight;
4018 sds->group_imb = sgs.group_imb;
4019 }
4020
4021 update_sd_power_savings_stats(sg, sds, local_group, &sgs);
4022 sg = sg->next;
4023 } while (sg != sd->groups);
4024}
4025
4026/**
4027 * check_asym_packing - Check to see if the group is packed into the
4028 * sched domain.
4029 *
4030 * This is primarily intended to be used at the sibling level. Some
4031 * cores like POWER7 prefer to use lower numbered SMT threads. In the
4032 * case of POWER7, it can move to lower SMT modes only when higher
4033 * threads are idle. When in lower SMT modes, the threads will
4034 * perform better since they share less core resources. Hence when we
4035 * perform better since they share fewer core resources. Hence when we
4036 *
4037 * This packing function is run on idle threads. It checks to see if
4038 * the busiest CPU in this domain (core in the P7 case) has a higher
4039 * CPU number than the packing function is being run on. Here we are
4040 * assuming a lower CPU number will be equivalent to a lower SMT thread
4041 * number.
4042 *
4043 * Returns 1 when packing is required and a task should be moved to
4044 * this CPU. The amount of the imbalance is returned in *imbalance.
4045 *
4046 * @sd: The sched_domain whose packing is to be checked.
4047 * @sds: Statistics of the sched_domain which is to be packed
4048 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
4049 * @imbalance: returns the amount of imbalance due to packing.
4050 */
4051static int check_asym_packing(struct sched_domain *sd,
4052 struct sd_lb_stats *sds,
4053 int this_cpu, unsigned long *imbalance)
4054{
4055 int busiest_cpu;
4056
4057 if (!(sd->flags & SD_ASYM_PACKING))
4058 return 0;
4059
4060 if (!sds->busiest)
4061 return 0;
4062
4063 busiest_cpu = group_first_cpu(sds->busiest);
4064 if (this_cpu > busiest_cpu)
4065 return 0;
4066
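	/*
	 * Convert the busiest group's power-scaled average load back into
	 * plain weighted-load units for the caller; a group whose power is
	 * exactly SCHED_POWER_SCALE reports its avg_load unchanged.
	 */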
4067 *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
4068 SCHED_POWER_SCALE);
4069 return 1;
4070}
4071
4072/**
4073 * fix_small_imbalance - Calculate the minor imbalance that exists
4074 * amongst the groups of a sched_domain, during
4075 * load balancing.
4076 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
4077 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
4078 * @imbalance: Variable to store the imbalance.
4079 */
4080static inline void fix_small_imbalance(struct sd_lb_stats *sds,
4081 int this_cpu, unsigned long *imbalance)
4082{
4083 unsigned long tmp, pwr_now = 0, pwr_move = 0;
4084 unsigned int imbn = 2;
4085 unsigned long scaled_busy_load_per_task;
4086
4087 if (sds->this_nr_running) {
4088 sds->this_load_per_task /= sds->this_nr_running;
4089 if (sds->busiest_load_per_task >
4090 sds->this_load_per_task)
4091 imbn = 1;
4092 } else
4093 sds->this_load_per_task =
4094 cpu_avg_load_per_task(this_cpu);
4095
4096 scaled_busy_load_per_task = sds->busiest_load_per_task
4097 * SCHED_POWER_SCALE;
4098 scaled_busy_load_per_task /= sds->busiest->sgp->power;
4099
4100 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
4101 (scaled_busy_load_per_task * imbn)) {
4102 *imbalance = sds->busiest_load_per_task;
4103 return;
4104 }
4105
4106 /*
4107 * OK, we don't have enough imbalance to justify moving tasks,
4108 * however we may be able to increase total CPU power used by
4109 * moving them.
4110 */
4111
4112 pwr_now += sds->busiest->sgp->power *
4113 min(sds->busiest_load_per_task, sds->max_load);
4114 pwr_now += sds->this->sgp->power *
4115 min(sds->this_load_per_task, sds->this_load);
4116 pwr_now /= SCHED_POWER_SCALE;
4117
4118 /* Amount of load we'd subtract */
4119 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4120 sds->busiest->sgp->power;
4121 if (sds->max_load > tmp)
4122 pwr_move += sds->busiest->sgp->power *
4123 min(sds->busiest_load_per_task, sds->max_load - tmp);
4124
4125 /* Amount of load we'd add */
4126 if (sds->max_load * sds->busiest->sgp->power <
4127 sds->busiest_load_per_task * SCHED_POWER_SCALE)
4128 tmp = (sds->max_load * sds->busiest->sgp->power) /
4129 sds->this->sgp->power;
4130 else
4131 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
4132 sds->this->sgp->power;
4133 pwr_move += sds->this->sgp->power *
4134 min(sds->this_load_per_task, sds->this_load + tmp);
4135 pwr_move /= SCHED_POWER_SCALE;
4136
4137 /* Move if we gain throughput */
4138 if (pwr_move > pwr_now)
4139 *imbalance = sds->busiest_load_per_task;
4140}
4141
4142/**
4143 * calculate_imbalance - Calculate the amount of imbalance present within the
4144 * groups of a given sched_domain during load balance.
4145 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
4146 * @this_cpu: Cpu for which currently load balance is being performed.
4147 * @imbalance: The variable to store the imbalance.
4148 */
4149static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
4150 unsigned long *imbalance)
4151{
4152 unsigned long max_pull, load_above_capacity = ~0UL;
4153
4154 sds->busiest_load_per_task /= sds->busiest_nr_running;
4155 if (sds->group_imb) {
4156 sds->busiest_load_per_task =
4157 min(sds->busiest_load_per_task, sds->avg_load);
4158 }
4159
4160 /*
4161 * In the presence of smp nice balancing, certain scenarios can have
4162	 * max load less than avg load (as we skip the groups at or below
4163	 * its cpu_power while calculating max_load).
4164 */
4165 if (sds->max_load < sds->avg_load) {
4166 *imbalance = 0;
4167 return fix_small_imbalance(sds, this_cpu, imbalance);
4168 }
4169
4170 if (!sds->group_imb) {
4171 /*
4172 * Don't want to pull so many tasks that a group would go idle.
4173 */
4174 load_above_capacity = (sds->busiest_nr_running -
4175 sds->busiest_group_capacity);
4176
4177 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
4178
4179 load_above_capacity /= sds->busiest->sgp->power;
4180 }
4181
4182 /*
4183 * We're trying to get all the cpus to the average_load, so we don't
4184 * want to push ourselves above the average load, nor do we wish to
4185 * reduce the max loaded cpu below the average load. At the same time,
4186 * we also don't want to reduce the group load below the group capacity
4187 * (so that we can implement power-savings policies etc). Thus we look
4188 * for the minimum possible imbalance.
4189 * Be careful of negative numbers as they'll appear as very large values
4190 * with unsigned longs.
4191 */
4192 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
4193
4194 /* How much load to actually move to equalise the imbalance */
4195 *imbalance = min(max_pull * sds->busiest->sgp->power,
4196 (sds->avg_load - sds->this_load) * sds->this->sgp->power)
4197 / SCHED_POWER_SCALE;
4198
4199 /*
4200 * if *imbalance is less than the average load per runnable task
4201 * there is no guarantee that any tasks will be moved so we'll have
4202 * a think about bumping its value to force at least one task to be
4203 * moved
4204 */
4205 if (*imbalance < sds->busiest_load_per_task)
4206 return fix_small_imbalance(sds, this_cpu, imbalance);
4207
4208}
4209
4210/******* find_busiest_group() helpers end here *********************/
4211
4212/**
4213 * find_busiest_group - Returns the busiest group within the sched_domain
4214 * if there is an imbalance. If there isn't an imbalance, and
4215 * the user has opted for power-savings, it returns a group whose
4216 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
4217 * such a group exists.
4218 *
4219 * Also calculates the amount of weighted load which should be moved
4220 * to restore balance.
4221 *
4222 * @sd: The sched_domain whose busiest group is to be returned.
4223 * @this_cpu: The cpu for which load balancing is currently being performed.
4224 * @imbalance: Variable which stores amount of weighted load which should
4225 * be moved to restore balance/put a group to idle.
4226 * @idle: The idle status of this_cpu.
4227 * @cpus: The set of CPUs under consideration for load-balancing.
4228 * @balance: Pointer to a variable indicating if this_cpu
4229 * is the appropriate cpu to perform load balancing at this_level.
4230 *
4231 * Returns: - the busiest group if imbalance exists.
4232 * - If no imbalance and user has opted for power-savings balance,
4233 * return the least loaded group whose CPUs can be
4234 * put to idle by rebalancing its tasks onto our group.
4235 */
4236static struct sched_group *
4237find_busiest_group(struct sched_domain *sd, int this_cpu,
4238 unsigned long *imbalance, enum cpu_idle_type idle,
4239 const struct cpumask *cpus, int *balance)
4240{
4241 struct sd_lb_stats sds;
4242
4243 memset(&sds, 0, sizeof(sds));
4244
4245 /*
4246	 * Compute the various statistics relevant for load balancing at
4247 * this level.
4248 */
4249 update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
4250
4251 /*
4252 * this_cpu is not the appropriate cpu to perform load balancing at
4253 * this level.
4254 */
4255 if (!(*balance))
4256 goto ret;
4257
4258 if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
4259 check_asym_packing(sd, &sds, this_cpu, imbalance))
4260 return sds.busiest;
4261
4262 /* There is no busy sibling group to pull tasks from */
4263 if (!sds.busiest || sds.busiest_nr_running == 0)
4264 goto out_balanced;
4265
4266 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
4267
4268 /*
4269 * If the busiest group is imbalanced the below checks don't
4270	 * work because they assume all things are equal, which typically
4271 * isn't true due to cpus_allowed constraints and the like.
4272 */
4273 if (sds.group_imb)
4274 goto force_balance;
4275
4276 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
4277 if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
4278 !sds.busiest_has_capacity)
4279 goto force_balance;
4280
4281 /*
4282 * If the local group is more busy than the selected busiest group
4283 * don't try and pull any tasks.
4284 */
4285 if (sds.this_load >= sds.max_load)
4286 goto out_balanced;
4287
4288 /*
4289 * Don't pull any tasks if this group is already above the domain
4290 * average load.
4291 */
4292 if (sds.this_load >= sds.avg_load)
4293 goto out_balanced;
4294
4295 if (idle == CPU_IDLE) {
4296 /*
4297		 * This cpu is idle. If the busiest group doesn't have
4298		 * more tasks than the number of available cpus and
4299		 * there is no imbalance between this and the busiest group
4300		 * wrt idle cpus, it is balanced.
4301 */
4302 if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
4303 sds.busiest_nr_running <= sds.busiest_group_weight)
4304 goto out_balanced;
4305 } else {
4306 /*
4307 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
4308 * imbalance_pct to be conservative.
4309 */
4310 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
4311 goto out_balanced;
4312 }
4313
4314force_balance:
4315 /* Looks like there is an imbalance. Compute it */
4316 calculate_imbalance(&sds, this_cpu, imbalance);
4317 return sds.busiest;
4318
4319out_balanced:
4320 /*
4321 * There is no obvious imbalance. But check if we can do some balancing
4322 * to save power.
4323 */
4324 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
4325 return sds.busiest;
4326ret:
4327 *imbalance = 0;
4328 return NULL;
4329}
4330
4331/*
4332 * find_busiest_queue - find the busiest runqueue among the cpus in group.
4333 */
4334static struct rq *
4335find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
4336 enum cpu_idle_type idle, unsigned long imbalance,
4337 const struct cpumask *cpus)
4338{
4339 struct rq *busiest = NULL, *rq;
4340 unsigned long max_load = 0;
4341 int i;
4342
4343 for_each_cpu(i, sched_group_cpus(group)) {
4344 unsigned long power = power_of(i);
4345 unsigned long capacity = DIV_ROUND_CLOSEST(power,
4346 SCHED_POWER_SCALE);
4347 unsigned long wl;
4348
4349 if (!capacity)
4350 capacity = fix_small_capacity(sd, group);
4351
4352 if (!cpumask_test_cpu(i, cpus))
4353 continue;
4354
4355 rq = cpu_rq(i);
4356 wl = weighted_cpuload(i);
4357
4358 /*
4359 * When comparing with imbalance, use weighted_cpuload()
4360 * which is not scaled with the cpu power.
4361 */
4362 if (capacity && rq->nr_running == 1 && wl > imbalance)
4363 continue;
4364
4365 /*
4366 * For the load comparisons with the other cpu's, consider
4367 * the weighted_cpuload() scaled with the cpu power, so that
4368 * the load can be moved away from the cpu that is potentially
4369 * running at a lower capacity.
4370 */
4371 wl = (wl * SCHED_POWER_SCALE) / power;
4372
4373 if (wl > max_load) {
4374 max_load = wl;
4375 busiest = rq;
4376 }
4377 }
4378
4379 return busiest;
4380}
4381
4382/*
4383 * Max backoff if we encounter pinned tasks. The value is fairly arbitrary;
4384 * it just needs to be large enough.
4385 */
4386#define MAX_PINNED_INTERVAL 512
4387
4388/* Working cpumask for load_balance and load_balance_newidle. */
4389DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
4390
4391static int need_active_balance(struct sched_domain *sd, int idle,
4392 int busiest_cpu, int this_cpu)
4393{
4394 if (idle == CPU_NEWLY_IDLE) {
4395
4396 /*
4397 * ASYM_PACKING needs to force migrate tasks from busy but
4398 * higher numbered CPUs in order to pack all tasks in the
4399 * lowest numbered CPUs.
4400 */
4401 if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
4402 return 1;
4403
4404 /*
4405 * The only task running in a non-idle cpu can be moved to this
4406	 * cpu in an attempt to completely free up the other CPU
4407 * package.
4408 *
4409 * The package power saving logic comes from
4410	 * find_busiest_group(). If there is no imbalance, then
4411 * f_b_g() will return NULL. However when sched_mc={1,2} then
4412 * f_b_g() will select a group from which a running task may be
4413 * pulled to this cpu in order to make the other package idle.
4414 * If there is no opportunity to make a package idle and if
4415	 * there is no imbalance, then f_b_g() will return NULL and no
4416 * action will be taken in load_balance_newidle().
4417 *
4418 * Under normal task pull operation due to imbalance, there
4419 * will be more than one task in the source run queue and
4420 * move_tasks() will succeed. ld_moved will be true and this
4421 * active balance code will not be triggered.
4422 */
4423 if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
4424 return 0;
4425 }
4426
4427 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
4428}
4429
4430static int active_load_balance_cpu_stop(void *data);
4431
4432/*
4433 * Check this_cpu to ensure it is balanced within domain. Attempt to move
4434 * tasks if there is an imbalance.
4435 */
4436static int load_balance(int this_cpu, struct rq *this_rq,
4437 struct sched_domain *sd, enum cpu_idle_type idle,
4438 int *balance)
4439{
4440 int ld_moved, all_pinned = 0, active_balance = 0;
4441 struct sched_group *group;
4442 unsigned long imbalance;
4443 struct rq *busiest;
4444 unsigned long flags;
4445 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
4446
4447 cpumask_copy(cpus, cpu_active_mask);
4448
4449 schedstat_inc(sd, lb_count[idle]);
4450
4451redo:
4452 group = find_busiest_group(sd, this_cpu, &imbalance, idle,
4453 cpus, balance);
4454
4455 if (*balance == 0)
4456 goto out_balanced;
4457
4458 if (!group) {
4459 schedstat_inc(sd, lb_nobusyg[idle]);
4460 goto out_balanced;
4461 }
4462
4463 busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
4464 if (!busiest) {
4465 schedstat_inc(sd, lb_nobusyq[idle]);
4466 goto out_balanced;
4467 }
4468
4469 BUG_ON(busiest == this_rq);
4470
4471 schedstat_add(sd, lb_imbalance[idle], imbalance);
4472
4473 ld_moved = 0;
4474 if (busiest->nr_running > 1) {
4475 /*
4476 * Attempt to move tasks. If find_busiest_group has found
4477 * an imbalance but busiest->nr_running <= 1, the group is
4478 * still unbalanced. ld_moved simply stays zero, so it is
4479 * correctly treated as an imbalance.
4480 */
4481 all_pinned = 1;
4482 local_irq_save(flags);
4483 double_rq_lock(this_rq, busiest);
4484 ld_moved = move_tasks(this_rq, this_cpu, busiest,
4485 imbalance, sd, idle, &all_pinned);
4486 double_rq_unlock(this_rq, busiest);
4487 local_irq_restore(flags);
4488
4489 /*
4490 * some other cpu did the load balance for us.
4491 */
4492 if (ld_moved && this_cpu != smp_processor_id())
4493 resched_cpu(this_cpu);
4494
4495 /* All tasks on this runqueue were pinned by CPU affinity */
4496 if (unlikely(all_pinned)) {
4497 cpumask_clear_cpu(cpu_of(busiest), cpus);
4498 if (!cpumask_empty(cpus))
4499 goto redo;
4500 goto out_balanced;
4501 }
4502 }
4503
4504 if (!ld_moved) {
4505 schedstat_inc(sd, lb_failed[idle]);
4506 /*
4507 * Increment the failure counter only on periodic balance.
4508 * We do not want newidle balance, which can be very
4509		 * frequent, to pollute the failure counter, causing
4510 * excessive cache_hot migrations and active balances.
4511 */
4512 if (idle != CPU_NEWLY_IDLE)
4513 sd->nr_balance_failed++;
4514
4515 if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
4516 raw_spin_lock_irqsave(&busiest->lock, flags);
4517
4518			/* don't kick the active_load_balance_cpu_stop
4519			 * if the curr task on the busiest cpu can't be
4520			 * moved to this_cpu
4521 */
4522 if (!cpumask_test_cpu(this_cpu,
4523 tsk_cpus_allowed(busiest->curr))) {
4524 raw_spin_unlock_irqrestore(&busiest->lock,
4525 flags);
4526 all_pinned = 1;
4527 goto out_one_pinned;
4528 }
4529
4530 /*
4531 * ->active_balance synchronizes accesses to
4532 * ->active_balance_work. Once set, it's cleared
4533 * only after active load balance is finished.
4534 */
4535 if (!busiest->active_balance) {
4536 busiest->active_balance = 1;
4537 busiest->push_cpu = this_cpu;
4538 active_balance = 1;
4539 }
4540 raw_spin_unlock_irqrestore(&busiest->lock, flags);
4541
4542 if (active_balance)
4543 stop_one_cpu_nowait(cpu_of(busiest),
4544 active_load_balance_cpu_stop, busiest,
4545 &busiest->active_balance_work);
4546
4547 /*
4548 * We've kicked active balancing, reset the failure
4549 * counter.
4550 */
4551 sd->nr_balance_failed = sd->cache_nice_tries+1;
4552 }
4553 } else
4554 sd->nr_balance_failed = 0;
4555
4556 if (likely(!active_balance)) {
4557 /* We were unbalanced, so reset the balancing interval */
4558 sd->balance_interval = sd->min_interval;
4559 } else {
4560 /*
4561 * If we've begun active balancing, start to back off. This
4562 * case may not be covered by the all_pinned logic if there
4563 * is only 1 task on the busy runqueue (because we don't call
4564 * move_tasks).
4565 */
4566 if (sd->balance_interval < sd->max_interval)
4567 sd->balance_interval *= 2;
4568 }
4569
4570 goto out;
4571
4572out_balanced:
4573 schedstat_inc(sd, lb_balanced[idle]);
4574
4575 sd->nr_balance_failed = 0;
4576
4577out_one_pinned:
4578 /* tune up the balancing interval */
4579 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
4580 (sd->balance_interval < sd->max_interval))
4581 sd->balance_interval *= 2;
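	/*
	 * Illustrative progression (hypothetical values): a balance_interval
	 * of 64 doubles to 128, 256, ... on successive failed attempts; the
	 * doubling stops once it reaches MAX_PINNED_INTERVAL in the
	 * all-pinned case, or sd->max_interval otherwise.
	 */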
4582
4583 ld_moved = 0;
4584out:
4585 return ld_moved;
4586}
4587
4588/*
4589 * idle_balance is called by schedule() if this_cpu is about to become
4590 * idle. Attempts to pull tasks from other CPUs.
4591 */
4592void idle_balance(int this_cpu, struct rq *this_rq)
4593{
4594 struct sched_domain *sd;
4595 int pulled_task = 0;
4596 unsigned long next_balance = jiffies + HZ;
4597
4598 this_rq->idle_stamp = this_rq->clock;
4599
4600 if (this_rq->avg_idle < sysctl_sched_migration_cost)
4601 return;
4602
4603 /*
4604 * Drop the rq->lock, but keep IRQ/preempt disabled.
4605 */
4606 raw_spin_unlock(&this_rq->lock);
4607
4608 update_shares(this_cpu);
4609 rcu_read_lock();
4610 for_each_domain(this_cpu, sd) {
4611 unsigned long interval;
4612 int balance = 1;
4613
4614 if (!(sd->flags & SD_LOAD_BALANCE))
4615 continue;
4616
4617 if (sd->flags & SD_BALANCE_NEWIDLE) {
4618			/* If we've pulled tasks over, stop searching: */
4619 pulled_task = load_balance(this_cpu, this_rq,
4620 sd, CPU_NEWLY_IDLE, &balance);
4621 }
4622
4623 interval = msecs_to_jiffies(sd->balance_interval);
4624 if (time_after(next_balance, sd->last_balance + interval))
4625 next_balance = sd->last_balance + interval;
4626 if (pulled_task) {
4627 this_rq->idle_stamp = 0;
4628 break;
4629 }
4630 }
4631 rcu_read_unlock();
4632
4633 raw_spin_lock(&this_rq->lock);
4634
4635 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
4636 /*
4637 * We are going idle. next_balance may be set based on
4638 * a busy processor. So reset next_balance.
4639 */
4640 this_rq->next_balance = next_balance;
4641 }
4642}
4643
4644/*
4645 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
4646 * running tasks off the busiest CPU onto idle CPUs. It requires at
4647 * least 1 task to be running on each physical CPU where possible, and
4648 * avoids physical / logical imbalances.
4649 */
4650static int active_load_balance_cpu_stop(void *data)
4651{
4652 struct rq *busiest_rq = data;
4653 int busiest_cpu = cpu_of(busiest_rq);
4654 int target_cpu = busiest_rq->push_cpu;
4655 struct rq *target_rq = cpu_rq(target_cpu);
4656 struct sched_domain *sd;
4657
4658 raw_spin_lock_irq(&busiest_rq->lock);
4659
4660 /* make sure the requested cpu hasn't gone down in the meantime */
4661 if (unlikely(busiest_cpu != smp_processor_id() ||
4662 !busiest_rq->active_balance))
4663 goto out_unlock;
4664
4665 /* Is there any task to move? */
4666 if (busiest_rq->nr_running <= 1)
4667 goto out_unlock;
4668
4669 /*
4670	 * This condition is "impossible"; if it occurs,
4671 * we need to fix it. Originally reported by
4672 * Bjorn Helgaas on a 128-cpu setup.
4673 */
4674 BUG_ON(busiest_rq == target_rq);
4675
4676 /* move a task from busiest_rq to target_rq */
4677 double_lock_balance(busiest_rq, target_rq);
4678
4679 /* Search for an sd spanning us and the target CPU. */
4680 rcu_read_lock();
4681 for_each_domain(target_cpu, sd) {
4682 if ((sd->flags & SD_LOAD_BALANCE) &&
4683 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
4684 break;
4685 }
4686
4687 if (likely(sd)) {
4688 schedstat_inc(sd, alb_count);
4689
4690 if (move_one_task(target_rq, target_cpu, busiest_rq,
4691 sd, CPU_IDLE))
4692 schedstat_inc(sd, alb_pushed);
4693 else
4694 schedstat_inc(sd, alb_failed);
4695 }
4696 rcu_read_unlock();
4697 double_unlock_balance(busiest_rq, target_rq);
4698out_unlock:
4699 busiest_rq->active_balance = 0;
4700 raw_spin_unlock_irq(&busiest_rq->lock);
4701 return 0;
4702}
4703
4704#ifdef CONFIG_NO_HZ
4705/*
4706 * idle load balancing details
4707 * - One of the idle CPUs nominates itself as idle load_balancer, while
4708 * entering idle.
4709 * - This idle load balancer CPU will also go into tickless mode when
4710 * it is idle, just like all other idle CPUs
4711 * - When one of the busy CPUs notices that there may be an idle rebalancing
4712 *   needed, it will kick the idle load balancer, which then does idle
4713 * load balancing for all the idle CPUs.
4714 */
4715static struct {
4716 atomic_t load_balancer;
4717 atomic_t first_pick_cpu;
4718 atomic_t second_pick_cpu;
4719 cpumask_var_t idle_cpus_mask;
4720 cpumask_var_t grp_idle_mask;
4721 unsigned long next_balance; /* in jiffy units */
4722} nohz ____cacheline_aligned;
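/*
 * Field summary: load_balancer holds the cpu id of the current ilb owner
 * (nr_cpu_ids when there is none), first_pick_cpu/second_pick_cpu implement
 * the kick heuristic in nohz_kick_needed(), idle_cpus_mask tracks the cpus
 * that have stopped their tick, and grp_idle_mask is scratch space for
 * is_semi_idle_group().
 */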
4723
4724int get_nohz_load_balancer(void)
4725{
4726 return atomic_read(&nohz.load_balancer);
4727}
4728
4729#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
4730/**
4731 * lowest_flag_domain - Return lowest sched_domain containing flag.
4732 * @cpu: The cpu whose lowest level of sched domain is to
4733 * be returned.
4734 * @flag: The flag to check for the lowest sched_domain
4735 * for the given cpu.
4736 *
4737 * Returns the lowest sched_domain of a cpu which contains the given flag.
4738 */
4739static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
4740{
4741 struct sched_domain *sd;
4742
4743 for_each_domain(cpu, sd)
4744 if (sd->flags & flag)
4745 break;
4746
4747 return sd;
4748}
4749
4750/**
4751 * for_each_flag_domain - Iterates over sched_domains containing the flag.
4752 * @cpu: The cpu whose domains we're iterating over.
4753 * @sd: variable holding the value of the power_savings_sd
4754 * for cpu.
4755 * @flag: The flag to filter the sched_domains to be iterated.
4756 *
4757 * Iterates over all the scheduler domains of a given cpu that have the 'flag'
4758 * set, starting from the lowest sched_domain to the highest.
4759 */
4760#define for_each_flag_domain(cpu, sd, flag) \
4761 for (sd = lowest_flag_domain(cpu, flag); \
4762 (sd && (sd->flags & flag)); sd = sd->parent)
4763
4764/**
4765 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
4766 * @ilb_group: group to be checked for semi-idleness
4767 *
4768 * Returns: 1 if the group is semi-idle. 0 otherwise.
4769 *
4770 * We define a sched_group to be semi-idle if it has at least one idle CPU
4771 * and at least one non-idle CPU. This helper function checks if the given
4772 * sched_group is semi-idle or not.
4773 */
4774static inline int is_semi_idle_group(struct sched_group *ilb_group)
4775{
4776 cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
4777 sched_group_cpus(ilb_group));
4778
4779 /*
4780	 * A sched_group is semi-idle when it has at least one busy cpu
4781	 * and at least one idle cpu.
4782 */
4783 if (cpumask_empty(nohz.grp_idle_mask))
4784 return 0;
4785
4786 if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
4787 return 0;
4788
4789 return 1;
4790}
4791/**
4792 * find_new_ilb - Finds the optimum idle load balancer for nomination.
4793 * @cpu: The cpu which is nominating a new idle_load_balancer.
4794 *
4795 * Returns:	the id of the idle load balancer if it exists;
4796 *		else, returns >= nr_cpu_ids.
4797 *
4798 * This algorithm picks the idle load balancer such that it belongs to a
4799 * semi-idle powersavings sched_domain. The idea is to try and avoid
4800 * completely idle packages/cores just for the purpose of idle load balancing
4801 * when there are other idle cpus which are better suited for that job.
4802 */
4803static int find_new_ilb(int cpu)
4804{
4805 struct sched_domain *sd;
4806 struct sched_group *ilb_group;
4807 int ilb = nr_cpu_ids;
4808
4809 /*
4810 * Have idle load balancer selection from semi-idle packages only
4811 * when power-aware load balancing is enabled
4812 */
4813 if (!(sched_smt_power_savings || sched_mc_power_savings))
4814 goto out_done;
4815
4816 /*
4817 * Optimize for the case when we have no idle CPUs or only one
4818 * idle CPU. Don't walk the sched_domain hierarchy in such cases
4819 */
4820 if (cpumask_weight(nohz.idle_cpus_mask) < 2)
4821 goto out_done;
4822
4823 rcu_read_lock();
4824 for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
4825 ilb_group = sd->groups;
4826
4827 do {
4828 if (is_semi_idle_group(ilb_group)) {
4829 ilb = cpumask_first(nohz.grp_idle_mask);
4830 goto unlock;
4831 }
4832
4833 ilb_group = ilb_group->next;
4834
4835 } while (ilb_group != sd->groups);
4836 }
4837unlock:
4838 rcu_read_unlock();
4839
4840out_done:
4841 return ilb;
4842}
4843#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
4844static inline int find_new_ilb(int call_cpu)
4845{
4846 return nr_cpu_ids;
4847}
4848#endif
4849
4850/*
4851 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
4852 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
4853 * CPU (if there is one).
4854 */
4855static void nohz_balancer_kick(int cpu)
4856{
4857 int ilb_cpu;
4858
4859 nohz.next_balance++;
4860
4861 ilb_cpu = get_nohz_load_balancer();
4862
4863 if (ilb_cpu >= nr_cpu_ids) {
4864 ilb_cpu = cpumask_first(nohz.idle_cpus_mask);
4865 if (ilb_cpu >= nr_cpu_ids)
4866 return;
4867 }
4868
4869 if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
4870 cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
4871
4872 smp_mb();
4873 /*
4874 * Use smp_send_reschedule() instead of resched_cpu().
4875 * This way we generate a sched IPI on the target cpu which
4876 * is idle. And the softirq performing nohz idle load balance
4877 * will be run before returning from the IPI.
4878 */
4879 smp_send_reschedule(ilb_cpu);
4880 }
4881 return;
4882}
4883
4884/*
4885 * This routine will try to nominate the ilb (idle load balancing)
4886 * owner among the cpus whose ticks are stopped. The ilb owner will do the idle
4887 * load balancing on behalf of all those cpus.
4888 *
4889 * When the ilb owner becomes busy, we will not have a new ilb owner until some
4890 * idle CPU wakes up and goes back to idle or some busy CPU tries to kick
4891 * idle load balancing by kicking one of the idle CPUs.
4892 *
4893 * Ticks are stopped for the ilb owner as well, with a busy CPU kicking this
4894 * ilb owner CPU in the future (when there is a need for idle load balancing on
4895 * behalf of all idle CPUs).
4896 */
4897void select_nohz_load_balancer(int stop_tick)
4898{
4899 int cpu = smp_processor_id();
4900
4901 if (stop_tick) {
4902 if (!cpu_active(cpu)) {
4903 if (atomic_read(&nohz.load_balancer) != cpu)
4904 return;
4905
4906 /*
4907 * If we are going offline and still the leader,
4908 * give up!
4909 */
4910 if (atomic_cmpxchg(&nohz.load_balancer, cpu,
4911 nr_cpu_ids) != cpu)
4912 BUG();
4913
4914 return;
4915 }
4916
4917 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
4918
4919 if (atomic_read(&nohz.first_pick_cpu) == cpu)
4920 atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids);
4921 if (atomic_read(&nohz.second_pick_cpu) == cpu)
4922 atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
4923
4924 if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) {
4925 int new_ilb;
4926
4927 /* make me the ilb owner */
4928 if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids,
4929 cpu) != nr_cpu_ids)
4930 return;
4931
4932 /*
4933 * Check to see if there is a more power-efficient
4934 * ilb.
4935 */
4936 new_ilb = find_new_ilb(cpu);
4937 if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
4938 atomic_set(&nohz.load_balancer, nr_cpu_ids);
4939 resched_cpu(new_ilb);
4940 return;
4941 }
4942 return;
4943 }
4944 } else {
4945 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
4946 return;
4947
4948 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
4949
4950 if (atomic_read(&nohz.load_balancer) == cpu)
4951 if (atomic_cmpxchg(&nohz.load_balancer, cpu,
4952 nr_cpu_ids) != cpu)
4953 BUG();
4954 }
4955 return;
4956}
4957#endif
4958
4959static DEFINE_SPINLOCK(balancing);
4960
4961static unsigned long __read_mostly max_load_balance_interval = HZ/10;
4962
4963/*
4964 * Scale the max load_balance interval with the number of CPUs in the system.
4965 * This trades load-balance latency on larger machines for less cross talk.
4966 */
4967void update_max_interval(void)
4968{
4969 max_load_balance_interval = HZ*num_online_cpus()/10;
4970}
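/*
 * For example (illustrative): with HZ == 1000 and 8 online cpus the cap
 * becomes 800 jiffies, i.e. roughly 0.8s between balance attempts at most.
 */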
4971
4972/*
4973 * It checks each scheduling domain to see if it is due to be balanced,
4974 * and initiates a balancing operation if so.
4975 *
4976 * Balancing parameters are set up in arch_init_sched_domains.
4977 */
4978static void rebalance_domains(int cpu, enum cpu_idle_type idle)
4979{
4980 int balance = 1;
4981 struct rq *rq = cpu_rq(cpu);
4982 unsigned long interval;
4983 struct sched_domain *sd;
4984 /* Earliest time when we have to do rebalance again */
4985 unsigned long next_balance = jiffies + 60*HZ;
4986 int update_next_balance = 0;
4987 int need_serialize;
4988
4989 update_shares(cpu);
4990
4991 rcu_read_lock();
4992 for_each_domain(cpu, sd) {
4993 if (!(sd->flags & SD_LOAD_BALANCE))
4994 continue;
4995
4996 interval = sd->balance_interval;
4997 if (idle != CPU_IDLE)
4998 interval *= sd->busy_factor;
4999
5000 /* scale ms to jiffies */
5001 interval = msecs_to_jiffies(interval);
5002 interval = clamp(interval, 1UL, max_load_balance_interval);
5003
5004 need_serialize = sd->flags & SD_SERIALIZE;
5005
5006 if (need_serialize) {
5007 if (!spin_trylock(&balancing))
5008 goto out;
5009 }
5010
5011 if (time_after_eq(jiffies, sd->last_balance + interval)) {
5012 if (load_balance(cpu, rq, sd, idle, &balance)) {
5013 /*
5014				 * We've pulled tasks over, so we're no
5015 * longer idle.
5016 */
5017 idle = CPU_NOT_IDLE;
5018 }
5019 sd->last_balance = jiffies;
5020 }
5021 if (need_serialize)
5022 spin_unlock(&balancing);
5023out:
5024 if (time_after(next_balance, sd->last_balance + interval)) {
5025 next_balance = sd->last_balance + interval;
5026 update_next_balance = 1;
5027 }
5028
5029 /*
5030 * Stop the load balance at this level. There is another
5031 * CPU in our sched group which is doing load balancing more
5032 * actively.
5033 */
5034 if (!balance)
5035 break;
5036 }
5037 rcu_read_unlock();
5038
5039 /*
5040 * next_balance will be updated only when there is a need.
5041	 * When the cpu is attached to a null domain, for example, it will not be
5042 * updated.
5043 */
5044 if (likely(update_next_balance))
5045 rq->next_balance = next_balance;
5046}
5047
5048#ifdef CONFIG_NO_HZ
5049/*
5050 * In the CONFIG_NO_HZ case, the idle balance kickee will do the
5051 * rebalancing for all the cpus for which scheduler ticks are stopped.
5052 */
5053static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
5054{
5055 struct rq *this_rq = cpu_rq(this_cpu);
5056 struct rq *rq;
5057 int balance_cpu;
5058
5059 if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
5060 return;
5061
5062 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
5063 if (balance_cpu == this_cpu)
5064 continue;
5065
5066 /*
5067 * If this cpu gets work to do, stop the load balancing
5068		 * work being done for other cpus. The next load
5069 * balancing owner will pick it up.
5070 */
5071 if (need_resched()) {
5072 this_rq->nohz_balance_kick = 0;
5073 break;
5074 }
5075
5076 raw_spin_lock_irq(&this_rq->lock);
5077 update_rq_clock(this_rq);
5078 update_cpu_load(this_rq);
5079 raw_spin_unlock_irq(&this_rq->lock);
5080
5081 rebalance_domains(balance_cpu, CPU_IDLE);
5082
5083 rq = cpu_rq(balance_cpu);
5084 if (time_after(this_rq->next_balance, rq->next_balance))
5085 this_rq->next_balance = rq->next_balance;
5086 }
5087 nohz.next_balance = this_rq->next_balance;
5088 this_rq->nohz_balance_kick = 0;
5089}
5090
5091/*
5092 * Current heuristic for kicking the idle load balancer
5093 * - first_pick_cpu is one of the busy CPUs. It will kick
5094 *   the idle load balancer when it has more than one process active. This
5095 * eliminates the need for idle load balancing altogether when we have
5096 * only one running process in the system (common case).
5097 * - If there is more than one busy CPU, the idle load balancer may have
5098 * to run for active_load_balance to happen (i.e., two busy CPUs are
5099 * SMT or core siblings and can run better if they move to different
5100 * physical CPUs). So, second_pick_cpu is the second of the busy CPUs
5101 *   which will kick the idle load balancer as soon as it has any load.
5102 */
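/*
 * Example flow (illustrative): with a single busy cpu in the system, that
 * cpu claims first_pick_cpu and only kicks the ilb once it has more than
 * one runnable task; a second busy cpu then claims second_pick_cpu and
 * kicks as soon as it has any load, so that e.g. SMT/core siblings can be
 * spread onto the idle cpus.
 */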
5103static inline int nohz_kick_needed(struct rq *rq, int cpu)
5104{
5105 unsigned long now = jiffies;
5106 int ret;
5107 int first_pick_cpu, second_pick_cpu;
5108
5109 if (time_before(now, nohz.next_balance))
5110 return 0;
5111
5112 if (idle_cpu(cpu))
5113 return 0;
5114
5115 first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
5116 second_pick_cpu = atomic_read(&nohz.second_pick_cpu);
5117
5118 if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu &&
5119 second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu)
5120 return 0;
5121
5122 ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu);
5123 if (ret == nr_cpu_ids || ret == cpu) {
5124 atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
5125 if (rq->nr_running > 1)
5126 return 1;
5127 } else {
5128 ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu);
5129 if (ret == nr_cpu_ids || ret == cpu) {
5130 if (rq->nr_running)
5131 return 1;
5132 }
5133 }
5134 return 0;
5135}
5136#else
5137static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
5138#endif
5139
5140/*
5141 * run_rebalance_domains is triggered when needed from the scheduler tick.
5142 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
5143 */
5144static void run_rebalance_domains(struct softirq_action *h)
5145{
5146 int this_cpu = smp_processor_id();
5147 struct rq *this_rq = cpu_rq(this_cpu);
5148 enum cpu_idle_type idle = this_rq->idle_balance ?
5149 CPU_IDLE : CPU_NOT_IDLE;
5150
5151 rebalance_domains(this_cpu, idle);
5152
5153 /*
5154 * If this cpu has a pending nohz_balance_kick, then do the
5155 * balancing on behalf of the other idle cpus whose ticks are
5156 * stopped.
5157 */
5158 nohz_idle_balance(this_cpu, idle);
5159}
5160
5161static inline int on_null_domain(int cpu)
5162{
5163 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
5164}
5165
5166/*
5167 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
5168 */
5169void trigger_load_balance(struct rq *rq, int cpu)
5170{
5171 /* Don't need to rebalance while attached to NULL domain */
5172 if (time_after_eq(jiffies, rq->next_balance) &&
5173 likely(!on_null_domain(cpu)))
5174 raise_softirq(SCHED_SOFTIRQ);
5175#ifdef CONFIG_NO_HZ
5176 else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
5177 nohz_balancer_kick(cpu);
5178#endif
5179}
5180
5181static void rq_online_fair(struct rq *rq)
5182{
5183 update_sysctl();
5184}
5185
5186static void rq_offline_fair(struct rq *rq)
5187{
5188 update_sysctl();
5189}
5190
5191#endif /* CONFIG_SMP */
5192
5193/*
5194 * scheduler tick hitting a task of our scheduling class:
5195 */
5196static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
5197{
5198 struct cfs_rq *cfs_rq;
5199 struct sched_entity *se = &curr->se;
5200
5201 for_each_sched_entity(se) {
5202 cfs_rq = cfs_rq_of(se);
5203 entity_tick(cfs_rq, se, queued);
5204 }
5205}
5206
5207/*
5208 * called on fork with the child task as argument from the parent's context
5209 * - child not yet on the tasklist
5210 * - preemption disabled
5211 */
5212static void task_fork_fair(struct task_struct *p)
5213{
5214 struct cfs_rq *cfs_rq = task_cfs_rq(current);
5215 struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
5216 int this_cpu = smp_processor_id();
5217 struct rq *rq = this_rq();
5218 unsigned long flags;
5219
5220 raw_spin_lock_irqsave(&rq->lock, flags);
5221
5222 update_rq_clock(rq);
5223
5224 if (unlikely(task_cpu(p) != this_cpu)) {
5225 rcu_read_lock();
5226 __set_task_cpu(p, this_cpu);
5227 rcu_read_unlock();
5228 }
5229
5230 update_curr(cfs_rq);
5231
5232 if (curr)
5233 se->vruntime = curr->vruntime;
5234 place_entity(cfs_rq, se, 1);
5235
5236 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
5237 /*
5238 * Upon rescheduling, sched_class::put_prev_task() will place
5239 * 'current' within the tree based on its new key value.
5240 */
5241 swap(curr->vruntime, se->vruntime);
5242 resched_task(rq->curr);
5243 }
5244
5245 se->vruntime -= cfs_rq->min_vruntime;
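	/*
	 * The child is not yet enqueued anywhere, so keep its vruntime
	 * relative to this cfs_rq's min_vruntime; enqueue_entity() adds the
	 * destination cfs_rq's min_vruntime back when the child is finally
	 * placed, possibly on a different cpu.
	 */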
5246
5247 raw_spin_unlock_irqrestore(&rq->lock, flags);
5248}
5249
5250/*
5251 * Priority of the task has changed. Check to see if we preempt
5252 * the current task.
5253 */
5254static void
5255prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
5256{
5257 if (!p->se.on_rq)
5258 return;
5259
5260 /*
5261 * Reschedule if we are currently running on this runqueue and
5262 * our priority decreased, or if we are not currently running on
5263	 * this runqueue and our priority is higher than the current task's.
5264 */
5265 if (rq->curr == p) {
5266 if (p->prio > oldprio)
5267 resched_task(rq->curr);
5268 } else
5269 check_preempt_curr(rq, p, 0);
5270}
5271
5272static void switched_from_fair(struct rq *rq, struct task_struct *p)
5273{
5274 struct sched_entity *se = &p->se;
5275 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5276
5277 /*
5278	 * Ensure the task's vruntime is normalized, so that when it's
5279 * switched back to the fair class the enqueue_entity(.flags=0) will
5280 * do the right thing.
5281 *
5282 * If it was on_rq, then the dequeue_entity(.flags=0) will already
5283 * have normalized the vruntime, if it was !on_rq, then only when
5284 * the task is sleeping will it still have non-normalized vruntime.
5285 */
5286 if (!se->on_rq && p->state != TASK_RUNNING) {
5287 /*
5288 * Fix up our vruntime so that the current sleep doesn't
5289 * cause 'unlimited' sleep bonus.
5290 */
5291 place_entity(cfs_rq, se, 0);
5292 se->vruntime -= cfs_rq->min_vruntime;
5293 }
5294}
5295
5296/*
5297 * We switched to the sched_fair class.
5298 */
5299static void switched_to_fair(struct rq *rq, struct task_struct *p)
5300{
5301 if (!p->se.on_rq)
5302 return;
5303
5304 /*
5305 * We were most likely switched from sched_rt, so
5306 * kick off the schedule if running, otherwise just see
5307 * if we can still preempt the current task.
5308 */
5309 if (rq->curr == p)
5310 resched_task(rq->curr);
5311 else
5312 check_preempt_curr(rq, p, 0);
5313}
5314
5315/* Account for a task changing its policy or group.
5316 *
5317 * This routine is mostly called to set cfs_rq->curr field when a task
5318 * migrates between groups/classes.
5319 */
5320static void set_curr_task_fair(struct rq *rq)
5321{
5322 struct sched_entity *se = &rq->curr->se;
5323
5324 for_each_sched_entity(se) {
5325 struct cfs_rq *cfs_rq = cfs_rq_of(se);
5326
5327 set_next_entity(cfs_rq, se);
5328 /* ensure bandwidth has been allocated on our new cfs_rq */
5329 account_cfs_rq_runtime(cfs_rq, 0);
5330 }
5331}
5332
5333void init_cfs_rq(struct cfs_rq *cfs_rq)
5334{
5335 cfs_rq->tasks_timeline = RB_ROOT;
5336 INIT_LIST_HEAD(&cfs_rq->tasks);
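	/*
	 * min_vruntime starts just below the u64 wrap point, presumably so
	 * that the signed-overflow comparisons get exercised early rather
	 * than only after very long uptimes.
	 */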
5337 cfs_rq->min_vruntime = (u64)(-(1LL << 20));
5338#ifndef CONFIG_64BIT
5339 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
5340#endif
5341}
5342
5343#ifdef CONFIG_FAIR_GROUP_SCHED
5344static void task_move_group_fair(struct task_struct *p, int on_rq)
5345{
5346 /*
5347 * If the task was not on the rq at the time of this cgroup movement
5348 * it must have been asleep, sleeping tasks keep their ->vruntime
5349 * absolute on their old rq until wakeup (needed for the fair sleeper
5350 * bonus in place_entity()).
5351 *
5352 * If it was on the rq, we've just 'preempted' it, which does convert
5353 * ->vruntime to a relative base.
5354 *
5355 * Make sure both cases convert their relative position when migrating
5356 * to another cgroup's rq. This does somewhat interfere with the
5357 * fair sleeper stuff for the first placement, but who cares.
5358 */
5359 if (!on_rq)
5360 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
5361 set_task_rq(p, task_cpu(p));
5362 if (!on_rq)
5363 p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
5364}
5365
5366void free_fair_sched_group(struct task_group *tg)
5367{
5368 int i;
5369
5370 destroy_cfs_bandwidth(tg_cfs_bandwidth(tg));
5371
5372 for_each_possible_cpu(i) {
5373 if (tg->cfs_rq)
5374 kfree(tg->cfs_rq[i]);
5375 if (tg->se)
5376 kfree(tg->se[i]);
5377 }
5378
5379 kfree(tg->cfs_rq);
5380 kfree(tg->se);
5381}
5382
5383int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
5384{
5385 struct cfs_rq *cfs_rq;
5386 struct sched_entity *se;
5387 int i;
5388
5389 tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
5390 if (!tg->cfs_rq)
5391 goto err;
5392 tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
5393 if (!tg->se)
5394 goto err;
5395
5396 tg->shares = NICE_0_LOAD;
5397
5398 init_cfs_bandwidth(tg_cfs_bandwidth(tg));
5399
5400 for_each_possible_cpu(i) {
5401 cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
5402 GFP_KERNEL, cpu_to_node(i));
5403 if (!cfs_rq)
5404 goto err;
5405
5406 se = kzalloc_node(sizeof(struct sched_entity),
5407 GFP_KERNEL, cpu_to_node(i));
5408 if (!se)
5409 goto err_free_rq;
5410
5411 init_cfs_rq(cfs_rq);
5412 init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
5413 }
5414
5415 return 1;
5416
5417err_free_rq:
5418 kfree(cfs_rq);
5419err:
5420 return 0;
5421}
5422
5423void unregister_fair_sched_group(struct task_group *tg, int cpu)
5424{
5425 struct rq *rq = cpu_rq(cpu);
5426 unsigned long flags;
5427
5428 /*
5429 * Only empty task groups can be destroyed; so we can speculatively
5430 * check on_list without danger of it being re-added.
5431 */
5432 if (!tg->cfs_rq[cpu]->on_list)
5433 return;
5434
5435 raw_spin_lock_irqsave(&rq->lock, flags);
5436 list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
5437 raw_spin_unlock_irqrestore(&rq->lock, flags);
5438}
5439
5440void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
5441 struct sched_entity *se, int cpu,
5442 struct sched_entity *parent)
5443{
5444 struct rq *rq = cpu_rq(cpu);
5445
5446 cfs_rq->tg = tg;
5447 cfs_rq->rq = rq;
5448#ifdef CONFIG_SMP
5449 /* allow initial update_cfs_load() to truncate */
5450 cfs_rq->load_stamp = 1;
5451#endif
5452 init_cfs_rq_runtime(cfs_rq);
5453
5454 tg->cfs_rq[cpu] = cfs_rq;
5455 tg->se[cpu] = se;
5456
5457 /* se could be NULL for root_task_group */
5458 if (!se)
5459 return;
5460
5461 if (!parent)
5462 se->cfs_rq = &rq->cfs;
5463 else
5464 se->cfs_rq = parent->my_q;
5465
5466 se->my_q = cfs_rq;
5467 update_load_set(&se->load, 0);
5468 se->parent = parent;
5469}
5470
5471static DEFINE_MUTEX(shares_mutex);
5472
5473int sched_group_set_shares(struct task_group *tg, unsigned long shares)
5474{
5475 int i;
5476 unsigned long flags;
5477
5478 /*
5479 * We can't change the weight of the root cgroup.
5480 */
5481 if (!tg->se[0])
5482 return -EINVAL;
5483
5484 shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
5485
5486 mutex_lock(&shares_mutex);
5487 if (tg->shares == shares)
5488 goto done;
5489
5490 tg->shares = shares;
5491 for_each_possible_cpu(i) {
5492 struct rq *rq = cpu_rq(i);
5493 struct sched_entity *se;
5494
5495 se = tg->se[i];
5496 /* Propagate contribution to hierarchy */
5497 raw_spin_lock_irqsave(&rq->lock, flags);
5498 for_each_sched_entity(se)
5499 update_cfs_shares(group_cfs_rq(se));
5500 raw_spin_unlock_irqrestore(&rq->lock, flags);
5501 }
5502
5503done:
5504 mutex_unlock(&shares_mutex);
5505 return 0;
5506}
5507#else /* CONFIG_FAIR_GROUP_SCHED */
5508
5509void free_fair_sched_group(struct task_group *tg) { }
5510
5511int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
5512{
5513 return 1;
5514}
5515
5516void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
5517
5518#endif /* CONFIG_FAIR_GROUP_SCHED */
5519
5520
5521static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
5522{
5523 struct sched_entity *se = &task->se;
5524 unsigned int rr_interval = 0;
5525
5526 /*
5527 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
5528 * idle runqueue:
5529 */
5530 if (rq->cfs.load.weight)
5531 rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
5532
5533 return rr_interval;
5534}
5535
5536/*
5537 * All the scheduling class methods:
5538 */
5539const struct sched_class fair_sched_class = {
5540 .next = &idle_sched_class,
5541 .enqueue_task = enqueue_task_fair,
5542 .dequeue_task = dequeue_task_fair,
5543 .yield_task = yield_task_fair,
5544 .yield_to_task = yield_to_task_fair,
5545
5546 .check_preempt_curr = check_preempt_wakeup,
5547
5548 .pick_next_task = pick_next_task_fair,
5549 .put_prev_task = put_prev_task_fair,
5550
5551#ifdef CONFIG_SMP
5552 .select_task_rq = select_task_rq_fair,
5553
5554 .rq_online = rq_online_fair,
5555 .rq_offline = rq_offline_fair,
5556
5557 .task_waking = task_waking_fair,
5558#endif
5559
5560 .set_curr_task = set_curr_task_fair,
5561 .task_tick = task_tick_fair,
5562 .task_fork = task_fork_fair,
5563
5564 .prio_changed = prio_changed_fair,
5565 .switched_from = switched_from_fair,
5566 .switched_to = switched_to_fair,
5567
5568 .get_rr_interval = get_rr_interval_fair,
5569
5570#ifdef CONFIG_FAIR_GROUP_SCHED
5571 .task_move_group = task_move_group_fair,
5572#endif
5573};
5574
5575#ifdef CONFIG_SCHED_DEBUG
5576void print_cfs_stats(struct seq_file *m, int cpu)
5577{
5578 struct cfs_rq *cfs_rq;
5579
5580 rcu_read_lock();
5581 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
5582 print_cfs_rq(m, cpu, cfs_rq);
5583 rcu_read_unlock();
5584}
5585#endif
5586
5587__init void init_sched_fair_class(void)
5588{
5589#ifdef CONFIG_SMP
5590 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
5591
5592#ifdef CONFIG_NO_HZ
5593 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
5594 alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
5595 atomic_set(&nohz.load_balancer, nr_cpu_ids);
5596 atomic_set(&nohz.first_pick_cpu, nr_cpu_ids);
5597 atomic_set(&nohz.second_pick_cpu, nr_cpu_ids);
5598#endif
5599#endif /* SMP */
5600
5601}
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
new file mode 100644
index 000000000000..84802245abd2
--- /dev/null
+++ b/kernel/sched/features.h
@@ -0,0 +1,70 @@
1/*
2 * Only give sleepers 50% of their service deficit. This allows
3 * them to run sooner, but does not allow tons of sleepers to
4 * rip the spread apart.
5 */
6SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
7
8/*
9 * Place new tasks ahead so that they do not starve already running
10 * tasks
11 */
12SCHED_FEAT(START_DEBIT, 1)
13
14/*
15 * Based on load and program behaviour, see if it makes sense to place
16 * a newly woken task on the same cpu as the task that woke it --
17 * to improve cache locality. Typically used with SYNC wakeups as
18 * generated by pipes and the like, see also SYNC_WAKEUPS.
19 */
20SCHED_FEAT(AFFINE_WAKEUPS, 1)
21
22/*
23 * Prefer to schedule the task we woke last (assuming it failed
24 * wakeup-preemption), since it's likely going to consume data we
25 * touched, which increases cache locality.
26 */
27SCHED_FEAT(NEXT_BUDDY, 0)
28
29/*
30 * Prefer to schedule the task that ran last (when we did
31 * wake-preempt) as that will likely touch the same data, which increases
32 * cache locality.
33 */
34SCHED_FEAT(LAST_BUDDY, 1)
35
36/*
37 * Consider buddies to be cache hot; this decreases the likelihood of a
38 * cache buddy being migrated away and increases cache locality.
39 */
40SCHED_FEAT(CACHE_HOT_BUDDY, 1)
41
42/*
43 * Use arch dependent cpu power functions
44 */
45SCHED_FEAT(ARCH_POWER, 0)
46
47SCHED_FEAT(HRTICK, 0)
48SCHED_FEAT(DOUBLE_TICK, 0)
49SCHED_FEAT(LB_BIAS, 1)
50
51/*
52 * Spin-wait on mutex acquisition when the mutex owner is running on
53 * another cpu -- assumes that when the owner is running, it will soon
54 * release the lock. Decreases scheduling overhead.
55 */
56SCHED_FEAT(OWNER_SPIN, 1)
57
58/*
59 * Decrement CPU power based on time not spent running tasks
60 */
61SCHED_FEAT(NONTASK_POWER, 1)
62
63/*
64 * Queue remote wakeups on the target CPU and process them
65 * using the scheduler IPI. Reduces rq->lock contention/bounces.
66 */
67SCHED_FEAT(TTWU_QUEUE, 1)
68
69SCHED_FEAT(FORCE_SD_OVERLAP, 0)
70SCHED_FEAT(RT_RUNTIME_SHARE, 1)
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
new file mode 100644
index 000000000000..91b4c957f289
--- /dev/null
+++ b/kernel/sched/idle_task.c
@@ -0,0 +1,99 @@
1#include "sched.h"
2
3/*
4 * idle-task scheduling class.
5 *
6 * (NOTE: these are not related to SCHED_IDLE tasks which are
7 * handled in sched_fair.c)
8 */
9
10#ifdef CONFIG_SMP
11static int
12select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
13{
14	return task_cpu(p); /* IDLE tasks are never migrated */
15}
16#endif /* CONFIG_SMP */
17/*
18 * Idle tasks are unconditionally rescheduled:
19 */
20static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
21{
22 resched_task(rq->idle);
23}
24
25static struct task_struct *pick_next_task_idle(struct rq *rq)
26{
27 schedstat_inc(rq, sched_goidle);
28 calc_load_account_idle(rq);
29 return rq->idle;
30}
31
32/*
33 * It is not legal to sleep in the idle task - print a warning
34 * message if some code attempts to do it:
35 */
36static void
37dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
38{
39 raw_spin_unlock_irq(&rq->lock);
40 printk(KERN_ERR "bad: scheduling from the idle thread!\n");
41 dump_stack();
42 raw_spin_lock_irq(&rq->lock);
43}
44
45static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
46{
47}
48
49static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
50{
51}
52
53static void set_curr_task_idle(struct rq *rq)
54{
55}
56
57static void switched_to_idle(struct rq *rq, struct task_struct *p)
58{
59 BUG();
60}
61
62static void
63prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
64{
65 BUG();
66}
67
68static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
69{
70 return 0;
71}
72
73/*
74 * Simple, special scheduling class for the per-CPU idle tasks:
75 */
76const struct sched_class idle_sched_class = {
77 /* .next is NULL */
78 /* no enqueue/yield_task for idle tasks */
79
80 /* dequeue is not valid, we print a debug message there: */
81 .dequeue_task = dequeue_task_idle,
82
83 .check_preempt_curr = check_preempt_curr_idle,
84
85 .pick_next_task = pick_next_task_idle,
86 .put_prev_task = put_prev_task_idle,
87
88#ifdef CONFIG_SMP
89 .select_task_rq = select_task_rq_idle,
90#endif
91
92 .set_curr_task = set_curr_task_idle,
93 .task_tick = task_tick_idle,
94
95 .get_rr_interval = get_rr_interval_idle,
96
97 .prio_changed = prio_changed_idle,
98 .switched_to = switched_to_idle,
99};
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
new file mode 100644
index 000000000000..023b35502509
--- /dev/null
+++ b/kernel/sched/rt.c
@@ -0,0 +1,2045 @@
1/*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
6#include "sched.h"
7
8#include <linux/slab.h>
9
10static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
11
12struct rt_bandwidth def_rt_bandwidth;
13
14static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
15{
16 struct rt_bandwidth *rt_b =
17 container_of(timer, struct rt_bandwidth, rt_period_timer);
18 ktime_t now;
19 int overrun;
20 int idle = 0;
21
22 for (;;) {
23 now = hrtimer_cb_get_time(timer);
24 overrun = hrtimer_forward(timer, now, rt_b->rt_period);
25
26 if (!overrun)
27 break;
28
29 idle = do_sched_rt_period_timer(rt_b, overrun);
30 }
31
32 return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
33}
34
35void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
36{
37 rt_b->rt_period = ns_to_ktime(period);
38 rt_b->rt_runtime = runtime;
39
40 raw_spin_lock_init(&rt_b->rt_runtime_lock);
41
42 hrtimer_init(&rt_b->rt_period_timer,
43 CLOCK_MONOTONIC, HRTIMER_MODE_REL);
44 rt_b->rt_period_timer.function = sched_rt_period_timer;
45}
46
47static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
48{
49 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
50 return;
51
52 if (hrtimer_active(&rt_b->rt_period_timer))
53 return;
54
55 raw_spin_lock(&rt_b->rt_runtime_lock);
56 start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
57 raw_spin_unlock(&rt_b->rt_runtime_lock);
58}
59
60void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
61{
62 struct rt_prio_array *array;
63 int i;
64
65 array = &rt_rq->active;
66 for (i = 0; i < MAX_RT_PRIO; i++) {
67 INIT_LIST_HEAD(array->queue + i);
68 __clear_bit(i, array->bitmap);
69 }
70 /* delimiter for bitsearch: */
71 __set_bit(MAX_RT_PRIO, array->bitmap);
72
73#if defined CONFIG_SMP
74 rt_rq->highest_prio.curr = MAX_RT_PRIO;
75 rt_rq->highest_prio.next = MAX_RT_PRIO;
76 rt_rq->rt_nr_migratory = 0;
77 rt_rq->overloaded = 0;
78 plist_head_init(&rt_rq->pushable_tasks);
79#endif
80
81 rt_rq->rt_time = 0;
82 rt_rq->rt_throttled = 0;
83 rt_rq->rt_runtime = 0;
84 raw_spin_lock_init(&rt_rq->rt_runtime_lock);
85}
86
87#ifdef CONFIG_RT_GROUP_SCHED
88static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
89{
90 hrtimer_cancel(&rt_b->rt_period_timer);
91}
92
93#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
94
95static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
96{
97#ifdef CONFIG_SCHED_DEBUG
98 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
99#endif
100 return container_of(rt_se, struct task_struct, rt);
101}
102
103static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
104{
105 return rt_rq->rq;
106}
107
108static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
109{
110 return rt_se->rt_rq;
111}
112
113void free_rt_sched_group(struct task_group *tg)
114{
115 int i;
116
117 if (tg->rt_se)
118 destroy_rt_bandwidth(&tg->rt_bandwidth);
119
120 for_each_possible_cpu(i) {
121 if (tg->rt_rq)
122 kfree(tg->rt_rq[i]);
123 if (tg->rt_se)
124 kfree(tg->rt_se[i]);
125 }
126
127 kfree(tg->rt_rq);
128 kfree(tg->rt_se);
129}
130
131void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
132 struct sched_rt_entity *rt_se, int cpu,
133 struct sched_rt_entity *parent)
134{
135 struct rq *rq = cpu_rq(cpu);
136
137 rt_rq->highest_prio.curr = MAX_RT_PRIO;
138 rt_rq->rt_nr_boosted = 0;
139 rt_rq->rq = rq;
140 rt_rq->tg = tg;
141
142 tg->rt_rq[cpu] = rt_rq;
143 tg->rt_se[cpu] = rt_se;
144
145 if (!rt_se)
146 return;
147
148 if (!parent)
149 rt_se->rt_rq = &rq->rt;
150 else
151 rt_se->rt_rq = parent->my_q;
152
153 rt_se->my_q = rt_rq;
154 rt_se->parent = parent;
155 INIT_LIST_HEAD(&rt_se->run_list);
156}
157
158int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
159{
160 struct rt_rq *rt_rq;
161 struct sched_rt_entity *rt_se;
162 int i;
163
164 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
165 if (!tg->rt_rq)
166 goto err;
167 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
168 if (!tg->rt_se)
169 goto err;
170
171 init_rt_bandwidth(&tg->rt_bandwidth,
172 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
173
174 for_each_possible_cpu(i) {
175 rt_rq = kzalloc_node(sizeof(struct rt_rq),
176 GFP_KERNEL, cpu_to_node(i));
177 if (!rt_rq)
178 goto err;
179
180 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
181 GFP_KERNEL, cpu_to_node(i));
182 if (!rt_se)
183 goto err_free_rq;
184
185 init_rt_rq(rt_rq, cpu_rq(i));
186 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
187 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
188 }
189
190 return 1;
191
192err_free_rq:
193 kfree(rt_rq);
194err:
195 return 0;
196}
197
198#else /* CONFIG_RT_GROUP_SCHED */
199
200#define rt_entity_is_task(rt_se) (1)
201
202static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
203{
204 return container_of(rt_se, struct task_struct, rt);
205}
206
207static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
208{
209 return container_of(rt_rq, struct rq, rt);
210}
211
212static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
213{
214 struct task_struct *p = rt_task_of(rt_se);
215 struct rq *rq = task_rq(p);
216
217 return &rq->rt;
218}
219
220void free_rt_sched_group(struct task_group *tg) { }
221
222int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
223{
224 return 1;
225}
226#endif /* CONFIG_RT_GROUP_SCHED */
227
228#ifdef CONFIG_SMP
229
230static inline int rt_overloaded(struct rq *rq)
231{
232 return atomic_read(&rq->rd->rto_count);
233}
234
235static inline void rt_set_overload(struct rq *rq)
236{
237 if (!rq->online)
238 return;
239
240 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
241 /*
242 * Make sure the mask is visible before we set
243 * the overload count. That is checked to determine
244 * if we should look at the mask. It would be a shame
245 * if we looked at the mask, but the mask was not
246 * updated yet.
247 */
248 wmb();
249 atomic_inc(&rq->rd->rto_count);
250}
251
252static inline void rt_clear_overload(struct rq *rq)
253{
254 if (!rq->online)
255 return;
256
257 /* the order here really doesn't matter */
258 atomic_dec(&rq->rd->rto_count);
259 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
260}
261
262static void update_rt_migration(struct rt_rq *rt_rq)
263{
264 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
265 if (!rt_rq->overloaded) {
266 rt_set_overload(rq_of_rt_rq(rt_rq));
267 rt_rq->overloaded = 1;
268 }
269 } else if (rt_rq->overloaded) {
270 rt_clear_overload(rq_of_rt_rq(rt_rq));
271 rt_rq->overloaded = 0;
272 }
273}
274
275static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
276{
277 if (!rt_entity_is_task(rt_se))
278 return;
279
280 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
281
282 rt_rq->rt_nr_total++;
283 if (rt_se->nr_cpus_allowed > 1)
284 rt_rq->rt_nr_migratory++;
285
286 update_rt_migration(rt_rq);
287}
288
289static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
290{
291 if (!rt_entity_is_task(rt_se))
292 return;
293
294 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
295
296 rt_rq->rt_nr_total--;
297 if (rt_se->nr_cpus_allowed > 1)
298 rt_rq->rt_nr_migratory--;
299
300 update_rt_migration(rt_rq);
301}
302
303static inline int has_pushable_tasks(struct rq *rq)
304{
305 return !plist_head_empty(&rq->rt.pushable_tasks);
306}
307
308static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
309{
310 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
311 plist_node_init(&p->pushable_tasks, p->prio);
312 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
313
314 /* Update the highest prio pushable task */
315 if (p->prio < rq->rt.highest_prio.next)
316 rq->rt.highest_prio.next = p->prio;
317}
318
319static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
320{
321 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
322
323 /* Update the new highest prio pushable task */
324 if (has_pushable_tasks(rq)) {
325 p = plist_first_entry(&rq->rt.pushable_tasks,
326 struct task_struct, pushable_tasks);
327 rq->rt.highest_prio.next = p->prio;
328 } else
329 rq->rt.highest_prio.next = MAX_RT_PRIO;
330}
331
332#else
333
334static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
335{
336}
337
338static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
339{
340}
341
342static inline
343void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
344{
345}
346
347static inline
348void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
349{
350}
351
352#endif /* CONFIG_SMP */
353
354static inline int on_rt_rq(struct sched_rt_entity *rt_se)
355{
356 return !list_empty(&rt_se->run_list);
357}
358
359#ifdef CONFIG_RT_GROUP_SCHED
360
361static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
362{
363 if (!rt_rq->tg)
364 return RUNTIME_INF;
365
366 return rt_rq->rt_runtime;
367}
368
369static inline u64 sched_rt_period(struct rt_rq *rt_rq)
370{
371 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
372}
373
374typedef struct task_group *rt_rq_iter_t;
375
376static inline struct task_group *next_task_group(struct task_group *tg)
377{
378 do {
379 tg = list_entry_rcu(tg->list.next,
380 typeof(struct task_group), list);
381 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
382
383 if (&tg->list == &task_groups)
384 tg = NULL;
385
386 return tg;
387}
388
389#define for_each_rt_rq(rt_rq, iter, rq) \
390 for (iter = container_of(&task_groups, typeof(*iter), list); \
391 (iter = next_task_group(iter)) && \
392 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
393
394static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
395{
396 list_add_rcu(&rt_rq->leaf_rt_rq_list,
397 &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
398}
399
400static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
401{
402 list_del_rcu(&rt_rq->leaf_rt_rq_list);
403}
404
405#define for_each_leaf_rt_rq(rt_rq, rq) \
406 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
407
408#define for_each_sched_rt_entity(rt_se) \
409 for (; rt_se; rt_se = rt_se->parent)
410
411static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
412{
413 return rt_se->my_q;
414}
415
416static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
417static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
418
419static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
420{
421 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
422 struct sched_rt_entity *rt_se;
423
424 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
425
426 rt_se = rt_rq->tg->rt_se[cpu];
427
428 if (rt_rq->rt_nr_running) {
429 if (rt_se && !on_rt_rq(rt_se))
430 enqueue_rt_entity(rt_se, false);
431 if (rt_rq->highest_prio.curr < curr->prio)
432 resched_task(curr);
433 }
434}
435
436static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
437{
438 struct sched_rt_entity *rt_se;
439 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
440
441 rt_se = rt_rq->tg->rt_se[cpu];
442
443 if (rt_se && on_rt_rq(rt_se))
444 dequeue_rt_entity(rt_se);
445}
446
447static inline int rt_rq_throttled(struct rt_rq *rt_rq)
448{
449 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
450}
451
452static int rt_se_boosted(struct sched_rt_entity *rt_se)
453{
454 struct rt_rq *rt_rq = group_rt_rq(rt_se);
455 struct task_struct *p;
456
457 if (rt_rq)
458 return !!rt_rq->rt_nr_boosted;
459
460 p = rt_task_of(rt_se);
461 return p->prio != p->normal_prio;
462}
463
464#ifdef CONFIG_SMP
465static inline const struct cpumask *sched_rt_period_mask(void)
466{
467 return cpu_rq(smp_processor_id())->rd->span;
468}
469#else
470static inline const struct cpumask *sched_rt_period_mask(void)
471{
472 return cpu_online_mask;
473}
474#endif
475
476static inline
477struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
478{
479 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
480}
481
482static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
483{
484 return &rt_rq->tg->rt_bandwidth;
485}
486
487#else /* !CONFIG_RT_GROUP_SCHED */
488
489static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
490{
491 return rt_rq->rt_runtime;
492}
493
494static inline u64 sched_rt_period(struct rt_rq *rt_rq)
495{
496 return ktime_to_ns(def_rt_bandwidth.rt_period);
497}
498
499typedef struct rt_rq *rt_rq_iter_t;
500
501#define for_each_rt_rq(rt_rq, iter, rq) \
502 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
503
504static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
505{
506}
507
508static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
509{
510}
511
512#define for_each_leaf_rt_rq(rt_rq, rq) \
513 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
514
515#define for_each_sched_rt_entity(rt_se) \
516 for (; rt_se; rt_se = NULL)
517
518static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
519{
520 return NULL;
521}
522
523static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
524{
525 if (rt_rq->rt_nr_running)
526 resched_task(rq_of_rt_rq(rt_rq)->curr);
527}
528
529static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
530{
531}
532
533static inline int rt_rq_throttled(struct rt_rq *rt_rq)
534{
535 return rt_rq->rt_throttled;
536}
537
538static inline const struct cpumask *sched_rt_period_mask(void)
539{
540 return cpu_online_mask;
541}
542
543static inline
544struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
545{
546 return &cpu_rq(cpu)->rt;
547}
548
549static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
550{
551 return &def_rt_bandwidth;
552}
553
554#endif /* CONFIG_RT_GROUP_SCHED */
555
556#ifdef CONFIG_SMP
557/*
558 * We ran out of runtime, see if we can borrow some from our neighbours.
559 */
560static int do_balance_runtime(struct rt_rq *rt_rq)
561{
562 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
563 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
564 int i, weight, more = 0;
565 u64 rt_period;
566
567 weight = cpumask_weight(rd->span);
568
569 raw_spin_lock(&rt_b->rt_runtime_lock);
570 rt_period = ktime_to_ns(rt_b->rt_period);
571 for_each_cpu(i, rd->span) {
572 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
573 s64 diff;
574
575 if (iter == rt_rq)
576 continue;
577
578 raw_spin_lock(&iter->rt_runtime_lock);
579 /*
580 * Either all rqs have inf runtime and there's nothing to steal
581 * or __disable_runtime() below sets a specific rq to inf to
582		 * indicate it's been disabled and disallow stealing.
583 */
584 if (iter->rt_runtime == RUNTIME_INF)
585 goto next;
586
587 /*
588 * From runqueues with spare time, take 1/n part of their
589 * spare time, but no more than our period.
590 */
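		/*
		 * Worked example (hypothetical numbers): with 4 cpus in the
		 * root domain, a neighbour with 40ms of unused runtime donates
		 * 10ms to us, and we stop borrowing once our rt_runtime would
		 * exceed the period.
		 */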
591 diff = iter->rt_runtime - iter->rt_time;
592 if (diff > 0) {
593 diff = div_u64((u64)diff, weight);
594 if (rt_rq->rt_runtime + diff > rt_period)
595 diff = rt_period - rt_rq->rt_runtime;
596 iter->rt_runtime -= diff;
597 rt_rq->rt_runtime += diff;
598 more = 1;
599 if (rt_rq->rt_runtime == rt_period) {
600 raw_spin_unlock(&iter->rt_runtime_lock);
601 break;
602 }
603 }
604next:
605 raw_spin_unlock(&iter->rt_runtime_lock);
606 }
607 raw_spin_unlock(&rt_b->rt_runtime_lock);
608
609 return more;
610}
611
612/*
613 * Ensure this RQ takes back all the runtime it lent to its neighbours.
614 */
615static void __disable_runtime(struct rq *rq)
616{
617 struct root_domain *rd = rq->rd;
618 rt_rq_iter_t iter;
619 struct rt_rq *rt_rq;
620
621 if (unlikely(!scheduler_running))
622 return;
623
624 for_each_rt_rq(rt_rq, iter, rq) {
625 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
626 s64 want;
627 int i;
628
629 raw_spin_lock(&rt_b->rt_runtime_lock);
630 raw_spin_lock(&rt_rq->rt_runtime_lock);
631 /*
632 * Either we're all inf and nobody needs to borrow, or we're
633 * already disabled and thus have nothing to do, or we have
634 * exactly the right amount of runtime to take out.
635 */
636 if (rt_rq->rt_runtime == RUNTIME_INF ||
637 rt_rq->rt_runtime == rt_b->rt_runtime)
638 goto balanced;
639 raw_spin_unlock(&rt_rq->rt_runtime_lock);
640
641 /*
642 * Calculate the difference between what we started out with
643		 * and what we currently have; that's the amount of runtime
644		 * we lent out and now have to reclaim.
645 */
646 want = rt_b->rt_runtime - rt_rq->rt_runtime;
647
648 /*
649 * Greedy reclaim, take back as much as we can.
650 */
651 for_each_cpu(i, rd->span) {
652 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
653 s64 diff;
654
655 /*
656 * Can't reclaim from ourselves or disabled runqueues.
657 */
658 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
659 continue;
660
661 raw_spin_lock(&iter->rt_runtime_lock);
662 if (want > 0) {
663 diff = min_t(s64, iter->rt_runtime, want);
664 iter->rt_runtime -= diff;
665 want -= diff;
666 } else {
667 iter->rt_runtime -= want;
668 want -= want;
669 }
670 raw_spin_unlock(&iter->rt_runtime_lock);
671
672 if (!want)
673 break;
674 }
675
676 raw_spin_lock(&rt_rq->rt_runtime_lock);
677 /*
678 * We cannot be left wanting - that would mean some runtime
679 * leaked out of the system.
680 */
681 BUG_ON(want);
682balanced:
683 /*
684 * Disable all the borrow logic by pretending we have inf
685 * runtime - in which case borrowing doesn't make sense.
686 */
687 rt_rq->rt_runtime = RUNTIME_INF;
688 raw_spin_unlock(&rt_rq->rt_runtime_lock);
689 raw_spin_unlock(&rt_b->rt_runtime_lock);
690 }
691}
692
693static void disable_runtime(struct rq *rq)
694{
695 unsigned long flags;
696
697 raw_spin_lock_irqsave(&rq->lock, flags);
698 __disable_runtime(rq);
699 raw_spin_unlock_irqrestore(&rq->lock, flags);
700}
701
702static void __enable_runtime(struct rq *rq)
703{
704 rt_rq_iter_t iter;
705 struct rt_rq *rt_rq;
706
707 if (unlikely(!scheduler_running))
708 return;
709
710 /*
711 * Reset each runqueue's bandwidth settings
712 */
713 for_each_rt_rq(rt_rq, iter, rq) {
714 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
715
716 raw_spin_lock(&rt_b->rt_runtime_lock);
717 raw_spin_lock(&rt_rq->rt_runtime_lock);
718 rt_rq->rt_runtime = rt_b->rt_runtime;
719 rt_rq->rt_time = 0;
720 rt_rq->rt_throttled = 0;
721 raw_spin_unlock(&rt_rq->rt_runtime_lock);
722 raw_spin_unlock(&rt_b->rt_runtime_lock);
723 }
724}
725
726static void enable_runtime(struct rq *rq)
727{
728 unsigned long flags;
729
730 raw_spin_lock_irqsave(&rq->lock, flags);
731 __enable_runtime(rq);
732 raw_spin_unlock_irqrestore(&rq->lock, flags);
733}
734
735int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu)
736{
737 int cpu = (int)(long)hcpu;
738
739 switch (action) {
740 case CPU_DOWN_PREPARE:
741 case CPU_DOWN_PREPARE_FROZEN:
742 disable_runtime(cpu_rq(cpu));
743 return NOTIFY_OK;
744
745 case CPU_DOWN_FAILED:
746 case CPU_DOWN_FAILED_FROZEN:
747 case CPU_ONLINE:
748 case CPU_ONLINE_FROZEN:
749 enable_runtime(cpu_rq(cpu));
750 return NOTIFY_OK;
751
752 default:
753 return NOTIFY_DONE;
754 }
755}
756
757static int balance_runtime(struct rt_rq *rt_rq)
758{
759 int more = 0;
760
761 if (!sched_feat(RT_RUNTIME_SHARE))
762 return more;
763
764 if (rt_rq->rt_time > rt_rq->rt_runtime) {
765 raw_spin_unlock(&rt_rq->rt_runtime_lock);
766 more = do_balance_runtime(rt_rq);
767 raw_spin_lock(&rt_rq->rt_runtime_lock);
768 }
769
770 return more;
771}
772#else /* !CONFIG_SMP */
773static inline int balance_runtime(struct rt_rq *rt_rq)
774{
775 return 0;
776}
777#endif /* CONFIG_SMP */
778
779static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
780{
781 int i, idle = 1;
782 const struct cpumask *span;
783
784 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
785 return 1;
786
787 span = sched_rt_period_mask();
788 for_each_cpu(i, span) {
789 int enqueue = 0;
790 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
791 struct rq *rq = rq_of_rt_rq(rt_rq);
792
793 raw_spin_lock(&rq->lock);
794 if (rt_rq->rt_time) {
795 u64 runtime;
796
797 raw_spin_lock(&rt_rq->rt_runtime_lock);
798 if (rt_rq->rt_throttled)
799 balance_runtime(rt_rq);
800 runtime = rt_rq->rt_runtime;
801 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
802 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
803 rt_rq->rt_throttled = 0;
804 enqueue = 1;
805
806 /*
807 * Force a clock update if the CPU was idle,
808 * lest wakeup -> unthrottle time accumulate.
809 */
810 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
811 rq->skip_clock_update = -1;
812 }
813 if (rt_rq->rt_time || rt_rq->rt_nr_running)
814 idle = 0;
815 raw_spin_unlock(&rt_rq->rt_runtime_lock);
816 } else if (rt_rq->rt_nr_running) {
817 idle = 0;
818 if (!rt_rq_throttled(rt_rq))
819 enqueue = 1;
820 }
821
822 if (enqueue)
823 sched_rt_rq_enqueue(rt_rq);
824 raw_spin_unlock(&rq->lock);
825 }
826
827 return idle;
828}
829
830static inline int rt_se_prio(struct sched_rt_entity *rt_se)
831{
832#ifdef CONFIG_RT_GROUP_SCHED
833 struct rt_rq *rt_rq = group_rt_rq(rt_se);
834
835 if (rt_rq)
836 return rt_rq->highest_prio.curr;
837#endif
838
839 return rt_task_of(rt_se)->prio;
840}
841
842static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
843{
844 u64 runtime = sched_rt_runtime(rt_rq);
845
846 if (rt_rq->rt_throttled)
847 return rt_rq_throttled(rt_rq);
848
849 if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
850 return 0;
851
852 balance_runtime(rt_rq);
853 runtime = sched_rt_runtime(rt_rq);
854 if (runtime == RUNTIME_INF)
855 return 0;
856
857 if (rt_rq->rt_time > runtime) {
858 rt_rq->rt_throttled = 1;
859 printk_once(KERN_WARNING "sched: RT throttling activated\n");
860 if (rt_rq_throttled(rt_rq)) {
861 sched_rt_rq_dequeue(rt_rq);
862 return 1;
863 }
864 }
865
866 return 0;
867}
868
869/*
870 * Update the current task's runtime statistics. Skip current tasks that
871 * are not in our scheduling class.
872 */
873static void update_curr_rt(struct rq *rq)
874{
875 struct task_struct *curr = rq->curr;
876 struct sched_rt_entity *rt_se = &curr->rt;
877 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
878 u64 delta_exec;
879
880 if (curr->sched_class != &rt_sched_class)
881 return;
882
883 delta_exec = rq->clock_task - curr->se.exec_start;
884 if (unlikely((s64)delta_exec < 0))
885 delta_exec = 0;
886
887 schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
888
889 curr->se.sum_exec_runtime += delta_exec;
890 account_group_exec_runtime(curr, delta_exec);
891
892 curr->se.exec_start = rq->clock_task;
893 cpuacct_charge(curr, delta_exec);
894
895 sched_rt_avg_update(rq, delta_exec);
896
897 if (!rt_bandwidth_enabled())
898 return;
899
900 for_each_sched_rt_entity(rt_se) {
901 rt_rq = rt_rq_of_se(rt_se);
902
903 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
904 raw_spin_lock(&rt_rq->rt_runtime_lock);
905 rt_rq->rt_time += delta_exec;
906 if (sched_rt_runtime_exceeded(rt_rq))
907 resched_task(curr);
908 raw_spin_unlock(&rt_rq->rt_runtime_lock);
909 }
910 }
911}
912
913#if defined CONFIG_SMP
914
915static void
916inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
917{
918 struct rq *rq = rq_of_rt_rq(rt_rq);
919
920 if (rq->online && prio < prev_prio)
921 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
922}
923
924static void
925dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
926{
927 struct rq *rq = rq_of_rt_rq(rt_rq);
928
929 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
930 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
931}
932
933#else /* CONFIG_SMP */
934
935static inline
936void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
937static inline
938void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
939
940#endif /* CONFIG_SMP */
941
942#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
943static void
944inc_rt_prio(struct rt_rq *rt_rq, int prio)
945{
946 int prev_prio = rt_rq->highest_prio.curr;
947
948 if (prio < prev_prio)
949 rt_rq->highest_prio.curr = prio;
950
951 inc_rt_prio_smp(rt_rq, prio, prev_prio);
952}
953
954static void
955dec_rt_prio(struct rt_rq *rt_rq, int prio)
956{
957 int prev_prio = rt_rq->highest_prio.curr;
958
959 if (rt_rq->rt_nr_running) {
960
961 WARN_ON(prio < prev_prio);
962
963 /*
964 * This may have been our highest task, and therefore
965 * we may have some recomputation to do
966 */
967 if (prio == prev_prio) {
968 struct rt_prio_array *array = &rt_rq->active;
969
970 rt_rq->highest_prio.curr =
971 sched_find_first_bit(array->bitmap);
972 }
973
974 } else
975 rt_rq->highest_prio.curr = MAX_RT_PRIO;
976
977 dec_rt_prio_smp(rt_rq, prio, prev_prio);
978}
979
980#else
981
982static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
983static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
984
985#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
986
987#ifdef CONFIG_RT_GROUP_SCHED
988
989static void
990inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
991{
992 if (rt_se_boosted(rt_se))
993 rt_rq->rt_nr_boosted++;
994
995 if (rt_rq->tg)
996 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
997}
998
999static void
1000dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1001{
1002 if (rt_se_boosted(rt_se))
1003 rt_rq->rt_nr_boosted--;
1004
1005 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1006}
1007
1008#else /* CONFIG_RT_GROUP_SCHED */
1009
1010static void
1011inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1012{
1013 start_rt_bandwidth(&def_rt_bandwidth);
1014}
1015
1016static inline
1017void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1018
1019#endif /* CONFIG_RT_GROUP_SCHED */
1020
1021static inline
1022void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1023{
1024 int prio = rt_se_prio(rt_se);
1025
1026 WARN_ON(!rt_prio(prio));
1027 rt_rq->rt_nr_running++;
1028
1029 inc_rt_prio(rt_rq, prio);
1030 inc_rt_migration(rt_se, rt_rq);
1031 inc_rt_group(rt_se, rt_rq);
1032}
1033
1034static inline
1035void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1036{
1037 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1038 WARN_ON(!rt_rq->rt_nr_running);
1039 rt_rq->rt_nr_running--;
1040
1041 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1042 dec_rt_migration(rt_se, rt_rq);
1043 dec_rt_group(rt_se, rt_rq);
1044}
1045
1046static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1047{
1048 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1049 struct rt_prio_array *array = &rt_rq->active;
1050 struct rt_rq *group_rq = group_rt_rq(rt_se);
1051 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1052
1053 /*
1054	 * Don't enqueue the group if it's throttled, or when empty.
1055	 * The latter is a consequence of the former when a child group
1056	 * gets throttled and the current group doesn't have any other
1057 * active members.
1058 */
1059 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
1060 return;
1061
1062 if (!rt_rq->rt_nr_running)
1063 list_add_leaf_rt_rq(rt_rq);
1064
1065 if (head)
1066 list_add(&rt_se->run_list, queue);
1067 else
1068 list_add_tail(&rt_se->run_list, queue);
1069 __set_bit(rt_se_prio(rt_se), array->bitmap);
1070
1071 inc_rt_tasks(rt_se, rt_rq);
1072}
1073
1074static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
1075{
1076 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1077 struct rt_prio_array *array = &rt_rq->active;
1078
1079 list_del_init(&rt_se->run_list);
1080 if (list_empty(array->queue + rt_se_prio(rt_se)))
1081 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1082
1083 dec_rt_tasks(rt_se, rt_rq);
1084 if (!rt_rq->rt_nr_running)
1085 list_del_leaf_rt_rq(rt_rq);
1086}
1087
1088/*
1089 * Because the prio of an upper entry depends on the lower
1090 * entries, we must remove entries top-down.
1091 */
1092static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
1093{
1094 struct sched_rt_entity *back = NULL;
1095
1096 for_each_sched_rt_entity(rt_se) {
1097 rt_se->back = back;
1098 back = rt_se;
1099 }
1100
1101 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1102 if (on_rt_rq(rt_se))
1103 __dequeue_rt_entity(rt_se);
1104 }
1105}
1106
1107static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
1108{
1109 dequeue_rt_stack(rt_se);
1110 for_each_sched_rt_entity(rt_se)
1111 __enqueue_rt_entity(rt_se, head);
1112}
1113
1114static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1115{
1116 dequeue_rt_stack(rt_se);
1117
1118 for_each_sched_rt_entity(rt_se) {
1119 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1120
1121 if (rt_rq && rt_rq->rt_nr_running)
1122 __enqueue_rt_entity(rt_se, false);
1123 }
1124}
1125
1126/*
1127 * Adding/removing a task to/from a priority array:
1128 */
1129static void
1130enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1131{
1132 struct sched_rt_entity *rt_se = &p->rt;
1133
1134 if (flags & ENQUEUE_WAKEUP)
1135 rt_se->timeout = 0;
1136
1137 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1138
1139 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
1140 enqueue_pushable_task(rq, p);
1141
1142 inc_nr_running(rq);
1143}
1144
1145static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1146{
1147 struct sched_rt_entity *rt_se = &p->rt;
1148
1149 update_curr_rt(rq);
1150 dequeue_rt_entity(rt_se);
1151
1152 dequeue_pushable_task(rq, p);
1153
1154 dec_nr_running(rq);
1155}
1156
1157/*
1158 * Put the task at the head or the end of the run list without the overhead of
1159 * dequeue followed by enqueue.
1160 */
1161static void
1162requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1163{
1164 if (on_rt_rq(rt_se)) {
1165 struct rt_prio_array *array = &rt_rq->active;
1166 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1167
1168 if (head)
1169 list_move(&rt_se->run_list, queue);
1170 else
1171 list_move_tail(&rt_se->run_list, queue);
1172 }
1173}
1174
1175static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1176{
1177 struct sched_rt_entity *rt_se = &p->rt;
1178 struct rt_rq *rt_rq;
1179
1180 for_each_sched_rt_entity(rt_se) {
1181 rt_rq = rt_rq_of_se(rt_se);
1182 requeue_rt_entity(rt_rq, rt_se, head);
1183 }
1184}
1185
1186static void yield_task_rt(struct rq *rq)
1187{
1188 requeue_task_rt(rq, rq->curr, 0);
1189}
1190
1191#ifdef CONFIG_SMP
1192static int find_lowest_rq(struct task_struct *task);
1193
1194static int
1195select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
1196{
1197 struct task_struct *curr;
1198 struct rq *rq;
1199 int cpu;
1200
1201 cpu = task_cpu(p);
1202
1203 /* For anything but wake ups, just return the task_cpu */
1204 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1205 goto out;
1206
1207 rq = cpu_rq(cpu);
1208
1209 rcu_read_lock();
1210 curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1211
1212 /*
1213 * If the current task on @p's runqueue is an RT task, then
1214 * try to see if we can wake this RT task up on another
1215 * runqueue. Otherwise simply start this RT task
1216 * on its current runqueue.
1217 *
1218 * We want to avoid overloading runqueues. If the woken
1219 * task is a higher priority, then it will stay on this CPU
1220 * and the lower prio task should be moved to another CPU.
1221 * Even though this will probably make the lower prio task
1222	 * lose its cache, we do not want to bounce a higher priority task
1223 * around just because it gave up its CPU, perhaps for a
1224 * lock?
1225 *
1226 * For equal prio tasks, we just let the scheduler sort it out.
1227 *
1228 * Otherwise, just let it ride on the affined RQ and the
1229 * post-schedule router will push the preempted task away
1230 *
1231 * This test is optimistic, if we get it wrong the load-balancer
1232 * will have to sort it out.
1233 */
1234 if (curr && unlikely(rt_task(curr)) &&
1235 (curr->rt.nr_cpus_allowed < 2 ||
1236 curr->prio <= p->prio) &&
1237 (p->rt.nr_cpus_allowed > 1)) {
1238 int target = find_lowest_rq(p);
1239
1240 if (target != -1)
1241 cpu = target;
1242 }
1243 rcu_read_unlock();
1244
1245out:
1246 return cpu;
1247}
1248
1249static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1250{
1251 if (rq->curr->rt.nr_cpus_allowed == 1)
1252 return;
1253
1254 if (p->rt.nr_cpus_allowed != 1
1255 && cpupri_find(&rq->rd->cpupri, p, NULL))
1256 return;
1257
1258 if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1259 return;
1260
1261	 * There appear to be other CPUs that can accept
1262	 * current and none to run 'p', so let's reschedule
1263 * current and none to run 'p', so lets reschedule
1264 * to try and push current away:
1265 */
1266 requeue_task_rt(rq, p, 1);
1267 resched_task(rq->curr);
1268}
1269
1270#endif /* CONFIG_SMP */
1271
1272/*
1273 * Preempt the current task with a newly woken task if needed:
1274 */
1275static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
1276{
1277 if (p->prio < rq->curr->prio) {
1278 resched_task(rq->curr);
1279 return;
1280 }
1281
1282#ifdef CONFIG_SMP
1283 /*
1284 * If:
1285 *
1286 * - the newly woken task is of equal priority to the current task
1287 * - the newly woken task is non-migratable while current is migratable
1288 * - current will be preempted on the next reschedule
1289 *
1290 * we should check to see if current can readily move to a different
1291 * cpu. If so, we will reschedule to allow the push logic to try
1292 * to move current somewhere else, making room for our non-migratable
1293 * task.
1294 */
1295 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1296 check_preempt_equal_prio(rq, p);
1297#endif
1298}
1299
1300static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1301 struct rt_rq *rt_rq)
1302{
1303 struct rt_prio_array *array = &rt_rq->active;
1304 struct sched_rt_entity *next = NULL;
1305 struct list_head *queue;
1306 int idx;
1307
1308 idx = sched_find_first_bit(array->bitmap);
1309 BUG_ON(idx >= MAX_RT_PRIO);
1310
1311 queue = array->queue + idx;
1312 next = list_entry(queue->next, struct sched_rt_entity, run_list);
1313
1314 return next;
1315}
1316
1317static struct task_struct *_pick_next_task_rt(struct rq *rq)
1318{
1319 struct sched_rt_entity *rt_se;
1320 struct task_struct *p;
1321 struct rt_rq *rt_rq;
1322
1323 rt_rq = &rq->rt;
1324
1325 if (!rt_rq->rt_nr_running)
1326 return NULL;
1327
1328 if (rt_rq_throttled(rt_rq))
1329 return NULL;
1330
1331 do {
1332 rt_se = pick_next_rt_entity(rq, rt_rq);
1333 BUG_ON(!rt_se);
1334 rt_rq = group_rt_rq(rt_se);
1335 } while (rt_rq);
1336
1337 p = rt_task_of(rt_se);
1338 p->se.exec_start = rq->clock_task;
1339
1340 return p;
1341}
1342
1343static struct task_struct *pick_next_task_rt(struct rq *rq)
1344{
1345 struct task_struct *p = _pick_next_task_rt(rq);
1346
1347 /* The running task is never eligible for pushing */
1348 if (p)
1349 dequeue_pushable_task(rq, p);
1350
1351#ifdef CONFIG_SMP
1352 /*
1353 * We detect this state here so that we can avoid taking the RQ
1354 * lock again later if there is no need to push
1355 */
1356 rq->post_schedule = has_pushable_tasks(rq);
1357#endif
1358
1359 return p;
1360}
1361
1362static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1363{
1364 update_curr_rt(rq);
1365
1366 /*
1367 * The previous task needs to be made eligible for pushing
1368 * if it is still active
1369 */
1370 if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
1371 enqueue_pushable_task(rq, p);
1372}
1373
1374#ifdef CONFIG_SMP
1375
1376/* Only try algorithms three times */
1377#define RT_MAX_TRIES 3
1378
1379static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1380{
1381 if (!task_running(rq, p) &&
1382 (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
1383 (p->rt.nr_cpus_allowed > 1))
1384 return 1;
1385 return 0;
1386}
1387
1388/* Return the second highest RT task, NULL otherwise */
1389static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
1390{
1391 struct task_struct *next = NULL;
1392 struct sched_rt_entity *rt_se;
1393 struct rt_prio_array *array;
1394 struct rt_rq *rt_rq;
1395 int idx;
1396
1397 for_each_leaf_rt_rq(rt_rq, rq) {
1398 array = &rt_rq->active;
1399 idx = sched_find_first_bit(array->bitmap);
1400next_idx:
1401 if (idx >= MAX_RT_PRIO)
1402 continue;
1403 if (next && next->prio < idx)
1404 continue;
1405 list_for_each_entry(rt_se, array->queue + idx, run_list) {
1406 struct task_struct *p;
1407
1408 if (!rt_entity_is_task(rt_se))
1409 continue;
1410
1411 p = rt_task_of(rt_se);
1412 if (pick_rt_task(rq, p, cpu)) {
1413 next = p;
1414 break;
1415 }
1416 }
1417 if (!next) {
1418 idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1419 goto next_idx;
1420 }
1421 }
1422
1423 return next;
1424}
1425
1426static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1427
1428static int find_lowest_rq(struct task_struct *task)
1429{
1430 struct sched_domain *sd;
1431 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
1432 int this_cpu = smp_processor_id();
1433 int cpu = task_cpu(task);
1434
1435 /* Make sure the mask is initialized first */
1436 if (unlikely(!lowest_mask))
1437 return -1;
1438
1439 if (task->rt.nr_cpus_allowed == 1)
1440 return -1; /* No other targets possible */
1441
1442 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1443 return -1; /* No targets found */
1444
1445 /*
1446 * At this point we have built a mask of cpus representing the
1447 * lowest priority tasks in the system. Now we want to elect
1448 * the best one based on our affinity and topology.
1449 *
1450 * We prioritize the last cpu that the task executed on since
1451 * it is most likely cache-hot in that location.
1452 */
1453 if (cpumask_test_cpu(cpu, lowest_mask))
1454 return cpu;
1455
1456 /*
1457 * Otherwise, we consult the sched_domains span maps to figure
1458 * out which cpu is logically closest to our hot cache data.
1459 */
1460 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1461 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1462
1463 rcu_read_lock();
1464 for_each_domain(cpu, sd) {
1465 if (sd->flags & SD_WAKE_AFFINE) {
1466 int best_cpu;
1467
1468 /*
1469 * "this_cpu" is cheaper to preempt than a
1470 * remote processor.
1471 */
1472 if (this_cpu != -1 &&
1473 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1474 rcu_read_unlock();
1475 return this_cpu;
1476 }
1477
1478 best_cpu = cpumask_first_and(lowest_mask,
1479 sched_domain_span(sd));
1480 if (best_cpu < nr_cpu_ids) {
1481 rcu_read_unlock();
1482 return best_cpu;
1483 }
1484 }
1485 }
1486 rcu_read_unlock();
1487
1488 /*
1489 * And finally, if there were no matches within the domains
1490 * just give the caller *something* to work with from the compatible
1491 * locations.
1492 */
1493 if (this_cpu != -1)
1494 return this_cpu;
1495
1496 cpu = cpumask_any(lowest_mask);
1497 if (cpu < nr_cpu_ids)
1498 return cpu;
1499 return -1;
1500}
1501
1502/* Will lock the rq it finds */
1503static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1504{
1505 struct rq *lowest_rq = NULL;
1506 int tries;
1507 int cpu;
1508
1509 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1510 cpu = find_lowest_rq(task);
1511
1512 if ((cpu == -1) || (cpu == rq->cpu))
1513 break;
1514
1515 lowest_rq = cpu_rq(cpu);
1516
1517 /* if the prio of this runqueue changed, try again */
1518 if (double_lock_balance(rq, lowest_rq)) {
1519			 * We had to unlock the run queue. In
1520			 * the meantime, the task could have
1521 * the mean time, task could have
1522 * migrated already or had its affinity changed.
1523 * Also make sure that it wasn't scheduled on its rq.
1524 */
1525 if (unlikely(task_rq(task) != rq ||
1526 !cpumask_test_cpu(lowest_rq->cpu,
1527 tsk_cpus_allowed(task)) ||
1528 task_running(rq, task) ||
1529 !task->on_rq)) {
1530
1531 raw_spin_unlock(&lowest_rq->lock);
1532 lowest_rq = NULL;
1533 break;
1534 }
1535 }
1536
1537 /* If this rq is still suitable use it. */
1538 if (lowest_rq->rt.highest_prio.curr > task->prio)
1539 break;
1540
1541 /* try again */
1542 double_unlock_balance(rq, lowest_rq);
1543 lowest_rq = NULL;
1544 }
1545
1546 return lowest_rq;
1547}
1548
1549static struct task_struct *pick_next_pushable_task(struct rq *rq)
1550{
1551 struct task_struct *p;
1552
1553 if (!has_pushable_tasks(rq))
1554 return NULL;
1555
1556 p = plist_first_entry(&rq->rt.pushable_tasks,
1557 struct task_struct, pushable_tasks);
1558
1559 BUG_ON(rq->cpu != task_cpu(p));
1560 BUG_ON(task_current(rq, p));
1561 BUG_ON(p->rt.nr_cpus_allowed <= 1);
1562
1563 BUG_ON(!p->on_rq);
1564 BUG_ON(!rt_task(p));
1565
1566 return p;
1567}
1568
1569/*
1570 * If the current CPU has more than one RT task, see if the non-running
1571 * task can migrate over to a CPU that is running a task
1572 * of lesser priority.
1573 */
1574static int push_rt_task(struct rq *rq)
1575{
1576 struct task_struct *next_task;
1577 struct rq *lowest_rq;
1578 int ret = 0;
1579
1580 if (!rq->rt.overloaded)
1581 return 0;
1582
1583 next_task = pick_next_pushable_task(rq);
1584 if (!next_task)
1585 return 0;
1586
1587retry:
1588 if (unlikely(next_task == rq->curr)) {
1589 WARN_ON(1);
1590 return 0;
1591 }
1592
1593 /*
1594	 * It's possible that next_task slipped in with a
1595	 * higher priority than current. If that's the case,
1596 * just reschedule current.
1597 */
1598 if (unlikely(next_task->prio < rq->curr->prio)) {
1599 resched_task(rq->curr);
1600 return 0;
1601 }
1602
1603 /* We might release rq lock */
1604 get_task_struct(next_task);
1605
1606 /* find_lock_lowest_rq locks the rq if found */
1607 lowest_rq = find_lock_lowest_rq(next_task, rq);
1608 if (!lowest_rq) {
1609 struct task_struct *task;
1610 /*
1611 * find_lock_lowest_rq releases rq->lock
1612 * so it is possible that next_task has migrated.
1613 *
1614 * We need to make sure that the task is still on the same
1615 * run-queue and is also still the next task eligible for
1616 * pushing.
1617 */
1618 task = pick_next_pushable_task(rq);
1619 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1620 /*
1621 * The task hasn't migrated, and is still the next
1622 * eligible task, but we failed to find a run-queue
1623 * to push it to. Do not retry in this case, since
1624 * other cpus will pull from us when ready.
1625 */
1626 goto out;
1627 }
1628
1629 if (!task)
1630 /* No more tasks, just exit */
1631 goto out;
1632
1633 /*
1634 * Something has shifted, try again.
1635 */
1636 put_task_struct(next_task);
1637 next_task = task;
1638 goto retry;
1639 }
1640
1641 deactivate_task(rq, next_task, 0);
1642 set_task_cpu(next_task, lowest_rq->cpu);
1643 activate_task(lowest_rq, next_task, 0);
1644 ret = 1;
1645
1646 resched_task(lowest_rq->curr);
1647
1648 double_unlock_balance(rq, lowest_rq);
1649
1650out:
1651 put_task_struct(next_task);
1652
1653 return ret;
1654}
1655
1656static void push_rt_tasks(struct rq *rq)
1657{
1658	/* push_rt_task will return true if it moved an RT task */
1659 while (push_rt_task(rq))
1660 ;
1661}
1662
1663static int pull_rt_task(struct rq *this_rq)
1664{
1665 int this_cpu = this_rq->cpu, ret = 0, cpu;
1666 struct task_struct *p;
1667 struct rq *src_rq;
1668
1669 if (likely(!rt_overloaded(this_rq)))
1670 return 0;
1671
1672 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1673 if (this_cpu == cpu)
1674 continue;
1675
1676 src_rq = cpu_rq(cpu);
1677
1678 /*
1679 * Don't bother taking the src_rq->lock if the next highest
1680 * task is known to be lower-priority than our current task.
1681 * This may look racy, but if this value is about to go
1682 * logically higher, the src_rq will push this task away.
1683		 * And if it's going logically lower, we do not care.
1684 */
1685 if (src_rq->rt.highest_prio.next >=
1686 this_rq->rt.highest_prio.curr)
1687 continue;
1688
1689 /*
1690 * We can potentially drop this_rq's lock in
1691 * double_lock_balance, and another CPU could
1692 * alter this_rq
1693 */
1694 double_lock_balance(this_rq, src_rq);
1695
1696 /*
1697 * Are there still pullable RT tasks?
1698 */
1699 if (src_rq->rt.rt_nr_running <= 1)
1700 goto skip;
1701
1702 p = pick_next_highest_task_rt(src_rq, this_cpu);
1703
1704 /*
1705 * Do we have an RT task that preempts
1706 * the to-be-scheduled task?
1707 */
1708 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1709 WARN_ON(p == src_rq->curr);
1710 WARN_ON(!p->on_rq);
1711
1712 /*
1713 * There's a chance that p is higher in priority
1714 * than what's currently running on its cpu.
1715			 * This is just because p is waking up and hasn't
1716 * had a chance to schedule. We only pull
1717 * p if it is lower in priority than the
1718 * current task on the run queue
1719 */
1720 if (p->prio < src_rq->curr->prio)
1721 goto skip;
1722
1723 ret = 1;
1724
1725 deactivate_task(src_rq, p, 0);
1726 set_task_cpu(p, this_cpu);
1727 activate_task(this_rq, p, 0);
1728 /*
1729 * We continue with the search, just in
1730 * case there's an even higher prio task
1731 * in another runqueue. (low likelihood
1732 * but possible)
1733 */
1734 }
1735skip:
1736 double_unlock_balance(this_rq, src_rq);
1737 }
1738
1739 return ret;
1740}
1741
1742static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1743{
1744 /* Try to pull RT tasks here if we lower this rq's prio */
1745 if (rq->rt.highest_prio.curr > prev->prio)
1746 pull_rt_task(rq);
1747}
1748
1749static void post_schedule_rt(struct rq *rq)
1750{
1751 push_rt_tasks(rq);
1752}
1753
1754/*
1755 * If we are not running and we are not going to reschedule soon, we should
1756 * try to push tasks away now
1757 */
1758static void task_woken_rt(struct rq *rq, struct task_struct *p)
1759{
1760 if (!task_running(rq, p) &&
1761 !test_tsk_need_resched(rq->curr) &&
1762 has_pushable_tasks(rq) &&
1763 p->rt.nr_cpus_allowed > 1 &&
1764 rt_task(rq->curr) &&
1765 (rq->curr->rt.nr_cpus_allowed < 2 ||
1766 rq->curr->prio <= p->prio))
1767 push_rt_tasks(rq);
1768}
1769
1770static void set_cpus_allowed_rt(struct task_struct *p,
1771 const struct cpumask *new_mask)
1772{
1773 int weight = cpumask_weight(new_mask);
1774
1775 BUG_ON(!rt_task(p));
1776
1777 /*
1778 * Update the migration status of the RQ if we have an RT task
1779 * which is running AND changing its weight value.
1780 */
1781 if (p->on_rq && (weight != p->rt.nr_cpus_allowed)) {
1782 struct rq *rq = task_rq(p);
1783
1784 if (!task_current(rq, p)) {
1785 /*
1786 * Make sure we dequeue this task from the pushable list
1787 * before going further. It will either remain off of
1788 * the list because we are no longer pushable, or it
1789 * will be requeued.
1790 */
1791 if (p->rt.nr_cpus_allowed > 1)
1792 dequeue_pushable_task(rq, p);
1793
1794 /*
1795 * Requeue if our weight is changing and still > 1
1796 */
1797 if (weight > 1)
1798 enqueue_pushable_task(rq, p);
1799
1800 }
1801
1802 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1803 rq->rt.rt_nr_migratory++;
1804 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1805 BUG_ON(!rq->rt.rt_nr_migratory);
1806 rq->rt.rt_nr_migratory--;
1807 }
1808
1809 update_rt_migration(&rq->rt);
1810 }
1811}
1812
1813/* Assumes rq->lock is held */
1814static void rq_online_rt(struct rq *rq)
1815{
1816 if (rq->rt.overloaded)
1817 rt_set_overload(rq);
1818
1819 __enable_runtime(rq);
1820
1821 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1822}
1823
1824/* Assumes rq->lock is held */
1825static void rq_offline_rt(struct rq *rq)
1826{
1827 if (rq->rt.overloaded)
1828 rt_clear_overload(rq);
1829
1830 __disable_runtime(rq);
1831
1832 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1833}
1834
1835/*
1836 * When switching from the rt queue, we bring ourselves to a position
1837 * where we might want to pull RT tasks from other runqueues.
1838 */
1839static void switched_from_rt(struct rq *rq, struct task_struct *p)
1840{
1841 /*
1842 * If there are other RT tasks then we will reschedule
1843 * and the scheduling of the other RT tasks will handle
1844 * the balancing. But if we are the last RT task
1845 * we may need to handle the pulling of RT tasks
1846 * now.
1847 */
1848 if (p->on_rq && !rq->rt.rt_nr_running)
1849 pull_rt_task(rq);
1850}
1851
1852void init_sched_rt_class(void)
1853{
1854 unsigned int i;
1855
1856 for_each_possible_cpu(i) {
1857 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
1858 GFP_KERNEL, cpu_to_node(i));
1859 }
1860}
1861#endif /* CONFIG_SMP */
1862
1863/*
1864 * When switching a task to RT, we may overload the runqueue
1865 * with RT tasks. In this case we try to push them off to
1866 * other runqueues.
1867 */
1868static void switched_to_rt(struct rq *rq, struct task_struct *p)
1869{
1870 int check_resched = 1;
1871
1872 /*
1873 * If we are already running, then there's nothing
1874 * that needs to be done. But if we are not running
1875 * we may need to preempt the current running task.
1876 * If that current running task is also an RT task
1877 * then see if we can move to another run queue.
1878 */
1879 if (p->on_rq && rq->curr != p) {
1880#ifdef CONFIG_SMP
1881 if (rq->rt.overloaded && push_rt_task(rq) &&
1882 /* Don't resched if we changed runqueues */
1883 rq != task_rq(p))
1884 check_resched = 0;
1885#endif /* CONFIG_SMP */
1886 if (check_resched && p->prio < rq->curr->prio)
1887 resched_task(rq->curr);
1888 }
1889}
1890
1891/*
1892 * Priority of the task has changed. This may cause
1893 * us to initiate a push or pull.
1894 */
1895static void
1896prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
1897{
1898 if (!p->on_rq)
1899 return;
1900
1901 if (rq->curr == p) {
1902#ifdef CONFIG_SMP
1903 /*
1904 * If our priority decreases while running, we
1905 * may need to pull tasks to this runqueue.
1906 */
1907 if (oldprio < p->prio)
1908 pull_rt_task(rq);
1909 /*
1910 * If there's a higher priority task waiting to run
1911 * then reschedule. Note, the above pull_rt_task
1912 * can release the rq lock and p could migrate.
1913 * Only reschedule if p is still on the same runqueue.
1914 */
1915 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1916 resched_task(p);
1917#else
1918 /* For UP simply resched on drop of prio */
1919 if (oldprio < p->prio)
1920 resched_task(p);
1921#endif /* CONFIG_SMP */
1922 } else {
1923 /*
1924 * This task is not running, but if it is
1925 * greater than the current running task
1926 * then reschedule.
1927 */
1928 if (p->prio < rq->curr->prio)
1929 resched_task(rq->curr);
1930 }
1931}
1932
1933static void watchdog(struct rq *rq, struct task_struct *p)
1934{
1935 unsigned long soft, hard;
1936
1937	/* max may change after cur was read; this will be fixed next tick */
1938 soft = task_rlimit(p, RLIMIT_RTTIME);
1939 hard = task_rlimit_max(p, RLIMIT_RTTIME);
1940
1941 if (soft != RLIM_INFINITY) {
1942 unsigned long next;
1943
1944 p->rt.timeout++;
1945 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1946 if (p->rt.timeout > next)
1947 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1948 }
1949}
1950
1951static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1952{
1953 update_curr_rt(rq);
1954
1955 watchdog(rq, p);
1956
1957 /*
1958 * RR tasks need a special form of timeslice management.
1959 * FIFO tasks have no timeslices.
1960 */
1961 if (p->policy != SCHED_RR)
1962 return;
1963
1964 if (--p->rt.time_slice)
1965 return;
1966
1967 p->rt.time_slice = DEF_TIMESLICE;
1968
1969 /*
1970 * Requeue to the end of queue if we are not the only element
1971 * on the queue:
1972 */
1973 if (p->rt.run_list.prev != p->rt.run_list.next) {
1974 requeue_task_rt(rq, p, 0);
1975 set_tsk_need_resched(p);
1976 }
1977}
1978
1979static void set_curr_task_rt(struct rq *rq)
1980{
1981 struct task_struct *p = rq->curr;
1982
1983 p->se.exec_start = rq->clock_task;
1984
1985 /* The running task is never eligible for pushing */
1986 dequeue_pushable_task(rq, p);
1987}
1988
1989static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
1990{
1991 /*
1992 * Time slice is 0 for SCHED_FIFO tasks
1993 */
1994 if (task->policy == SCHED_RR)
1995 return DEF_TIMESLICE;
1996 else
1997 return 0;
1998}
1999
2000const struct sched_class rt_sched_class = {
2001 .next = &fair_sched_class,
2002 .enqueue_task = enqueue_task_rt,
2003 .dequeue_task = dequeue_task_rt,
2004 .yield_task = yield_task_rt,
2005
2006 .check_preempt_curr = check_preempt_curr_rt,
2007
2008 .pick_next_task = pick_next_task_rt,
2009 .put_prev_task = put_prev_task_rt,
2010
2011#ifdef CONFIG_SMP
2012 .select_task_rq = select_task_rq_rt,
2013
2014 .set_cpus_allowed = set_cpus_allowed_rt,
2015 .rq_online = rq_online_rt,
2016 .rq_offline = rq_offline_rt,
2017 .pre_schedule = pre_schedule_rt,
2018 .post_schedule = post_schedule_rt,
2019 .task_woken = task_woken_rt,
2020 .switched_from = switched_from_rt,
2021#endif
2022
2023 .set_curr_task = set_curr_task_rt,
2024 .task_tick = task_tick_rt,
2025
2026 .get_rr_interval = get_rr_interval_rt,
2027
2028 .prio_changed = prio_changed_rt,
2029 .switched_to = switched_to_rt,
2030};
2031
2032#ifdef CONFIG_SCHED_DEBUG
2033extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2034
2035void print_rt_stats(struct seq_file *m, int cpu)
2036{
2037 rt_rq_iter_t iter;
2038 struct rt_rq *rt_rq;
2039
2040 rcu_read_lock();
2041 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2042 print_rt_rq(m, cpu, rt_rq);
2043 rcu_read_unlock();
2044}
2045#endif /* CONFIG_SCHED_DEBUG */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
new file mode 100644
index 000000000000..c2e780234c31
--- /dev/null
+++ b/kernel/sched/sched.h
@@ -0,0 +1,1064 @@
1
2#include <linux/sched.h>
3#include <linux/mutex.h>
4#include <linux/spinlock.h>
5#include <linux/stop_machine.h>
6
7#include "cpupri.h"
8
9extern __read_mostly int scheduler_running;
10
11/*
12 * Convert user-nice values [ -20 ... 0 ... 19 ]
13 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
14 * and back.
15 */
16#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
17#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
18#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
19
20/*
21 * 'User priority' is the nice value converted to something we
22 * can work with better when scaling various scheduler parameters,
23 * it's a [ 0 ... 39 ] range.
24 */
25#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
26#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
27#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
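/*
 * A worked example of the mapping above, assuming the usual values
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140 from <linux/sched.h>:
 *
 *	NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120, NICE_TO_PRIO(19) == 139
 *
 * so nice [-20..19] maps onto static priorities [100..139]; USER_PRIO()
 * shifts that back down to the [0..39] range, giving MAX_USER_PRIO == 40.
 */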
28
29/*
30 * Helpers for converting nanosecond timing to jiffy resolution
31 */
32#define NS_TO_JIFFIES(TIME) ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
33
34#define NICE_0_LOAD SCHED_LOAD_SCALE
35#define NICE_0_SHIFT SCHED_LOAD_SHIFT
36
37/*
38 * These are the 'tuning knobs' of the scheduler:
39 *
40 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
41 * Timeslices get refilled after they expire.
42 */
43#define DEF_TIMESLICE (100 * HZ / 1000)
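/*
 * For example, with HZ == 1000 this works out to 100 jiffies and with
 * HZ == 250 to 25 jiffies - either way roughly 100ms per SCHED_RR slice.
 * (These HZ values are only illustrative; the actual value is a Kconfig
 * choice.)
 */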
44
45/*
46 * Single value that denotes runtime == period, i.e. unlimited time.
47 */
48#define RUNTIME_INF ((u64)~0ULL)
49
50static inline int rt_policy(int policy)
51{
52 if (policy == SCHED_FIFO || policy == SCHED_RR)
53 return 1;
54 return 0;
55}
56
57static inline int task_has_rt_policy(struct task_struct *p)
58{
59 return rt_policy(p->policy);
60}
61
62/*
63 * This is the priority-queue data structure of the RT scheduling class:
64 */
65struct rt_prio_array {
66 DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
67 struct list_head queue[MAX_RT_PRIO];
68};
69
70struct rt_bandwidth {
71 /* nests inside the rq lock: */
72 raw_spinlock_t rt_runtime_lock;
73 ktime_t rt_period;
74 u64 rt_runtime;
75 struct hrtimer rt_period_timer;
76};
77
78extern struct mutex sched_domains_mutex;
79
80#ifdef CONFIG_CGROUP_SCHED
81
82#include <linux/cgroup.h>
83
84struct cfs_rq;
85struct rt_rq;
86
87static LIST_HEAD(task_groups);
88
89struct cfs_bandwidth {
90#ifdef CONFIG_CFS_BANDWIDTH
91 raw_spinlock_t lock;
92 ktime_t period;
93 u64 quota, runtime;
94 s64 hierarchal_quota;
95 u64 runtime_expires;
96
97 int idle, timer_active;
98 struct hrtimer period_timer, slack_timer;
99 struct list_head throttled_cfs_rq;
100
101 /* statistics */
102 int nr_periods, nr_throttled;
103 u64 throttled_time;
104#endif
105};
106
107/* task group related information */
108struct task_group {
109 struct cgroup_subsys_state css;
110
111#ifdef CONFIG_FAIR_GROUP_SCHED
112 /* schedulable entities of this group on each cpu */
113 struct sched_entity **se;
114 /* runqueue "owned" by this group on each cpu */
115 struct cfs_rq **cfs_rq;
116 unsigned long shares;
117
118 atomic_t load_weight;
119#endif
120
121#ifdef CONFIG_RT_GROUP_SCHED
122 struct sched_rt_entity **rt_se;
123 struct rt_rq **rt_rq;
124
125 struct rt_bandwidth rt_bandwidth;
126#endif
127
128 struct rcu_head rcu;
129 struct list_head list;
130
131 struct task_group *parent;
132 struct list_head siblings;
133 struct list_head children;
134
135#ifdef CONFIG_SCHED_AUTOGROUP
136 struct autogroup *autogroup;
137#endif
138
139 struct cfs_bandwidth cfs_bandwidth;
140};
141
142#ifdef CONFIG_FAIR_GROUP_SCHED
143#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
144
145/*
146 * A weight of 0 or 1 can cause arithmetic problems.
147 * The weight of a cfs_rq is the sum of the weights of the entities
148 * queued on that cfs_rq, so the weight of an entity should not be
149 * too large, and neither should the shares value of a task group.
150 * (The default weight is 1024 - so there's no practical
151 * limitation from this.)
152 */
153#define MIN_SHARES (1UL << 1)
154#define MAX_SHARES (1UL << 18)
155#endif
156
157/* Default task group.
158 * Every task in the system belongs to this group at bootup.
159 */
160extern struct task_group root_task_group;
161
162typedef int (*tg_visitor)(struct task_group *, void *);
163
164extern int walk_tg_tree_from(struct task_group *from,
165 tg_visitor down, tg_visitor up, void *data);
166
167/*
168 * Iterate the full tree, calling @down when first entering a node and @up when
169 * leaving it for the final time.
170 *
171 * Caller must hold rcu_lock or sufficient equivalent.
172 */
173static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
174{
175 return walk_tg_tree_from(&root_task_group, down, up, data);
176}
177
178extern int tg_nop(struct task_group *tg, void *data);
179
180extern void free_fair_sched_group(struct task_group *tg);
181extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
182extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
183extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
184 struct sched_entity *se, int cpu,
185 struct sched_entity *parent);
186extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
187extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
188
189extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
190extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
191extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
192
193extern void free_rt_sched_group(struct task_group *tg);
194extern int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent);
195extern void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
196 struct sched_rt_entity *rt_se, int cpu,
197 struct sched_rt_entity *parent);
198
199#else /* CONFIG_CGROUP_SCHED */
200
201struct cfs_bandwidth { };
202
203#endif /* CONFIG_CGROUP_SCHED */
204
205/* CFS-related fields in a runqueue */
206struct cfs_rq {
207 struct load_weight load;
208 unsigned long nr_running, h_nr_running;
209
210 u64 exec_clock;
211 u64 min_vruntime;
212#ifndef CONFIG_64BIT
213 u64 min_vruntime_copy;
214#endif
215
216 struct rb_root tasks_timeline;
217 struct rb_node *rb_leftmost;
218
219 struct list_head tasks;
220 struct list_head *balance_iterator;
221
222 /*
223 * 'curr' points to currently running entity on this cfs_rq.
224 * It is set to NULL otherwise (i.e when none are currently running).
225 */
226 struct sched_entity *curr, *next, *last, *skip;
227
228#ifdef CONFIG_SCHED_DEBUG
229 unsigned int nr_spread_over;
230#endif
231
232#ifdef CONFIG_FAIR_GROUP_SCHED
233 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
234
235 /*
236 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
237 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
238 * (like users, containers etc.)
239 *
240 * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
241 * list is used during load balance.
242 */
243 int on_list;
244 struct list_head leaf_cfs_rq_list;
245 struct task_group *tg; /* group that "owns" this runqueue */
246
247#ifdef CONFIG_SMP
248 /*
249 * the part of load.weight contributed by tasks
250 */
251 unsigned long task_weight;
252
253 /*
254 * h_load = weight * f(tg)
255 *
256 * Where f(tg) is the recursive weight fraction assigned to
257 * this group.
258 */
259 unsigned long h_load;
260
261 /*
262 * Maintaining per-cpu shares distribution for group scheduling
263 *
264 * load_stamp is the last time we updated the load average
265 * load_last is the last time we updated the load average and saw load
266 * load_unacc_exec_time is currently unaccounted execution time
267 */
268 u64 load_avg;
269 u64 load_period;
270 u64 load_stamp, load_last, load_unacc_exec_time;
271
272 unsigned long load_contribution;
273#endif /* CONFIG_SMP */
274#ifdef CONFIG_CFS_BANDWIDTH
275 int runtime_enabled;
276 u64 runtime_expires;
277 s64 runtime_remaining;
278
279 u64 throttled_timestamp;
280 int throttled, throttle_count;
281 struct list_head throttled_list;
282#endif /* CONFIG_CFS_BANDWIDTH */
283#endif /* CONFIG_FAIR_GROUP_SCHED */
284};
285
286static inline int rt_bandwidth_enabled(void)
287{
288 return sysctl_sched_rt_runtime >= 0;
289}
290
291/* Real-Time classes' related field in a runqueue: */
292struct rt_rq {
293 struct rt_prio_array active;
294 unsigned long rt_nr_running;
295#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
296 struct {
297 int curr; /* highest queued rt task prio */
298#ifdef CONFIG_SMP
299 int next; /* next highest */
300#endif
301 } highest_prio;
302#endif
303#ifdef CONFIG_SMP
304 unsigned long rt_nr_migratory;
305 unsigned long rt_nr_total;
306 int overloaded;
307 struct plist_head pushable_tasks;
308#endif
309 int rt_throttled;
310 u64 rt_time;
311 u64 rt_runtime;
312 /* Nests inside the rq lock: */
313 raw_spinlock_t rt_runtime_lock;
314
315#ifdef CONFIG_RT_GROUP_SCHED
316 unsigned long rt_nr_boosted;
317
318 struct rq *rq;
319 struct list_head leaf_rt_rq_list;
320 struct task_group *tg;
321#endif
322};
323
324#ifdef CONFIG_SMP
325
326/*
327 * We add the notion of a root-domain which will be used to define per-domain
328 * variables. Each exclusive cpuset essentially defines an island domain by
329 * fully partitioning the member cpus from any other cpuset. Whenever a new
330 * exclusive cpuset is created, we also create and attach a new root-domain
331 * object.
332 *
333 */
334struct root_domain {
335 atomic_t refcount;
336 atomic_t rto_count;
337 struct rcu_head rcu;
338 cpumask_var_t span;
339 cpumask_var_t online;
340
341 /*
342 * The "RT overload" flag: it gets set if a CPU has more than
343 * one runnable RT task.
344 */
345 cpumask_var_t rto_mask;
346 struct cpupri cpupri;
347};
348
349extern struct root_domain def_root_domain;
350
351#endif /* CONFIG_SMP */
352
353/*
354 * This is the main, per-CPU runqueue data structure.
355 *
356 * Locking rule: code that wants to lock multiple runqueues (such as
357 * the load balancing or the thread migration code) must acquire the
358 * locks in ascending &runqueue order.
359 */
360struct rq {
361 /* runqueue lock: */
362 raw_spinlock_t lock;
363
364 /*
365 * nr_running and cpu_load should be in the same cacheline because
366 * remote CPUs use both these fields when doing load calculation.
367 */
368 unsigned long nr_running;
369 #define CPU_LOAD_IDX_MAX 5
370 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
371 unsigned long last_load_update_tick;
372#ifdef CONFIG_NO_HZ
373 u64 nohz_stamp;
374 unsigned char nohz_balance_kick;
375#endif
376 int skip_clock_update;
377
378 /* capture load from *all* tasks on this cpu: */
379 struct load_weight load;
380 unsigned long nr_load_updates;
381 u64 nr_switches;
382
383 struct cfs_rq cfs;
384 struct rt_rq rt;
385
386#ifdef CONFIG_FAIR_GROUP_SCHED
387 /* list of leaf cfs_rq on this cpu: */
388 struct list_head leaf_cfs_rq_list;
389#endif
390#ifdef CONFIG_RT_GROUP_SCHED
391 struct list_head leaf_rt_rq_list;
392#endif
393
394 /*
395 * This is part of a global counter where only the total sum
396 * over all CPUs matters. A task can increase this counter on
397 * one CPU and if it got migrated afterwards it may decrease
398 * it on another CPU. Always updated under the runqueue lock:
399 */
400 unsigned long nr_uninterruptible;
401
402 struct task_struct *curr, *idle, *stop;
403 unsigned long next_balance;
404 struct mm_struct *prev_mm;
405
406 u64 clock;
407 u64 clock_task;
408
409 atomic_t nr_iowait;
410
411#ifdef CONFIG_SMP
412 struct root_domain *rd;
413 struct sched_domain *sd;
414
415 unsigned long cpu_power;
416
417 unsigned char idle_balance;
418 /* For active balancing */
419 int post_schedule;
420 int active_balance;
421 int push_cpu;
422 struct cpu_stop_work active_balance_work;
423 /* cpu of this runqueue: */
424 int cpu;
425 int online;
426
427 u64 rt_avg;
428 u64 age_stamp;
429 u64 idle_stamp;
430 u64 avg_idle;
431#endif
432
433#ifdef CONFIG_IRQ_TIME_ACCOUNTING
434 u64 prev_irq_time;
435#endif
436#ifdef CONFIG_PARAVIRT
437 u64 prev_steal_time;
438#endif
439#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
440 u64 prev_steal_time_rq;
441#endif
442
443 /* calc_load related fields */
444 unsigned long calc_load_update;
445 long calc_load_active;
446
447#ifdef CONFIG_SCHED_HRTICK
448#ifdef CONFIG_SMP
449 int hrtick_csd_pending;
450 struct call_single_data hrtick_csd;
451#endif
452 struct hrtimer hrtick_timer;
453#endif
454
455#ifdef CONFIG_SCHEDSTATS
456 /* latency stats */
457 struct sched_info rq_sched_info;
458 unsigned long long rq_cpu_time;
459 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
460
461 /* sys_sched_yield() stats */
462 unsigned int yld_count;
463
464 /* schedule() stats */
465 unsigned int sched_switch;
466 unsigned int sched_count;
467 unsigned int sched_goidle;
468
469 /* try_to_wake_up() stats */
470 unsigned int ttwu_count;
471 unsigned int ttwu_local;
472#endif
473
474#ifdef CONFIG_SMP
475 struct llist_head wake_list;
476#endif
477};
478
479static inline int cpu_of(struct rq *rq)
480{
481#ifdef CONFIG_SMP
482 return rq->cpu;
483#else
484 return 0;
485#endif
486}
487
488DECLARE_PER_CPU(struct rq, runqueues);
489
490#define rcu_dereference_check_sched_domain(p) \
491 rcu_dereference_check((p), \
492 lockdep_is_held(&sched_domains_mutex))
493
494/*
495 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
496 * See detach_destroy_domains: synchronize_sched for details.
497 *
498 * The domain tree of any CPU may only be accessed from within
499 * preempt-disabled sections.
500 */
501#define for_each_domain(cpu, __sd) \
502 for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
503
504#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
505#define this_rq() (&__get_cpu_var(runqueues))
506#define task_rq(p) cpu_rq(task_cpu(p))
507#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
508#define raw_rq() (&__raw_get_cpu_var(runqueues))
509
510#include "stats.h"
511#include "auto_group.h"
512
513#ifdef CONFIG_CGROUP_SCHED
514
515/*
516 * Return the group to which this task belongs.
517 *
518 * We use task_subsys_state_check() and extend the RCU verification with
519 * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
520 * task it moves into the cgroup. Therefore by holding either of those locks,
521 * we pin the task to the current cgroup.
522 */
523static inline struct task_group *task_group(struct task_struct *p)
524{
525 struct task_group *tg;
526 struct cgroup_subsys_state *css;
527
528 css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
529 lockdep_is_held(&p->pi_lock) ||
530 lockdep_is_held(&task_rq(p)->lock));
531 tg = container_of(css, struct task_group, css);
532
533 return autogroup_task_group(p, tg);
534}
535
536/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
537static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
538{
539#if defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED)
540 struct task_group *tg = task_group(p);
541#endif
542
543#ifdef CONFIG_FAIR_GROUP_SCHED
544 p->se.cfs_rq = tg->cfs_rq[cpu];
545 p->se.parent = tg->se[cpu];
546#endif
547
548#ifdef CONFIG_RT_GROUP_SCHED
549 p->rt.rt_rq = tg->rt_rq[cpu];
550 p->rt.parent = tg->rt_se[cpu];
551#endif
552}
553
554#else /* CONFIG_CGROUP_SCHED */
555
556static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
557static inline struct task_group *task_group(struct task_struct *p)
558{
559 return NULL;
560}
561
562#endif /* CONFIG_CGROUP_SCHED */
563
564static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
565{
566 set_task_rq(p, cpu);
567#ifdef CONFIG_SMP
568 /*
569 * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
570	 * successfully executed on another CPU. We must ensure that updates of
571 * per-task data have been completed by this moment.
572 */
573 smp_wmb();
574 task_thread_info(p)->cpu = cpu;
575#endif
576}
577
578/*
579 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
580 */
581#ifdef CONFIG_SCHED_DEBUG
582# define const_debug __read_mostly
583#else
584# define const_debug const
585#endif
586
587extern const_debug unsigned int sysctl_sched_features;
588
589#define SCHED_FEAT(name, enabled) \
590 __SCHED_FEAT_##name ,
591
592enum {
593#include "features.h"
594};
595
596#undef SCHED_FEAT
597
598#define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
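/*
 * Sketch of how the x-macro machinery above expands, assuming features.h
 * contains an entry such as SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true):
 *
 *	enum { __SCHED_FEAT_GENTLE_FAIR_SLEEPERS, ... };
 *
 * and sched_feat(GENTLE_FAIR_SLEEPERS) then compiles to
 *
 *	(sysctl_sched_features & (1UL << __SCHED_FEAT_GENTLE_FAIR_SLEEPERS))
 *
 * i.e. every SCHED_FEAT() line in features.h contributes one bit of
 * sysctl_sched_features, which can be toggled at runtime under
 * CONFIG_SCHED_DEBUG.
 */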
599
600static inline u64 global_rt_period(void)
601{
602 return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
603}
604
605static inline u64 global_rt_runtime(void)
606{
607 if (sysctl_sched_rt_runtime < 0)
608 return RUNTIME_INF;
609
610 return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
611}
612
613
614
615static inline int task_current(struct rq *rq, struct task_struct *p)
616{
617 return rq->curr == p;
618}
619
620static inline int task_running(struct rq *rq, struct task_struct *p)
621{
622#ifdef CONFIG_SMP
623 return p->on_cpu;
624#else
625 return task_current(rq, p);
626#endif
627}
628
629
630#ifndef prepare_arch_switch
631# define prepare_arch_switch(next) do { } while (0)
632#endif
633#ifndef finish_arch_switch
634# define finish_arch_switch(prev) do { } while (0)
635#endif
636
637#ifndef __ARCH_WANT_UNLOCKED_CTXSW
638static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
639{
640#ifdef CONFIG_SMP
641 /*
642 * We can optimise this out completely for !SMP, because the
643 * SMP rebalancing from interrupt is the only thing that cares
644 * here.
645 */
646 next->on_cpu = 1;
647#endif
648}
649
650static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
651{
652#ifdef CONFIG_SMP
653 /*
654 * After ->on_cpu is cleared, the task can be moved to a different CPU.
655 * We must ensure this doesn't happen until the switch is completely
656 * finished.
657 */
658 smp_wmb();
659 prev->on_cpu = 0;
660#endif
661#ifdef CONFIG_DEBUG_SPINLOCK
662 /* this is a valid case when another task releases the spinlock */
663 rq->lock.owner = current;
664#endif
665 /*
666 * If we are tracking spinlock dependencies then we have to
667 * fix up the runqueue lock - which gets 'carried over' from
668 * prev into current:
669 */
670 spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
671
672 raw_spin_unlock_irq(&rq->lock);
673}
674
675#else /* __ARCH_WANT_UNLOCKED_CTXSW */
676static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
677{
678#ifdef CONFIG_SMP
679 /*
680 * We can optimise this out completely for !SMP, because the
681 * SMP rebalancing from interrupt is the only thing that cares
682 * here.
683 */
684 next->on_cpu = 1;
685#endif
686#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
687 raw_spin_unlock_irq(&rq->lock);
688#else
689 raw_spin_unlock(&rq->lock);
690#endif
691}
692
693static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
694{
695#ifdef CONFIG_SMP
696 /*
697 * After ->on_cpu is cleared, the task can be moved to a different CPU.
698 * We must ensure this doesn't happen until the switch is completely
699 * finished.
700 */
701 smp_wmb();
702 prev->on_cpu = 0;
703#endif
704#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
705 local_irq_enable();
706#endif
707}
708#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
709
710
711static inline void update_load_add(struct load_weight *lw, unsigned long inc)
712{
713 lw->weight += inc;
714 lw->inv_weight = 0;
715}
716
717static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
718{
719 lw->weight -= dec;
720 lw->inv_weight = 0;
721}
722
723static inline void update_load_set(struct load_weight *lw, unsigned long w)
724{
725 lw->weight = w;
726 lw->inv_weight = 0;
727}
728
729/*
730 * To aid in avoiding the subversion of "niceness" due to uneven distribution
731 * of tasks with abnormal "nice" values across CPUs, the contribution that
732 * each task makes to its run queue's load is weighted according to its
733 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
734 * scaled version of the new time slice allocation that they receive on time
735 * slice expiry etc.
736 */
737
738#define WEIGHT_IDLEPRIO 3
739#define WMULT_IDLEPRIO 1431655765
740
741/*
742 * Nice levels are multiplicative, with a gentle 10% change for every
743 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
744 * nice 1, it will get ~10% less CPU time than another CPU-bound task
745 * that remained on nice 0.
746 *
747 * The "10% effect" is relative and cumulative: from _any_ nice level,
748 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
749 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
750 * If a task goes up by ~10% and another task goes down by ~10% then
751 * the relative distance between them is ~25%.)
752 */
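/*
 * Concretely (see prio_to_weight[] below): nice 0 has weight 1024 and each
 * nice level scales that by ~1.25, so nice 1 is 1024/1.25 ~= 820. Two
 * CPU-bound tasks at nice 0 and nice 1 therefore split the CPU roughly
 * 1024/(1024+820) ~= 55% vs 45% - the ~10% effect described above.
 */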
753static const int prio_to_weight[40] = {
754 /* -20 */ 88761, 71755, 56483, 46273, 36291,
755 /* -15 */ 29154, 23254, 18705, 14949, 11916,
756 /* -10 */ 9548, 7620, 6100, 4904, 3906,
757 /* -5 */ 3121, 2501, 1991, 1586, 1277,
758 /* 0 */ 1024, 820, 655, 526, 423,
759 /* 5 */ 335, 272, 215, 172, 137,
760 /* 10 */ 110, 87, 70, 56, 45,
761 /* 15 */ 36, 29, 23, 18, 15,
762};
763
764/*
765 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
766 *
767 * In cases where the weight does not change often, we can use the
768 * precalculated inverse to speed up arithmetics by turning divisions
769 * into multiplications:
770 */
771static const u32 prio_to_wmult[40] = {
772 /* -20 */ 48388, 59856, 76040, 92818, 118348,
773 /* -15 */ 147320, 184698, 229616, 287308, 360437,
774 /* -10 */ 449829, 563644, 704093, 875809, 1099582,
775 /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
776 /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
777 /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
778 /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
779 /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
780};
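/*
 * Example of how this table is meant to be used: for nice 0,
 * 2^32 / 1024 == 4194304 (the entry above), so a division by a task's
 * weight can be replaced by a multiply-and-shift on hot paths:
 *
 *	x / 1024  ~=  ((u64)x * 4194304) >> 32
 *
 * which is exact enough for the scheduler's fixed-point arithmetic.
 */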
781
782/* Time spent by the tasks of the cpu accounting group executing in ... */
783enum cpuacct_stat_index {
784 CPUACCT_STAT_USER, /* ... user mode */
785 CPUACCT_STAT_SYSTEM, /* ... kernel mode */
786
787 CPUACCT_STAT_NSTATS,
788};
789
790
791#define sched_class_highest (&stop_sched_class)
792#define for_each_class(class) \
793 for (class = sched_class_highest; class; class = class->next)
794
795extern const struct sched_class stop_sched_class;
796extern const struct sched_class rt_sched_class;
797extern const struct sched_class fair_sched_class;
798extern const struct sched_class idle_sched_class;
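/*
 * With the classes chained through ->next (stop -> rt -> fair -> idle;
 * rt_sched_class.next = &fair_sched_class is visible in rt.c above),
 * for_each_class() visits them in strictly decreasing precedence, so the
 * pick_next_task() loop in core.c can stop at the first class that has a
 * runnable task.
 */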
799
800
801#ifdef CONFIG_SMP
802
803extern void trigger_load_balance(struct rq *rq, int cpu);
804extern void idle_balance(int this_cpu, struct rq *this_rq);
805
806#else /* CONFIG_SMP */
807
808static inline void idle_balance(int cpu, struct rq *rq)
809{
810}
811
812#endif
813
814extern void sysrq_sched_debug_show(void);
815extern void sched_init_granularity(void);
816extern void update_max_interval(void);
817extern void update_group_power(struct sched_domain *sd, int cpu);
818extern int update_runtime(struct notifier_block *nfb, unsigned long action, void *hcpu);
819extern void init_sched_rt_class(void);
820extern void init_sched_fair_class(void);
821
822extern void resched_task(struct task_struct *p);
823extern void resched_cpu(int cpu);
824
825extern struct rt_bandwidth def_rt_bandwidth;
826extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
827
828extern void update_cpu_load(struct rq *this_rq);
829
830#ifdef CONFIG_CGROUP_CPUACCT
831extern void cpuacct_charge(struct task_struct *tsk, u64 cputime);
832extern void cpuacct_update_stats(struct task_struct *tsk,
833 enum cpuacct_stat_index idx, cputime_t val);
834#else
835static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
836static inline void cpuacct_update_stats(struct task_struct *tsk,
837 enum cpuacct_stat_index idx, cputime_t val) {}
838#endif
839
840static inline void inc_nr_running(struct rq *rq)
841{
842 rq->nr_running++;
843}
844
845static inline void dec_nr_running(struct rq *rq)
846{
847 rq->nr_running--;
848}
849
850extern void update_rq_clock(struct rq *rq);
851
852extern void activate_task(struct rq *rq, struct task_struct *p, int flags);
853extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags);
854
855extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
856
857extern const_debug unsigned int sysctl_sched_time_avg;
858extern const_debug unsigned int sysctl_sched_nr_migrate;
859extern const_debug unsigned int sysctl_sched_migration_cost;
860
861static inline u64 sched_avg_period(void)
862{
863 return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
864}
865
866void calc_load_account_idle(struct rq *this_rq);
867
868#ifdef CONFIG_SCHED_HRTICK
869
870/*
871 * Use hrtick when:
872 * - enabled by features
873 * - hrtimer is actually high res
874 */
875static inline int hrtick_enabled(struct rq *rq)
876{
877 if (!sched_feat(HRTICK))
878 return 0;
879 if (!cpu_active(cpu_of(rq)))
880 return 0;
881 return hrtimer_is_hres_active(&rq->hrtick_timer);
882}
883
884void hrtick_start(struct rq *rq, u64 delay);
885
886#endif /* CONFIG_SCHED_HRTICK */
887
888#ifdef CONFIG_SMP
889extern void sched_avg_update(struct rq *rq);
890static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
891{
892 rq->rt_avg += rt_delta;
893 sched_avg_update(rq);
894}
895#else
896static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
897static inline void sched_avg_update(struct rq *rq) { }
898#endif
899
900extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
901
902#ifdef CONFIG_SMP
903#ifdef CONFIG_PREEMPT
904
905static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
906
907/*
908 * fair double_lock_balance: Safely acquires both rq->locks in a fair
909 * way at the expense of forcing extra atomic operations in all
910 * invocations. This assures that the double_lock is acquired using the
911 * same underlying policy as the spinlock_t on this architecture, which
912 * reduces latency compared to the unfair variant below. However, it
913 * also adds more overhead and therefore may reduce throughput.
914 */
915static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
916 __releases(this_rq->lock)
917 __acquires(busiest->lock)
918 __acquires(this_rq->lock)
919{
920 raw_spin_unlock(&this_rq->lock);
921 double_rq_lock(this_rq, busiest);
922
923 return 1;
924}
925
926#else
927/*
928 * Unfair double_lock_balance: Optimizes throughput at the expense of
929 * latency by eliminating extra atomic operations when the locks are
930 * already in proper order on entry. This favors lower cpu-ids and will
931 * grant the double lock to lower cpus over higher ids under contention,
932 * regardless of entry order into the function.
933 */
934static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
935 __releases(this_rq->lock)
936 __acquires(busiest->lock)
937 __acquires(this_rq->lock)
938{
939 int ret = 0;
940
941 if (unlikely(!raw_spin_trylock(&busiest->lock))) {
942 if (busiest < this_rq) {
943 raw_spin_unlock(&this_rq->lock);
944 raw_spin_lock(&busiest->lock);
945 raw_spin_lock_nested(&this_rq->lock,
946 SINGLE_DEPTH_NESTING);
947 ret = 1;
948 } else
949 raw_spin_lock_nested(&busiest->lock,
950 SINGLE_DEPTH_NESTING);
951 }
952 return ret;
953}
954
955#endif /* CONFIG_PREEMPT */
956
957/*
958 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
959 */
960static inline int double_lock_balance(struct rq *this_rq, struct rq *busiest)
961{
962 if (unlikely(!irqs_disabled())) {
963 /* printk() doesn't work well under rq->lock */
964 raw_spin_unlock(&this_rq->lock);
965 BUG_ON(1);
966 }
967
968 return _double_lock_balance(this_rq, busiest);
969}
970
971static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
972 __releases(busiest->lock)
973{
974 raw_spin_unlock(&busiest->lock);
975 lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
976}
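
A hedged usage sketch of the pair above, loosely in the style of the RT push/pull code: the caller already holds this_rq->lock and must revalidate anything it read earlier whenever double_lock_balance() reports that the lock was dropped. The function name and the nr_running check are placeholders:

static int try_pull_from(struct rq *this_rq, struct rq *busiest)
{
	int found = 0;

	if (double_lock_balance(this_rq, busiest)) {
		/*
		 * this_rq->lock was released and retaken inside the call;
		 * any state sampled from this_rq beforehand is now stale.
		 */
	}

	if (busiest->nr_running > 1)
		found = 1;	/* stand-in for the actual migration work */

	double_unlock_balance(this_rq, busiest);
	return found;
}

Note that both variants end up taking the two locks in a fixed (address-based) order, which is what prevents ABBA deadlocks when two CPUs balance against each other.
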
977
978/*
979 * double_rq_lock - safely lock two runqueues
980 *
981 * Note this does not disable interrupts like task_rq_lock;
982 * you need to do so manually before calling.
983 */
984static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
985 __acquires(rq1->lock)
986 __acquires(rq2->lock)
987{
988 BUG_ON(!irqs_disabled());
989 if (rq1 == rq2) {
990 raw_spin_lock(&rq1->lock);
991 __acquire(rq2->lock); /* Fake it out ;) */
992 } else {
993 if (rq1 < rq2) {
994 raw_spin_lock(&rq1->lock);
995 raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
996 } else {
997 raw_spin_lock(&rq2->lock);
998 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
999 }
1000 }
1001}
1002
1003/*
1004 * double_rq_unlock - safely unlock two runqueues
1005 *
1006 * Note this does not restore interrupts like task_rq_unlock;
1007 * you need to do so manually after calling.
1008 */
1009static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1010 __releases(rq1->lock)
1011 __releases(rq2->lock)
1012{
1013 raw_spin_unlock(&rq1->lock);
1014 if (rq1 != rq2)
1015 raw_spin_unlock(&rq2->lock);
1016 else
1017 __release(rq2->lock);
1018}
1019
1020#else /* CONFIG_SMP */
1021
1022/*
1023 * double_rq_lock - safely lock two runqueues
1024 *
1025 * Note this does not disable interrupts like task_rq_lock;
1026 * you need to do so manually before calling.
1027 */
1028static inline void double_rq_lock(struct rq *rq1, struct rq *rq2)
1029 __acquires(rq1->lock)
1030 __acquires(rq2->lock)
1031{
1032 BUG_ON(!irqs_disabled());
1033 BUG_ON(rq1 != rq2);
1034 raw_spin_lock(&rq1->lock);
1035 __acquire(rq2->lock); /* Fake it out ;) */
1036}
1037
1038/*
1039 * double_rq_unlock - safely unlock two runqueues
1040 *
1041 * Note this does not restore interrupts like task_rq_unlock;
1042 * you need to do so manually after calling.
1043 */
1044static inline void double_rq_unlock(struct rq *rq1, struct rq *rq2)
1045 __releases(rq1->lock)
1046 __releases(rq2->lock)
1047{
1048 BUG_ON(rq1 != rq2);
1049 raw_spin_unlock(&rq1->lock);
1050 __release(rq2->lock);
1051}
1052
1053#endif
1054
1055extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
1056extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
1057extern void print_cfs_stats(struct seq_file *m, int cpu);
1058extern void print_rt_stats(struct seq_file *m, int cpu);
1059
1060extern void init_cfs_rq(struct cfs_rq *cfs_rq);
1061extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
1062extern void unthrottle_offline_cfs_rqs(struct rq *rq);
1063
1064extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
new file mode 100644
index 000000000000..2a581ba8e190
--- /dev/null
+++ b/kernel/sched/stats.c
@@ -0,0 +1,111 @@
1
2#include <linux/slab.h>
3#include <linux/fs.h>
4#include <linux/seq_file.h>
5#include <linux/proc_fs.h>
6
7#include "sched.h"
8
9/*
10 * bump this up when changing the output format or the meaning of an existing
11 * format, so that tools can adapt (or abort)
12 */
13#define SCHEDSTAT_VERSION 15
14
15static int show_schedstat(struct seq_file *seq, void *v)
16{
17 int cpu;
18 int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
19 char *mask_str = kmalloc(mask_len, GFP_KERNEL);
20
21 if (mask_str == NULL)
22 return -ENOMEM;
23
24 seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
25 seq_printf(seq, "timestamp %lu\n", jiffies);
26 for_each_online_cpu(cpu) {
27 struct rq *rq = cpu_rq(cpu);
28#ifdef CONFIG_SMP
29 struct sched_domain *sd;
30 int dcount = 0;
31#endif
32
33 /* runqueue-specific stats */
34 seq_printf(seq,
35 "cpu%d %u %u %u %u %u %u %llu %llu %lu",
36 cpu, rq->yld_count,
37 rq->sched_switch, rq->sched_count, rq->sched_goidle,
38 rq->ttwu_count, rq->ttwu_local,
39 rq->rq_cpu_time,
40 rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);
41
42 seq_printf(seq, "\n");
43
44#ifdef CONFIG_SMP
45 /* domain-specific stats */
46 rcu_read_lock();
47 for_each_domain(cpu, sd) {
48 enum cpu_idle_type itype;
49
50 cpumask_scnprintf(mask_str, mask_len,
51 sched_domain_span(sd));
52 seq_printf(seq, "domain%d %s", dcount++, mask_str);
53 for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
54 itype++) {
55 seq_printf(seq, " %u %u %u %u %u %u %u %u",
56 sd->lb_count[itype],
57 sd->lb_balanced[itype],
58 sd->lb_failed[itype],
59 sd->lb_imbalance[itype],
60 sd->lb_gained[itype],
61 sd->lb_hot_gained[itype],
62 sd->lb_nobusyq[itype],
63 sd->lb_nobusyg[itype]);
64 }
65 seq_printf(seq,
66 " %u %u %u %u %u %u %u %u %u %u %u %u\n",
67 sd->alb_count, sd->alb_failed, sd->alb_pushed,
68 sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
69 sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
70 sd->ttwu_wake_remote, sd->ttwu_move_affine,
71 sd->ttwu_move_balance);
72 }
73 rcu_read_unlock();
74#endif
75 }
76 kfree(mask_str);
77 return 0;
78}
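
The resulting /proc/schedstat text therefore looks roughly as follows. All values are purely illustrative, and each domain line actually carries 36 counters (8 per idle type plus 12 trailing ones), abbreviated here:

version 15
timestamp 4297377190
cpu0 0 0 127564 41230 68211 51009 3412635820 987654321 120034
domain0 003 1523 1490 12 9 0 0 0 0 ...
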
79
80static int schedstat_open(struct inode *inode, struct file *file)
81{
82 unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
83 char *buf = kmalloc(size, GFP_KERNEL);
84 struct seq_file *m;
85 int res;
86
87 if (!buf)
88 return -ENOMEM;
89 res = single_open(file, show_schedstat, NULL);
90 if (!res) {
91 m = file->private_data;
92 m->buf = buf;
93 m->size = size;
94 } else
95 kfree(buf);
96 return res;
97}
98
99static const struct file_operations proc_schedstat_operations = {
100 .open = schedstat_open,
101 .read = seq_read,
102 .llseek = seq_lseek,
103 .release = single_release,
104};
105
106static int __init proc_schedstat_init(void)
107{
108 proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
109 return 0;
110}
111module_init(proc_schedstat_init);
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
new file mode 100644
index 000000000000..ea2b6f0ec868
--- /dev/null
+++ b/kernel/sched/stats.h
@@ -0,0 +1,233 @@
1
2#ifdef CONFIG_SCHEDSTATS
3
4/*
5 * Expects runqueue lock to be held for atomicity of update
6 */
7static inline void
8rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
9{
10 if (rq) {
11 rq->rq_sched_info.run_delay += delta;
12 rq->rq_sched_info.pcount++;
13 }
14}
15
16/*
17 * Expects runqueue lock to be held for atomicity of update
18 */
19static inline void
20rq_sched_info_depart(struct rq *rq, unsigned long long delta)
21{
22 if (rq)
23 rq->rq_cpu_time += delta;
24}
25
26static inline void
27rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
28{
29 if (rq)
30 rq->rq_sched_info.run_delay += delta;
31}
32# define schedstat_inc(rq, field) do { (rq)->field++; } while (0)
33# define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0)
34# define schedstat_set(var, val) do { var = (val); } while (0)
35#else /* !CONFIG_SCHEDSTATS */
36static inline void
37rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
38{}
39static inline void
40rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
41{}
42static inline void
43rq_sched_info_depart(struct rq *rq, unsigned long long delta)
44{}
45# define schedstat_inc(rq, field) do { } while (0)
46# define schedstat_add(rq, field, amt) do { } while (0)
47# define schedstat_set(var, val) do { } while (0)
48#endif
49
50#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
51static inline void sched_info_reset_dequeued(struct task_struct *t)
52{
53 t->sched_info.last_queued = 0;
54}
55
56/*
57 * We are interested in knowing how long it was from the *first* time a
58 * task was queued to the time that it finally hit a cpu, we call this routine
59 * from dequeue_task() to account for possible rq->clock skew across cpus. The
60 * delta taken on each cpu would annul the skew.
61 */
62static inline void sched_info_dequeued(struct task_struct *t)
63{
64 unsigned long long now = task_rq(t)->clock, delta = 0;
65
66 if (unlikely(sched_info_on()))
67 if (t->sched_info.last_queued)
68 delta = now - t->sched_info.last_queued;
69 sched_info_reset_dequeued(t);
70 t->sched_info.run_delay += delta;
71
72 rq_sched_info_dequeued(task_rq(t), delta);
73}
74
75/*
76 * Called when a task finally hits the cpu. We can now calculate how
77 * long it was waiting to run. We also note when it began so that we
78 * can keep stats on how long its timeslice is.
79 */
80static void sched_info_arrive(struct task_struct *t)
81{
82 unsigned long long now = task_rq(t)->clock, delta = 0;
83
84 if (t->sched_info.last_queued)
85 delta = now - t->sched_info.last_queued;
86 sched_info_reset_dequeued(t);
87 t->sched_info.run_delay += delta;
88 t->sched_info.last_arrival = now;
89 t->sched_info.pcount++;
90
91 rq_sched_info_arrive(task_rq(t), delta);
92}
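
For example (illustrative numbers): a task queued when its runqueue clock read 1,000,000 ns and first run at 1,300,000 ns gets last_queued cleared, 300,000 ns added to both its own run_delay and the runqueue's rq_sched_info.run_delay, last_arrival set to 1,300,000 ns, and both pcount fields bumped; run_delay and pcount are what /proc/<pid>/schedstat later exposes as the task's scheduling delay and run count.
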
93
94/*
95 * This function is only called from enqueue_task(), and it only updates
96 * the timestamp if it is not already set. It's assumed that
97 * sched_info_dequeued() will clear that stamp when appropriate.
98 */
99static inline void sched_info_queued(struct task_struct *t)
100{
101 if (unlikely(sched_info_on()))
102 if (!t->sched_info.last_queued)
103 t->sched_info.last_queued = task_rq(t)->clock;
104}
105
106/*
107 * Called when a process ceases being the active-running process, either
108 * voluntarily or involuntarily. Now we can calculate how long we ran.
109 * Also, if the process is still in the TASK_RUNNING state, call
110 * sched_info_queued() to mark that it has now again started waiting on
111 * the runqueue.
112 */
113static inline void sched_info_depart(struct task_struct *t)
114{
115 unsigned long long delta = task_rq(t)->clock -
116 t->sched_info.last_arrival;
117
118 rq_sched_info_depart(task_rq(t), delta);
119
120 if (t->state == TASK_RUNNING)
121 sched_info_queued(t);
122}
123
124/*
125 * Called when tasks are switched involuntarily, typically because the
126 * previous task's time slice expired. (This may also be called when
127 * switching to or from the idle task.) We are only called when prev != next.
128 */
129static inline void
130__sched_info_switch(struct task_struct *prev, struct task_struct *next)
131{
132 struct rq *rq = task_rq(prev);
133
134 /*
135 * prev now departs the cpu. It's not interesting to record
136 * stats about how efficient we were at scheduling the idle
137 * process, however.
138 */
139 if (prev != rq->idle)
140 sched_info_depart(prev);
141
142 if (next != rq->idle)
143 sched_info_arrive(next);
144}
145static inline void
146sched_info_switch(struct task_struct *prev, struct task_struct *next)
147{
148 if (unlikely(sched_info_on()))
149 __sched_info_switch(prev, next);
150}
151#else
152#define sched_info_queued(t) do { } while (0)
153#define sched_info_reset_dequeued(t) do { } while (0)
154#define sched_info_dequeued(t) do { } while (0)
155#define sched_info_switch(t, next) do { } while (0)
156#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
157
158/*
159 * The following are functions that support scheduler-internal time accounting.
160 * These functions are generally called at the timer tick. None of this depends
161 * on CONFIG_SCHEDSTATS.
162 */
163
164/**
165 * account_group_user_time - Maintain utime for a thread group.
166 *
167 * @tsk: Pointer to task structure.
168 * @cputime: Time value by which to increment the utime field of the
169 * thread_group_cputime structure.
170 *
171 * If thread group time is being maintained, get the structure for the
172 * running CPU and update the utime field there.
173 */
174static inline void account_group_user_time(struct task_struct *tsk,
175 cputime_t cputime)
176{
177 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
178
179 if (!cputimer->running)
180 return;
181
182 raw_spin_lock(&cputimer->lock);
183 cputimer->cputime.utime =
184 cputime_add(cputimer->cputime.utime, cputime);
185 raw_spin_unlock(&cputimer->lock);
186}
187
188/**
189 * account_group_system_time - Maintain stime for a thread group.
190 *
191 * @tsk: Pointer to task structure.
192 * @cputime: Time value by which to increment the stime field of the
193 * thread_group_cputime structure.
194 *
195 * If thread group time is being maintained, get the structure for the
196 * running CPU and update the stime field there.
197 */
198static inline void account_group_system_time(struct task_struct *tsk,
199 cputime_t cputime)
200{
201 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
202
203 if (!cputimer->running)
204 return;
205
206 raw_spin_lock(&cputimer->lock);
207 cputimer->cputime.stime =
208 cputime_add(cputimer->cputime.stime, cputime);
209 raw_spin_unlock(&cputimer->lock);
210}
211
212/**
213 * account_group_exec_runtime - Maintain exec runtime for a thread group.
214 *
215 * @tsk: Pointer to task structure.
216 * @ns: Time value by which to increment the sum_exec_runtime field
217 * of the thread_group_cputime structure.
218 *
219 * If thread group time is being maintained, get the structure for the
220 * running CPU and update the sum_exec_runtime field there.
221 */
222static inline void account_group_exec_runtime(struct task_struct *tsk,
223 unsigned long long ns)
224{
225 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
226
227 if (!cputimer->running)
228 return;
229
230 raw_spin_lock(&cputimer->lock);
231 cputimer->cputime.sum_exec_runtime += ns;
232 raw_spin_unlock(&cputimer->lock);
233}
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
new file mode 100644
index 000000000000..7b386e86fd23
--- /dev/null
+++ b/kernel/sched/stop_task.c
@@ -0,0 +1,108 @@
1#include "sched.h"
2
3/*
4 * stop-task scheduling class.
5 *
6 * The stop task is the highest priority task in the system, it preempts
7 * everything and will be preempted by nothing.
8 *
9 * See kernel/stop_machine.c
10 */
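
For context (the wiring lives in core.c and kernel/stop_machine.c, not here): the task this class runs is the per-cpu cpu_stop kthread, which core.c installs roughly as sketched below. The helper name is invented and the handling of a previously installed stop task is omitted:

static void install_stop_task(int cpu, struct task_struct *stop)
{
	/*
	 * Put the kthread into the stop class and point this cpu's
	 * runqueue at it, so pick_next_task_stop() below can find it.
	 */
	stop->sched_class = &stop_sched_class;
	cpu_rq(cpu)->stop = stop;
}
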
11
12#ifdef CONFIG_SMP
13static int
14select_task_rq_stop(struct task_struct *p, int sd_flag, int flags)
15{
16 return task_cpu(p); /* stop tasks never migrate */
17}
18#endif /* CONFIG_SMP */
19
20static void
21check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
22{
23 /* we're never preempted */
24}
25
26static struct task_struct *pick_next_task_stop(struct rq *rq)
27{
28 struct task_struct *stop = rq->stop;
29
30 if (stop && stop->on_rq)
31 return stop;
32
33 return NULL;
34}
35
36static void
37enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
38{
39 inc_nr_running(rq);
40}
41
42static void
43dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
44{
45 dec_nr_running(rq);
46}
47
48static void yield_task_stop(struct rq *rq)
49{
50 BUG(); /* the stop task should never yield, it's pointless. */
51}
52
53static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
54{
55}
56
57static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
58{
59}
60
61static void set_curr_task_stop(struct rq *rq)
62{
63}
64
65static void switched_to_stop(struct rq *rq, struct task_struct *p)
66{
67 BUG(); /* it's impossible to change to this class */
68}
69
70static void
71prio_changed_stop(struct rq *rq, struct task_struct *p, int oldprio)
72{
73 BUG(); /* how!? what priority? */
74}
75
76static unsigned int
77get_rr_interval_stop(struct rq *rq, struct task_struct *task)
78{
79 return 0;
80}
81
82/*
83 * Simple, special scheduling class for the per-CPU stop tasks:
84 */
85const struct sched_class stop_sched_class = {
86 .next = &rt_sched_class,
87
88 .enqueue_task = enqueue_task_stop,
89 .dequeue_task = dequeue_task_stop,
90 .yield_task = yield_task_stop,
91
92 .check_preempt_curr = check_preempt_curr_stop,
93
94 .pick_next_task = pick_next_task_stop,
95 .put_prev_task = put_prev_task_stop,
96
97#ifdef CONFIG_SMP
98 .select_task_rq = select_task_rq_stop,
99#endif
100
101 .set_curr_task = set_curr_task_stop,
102 .task_tick = task_tick_stop,
103
104 .get_rr_interval = get_rr_interval_stop,
105
106 .prio_changed = prio_changed_stop,
107 .switched_to = switched_to_stop,
108};