aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorSteven Rostedt <srostedt@redhat.com>2010-05-14 09:29:52 -0400
committerSteven Rostedt <rostedt@goodmis.org>2010-05-14 09:29:52 -0400
commit23e117fa44429cc054cb27d5621d64e4ced91e52 (patch)
treea4b9d0902b9c6f009b2c297515221c1b9bed3af8 /kernel
parent668eb65f092902eb7dd526af73d4a7f025a94612 (diff)
parenta93d2f1744206827ccf416e2cdc5018aa503314e (diff)
Merge branch 'sched/core' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip into trace/tip/tracing/core-4
Diffstat (limited to 'kernel')
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/capability.c1
-rw-r--r--kernel/cgroup.c2
-rw-r--r--kernel/cpu.c26
-rw-r--r--kernel/cpuset.c67
-rw-r--r--kernel/cred-internals.h21
-rw-r--r--kernel/cred.c7
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/module.c14
-rw-r--r--kernel/rcutorture.c2
-rw-r--r--kernel/sched.c726
-rw-r--r--kernel/sched_debug.c108
-rw-r--r--kernel/sched_fair.c350
-rw-r--r--kernel/sched_features.h55
-rw-r--r--kernel/sched_idletask.c8
-rw-r--r--kernel/sched_rt.c15
-rw-r--r--kernel/stop_machine.c534
-rw-r--r--kernel/sys.c2
-rw-r--r--kernel/time/tick-sched.c84
-rw-r--r--kernel/time/timer_list.c1
-rw-r--r--kernel/trace/ftrace.c3
-rw-r--r--kernel/trace/trace_sched_switch.c5
-rw-r--r--kernel/trace/trace_sched_wakeup.c5
-rw-r--r--kernel/user.c11
24 files changed, 1001 insertions, 1049 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index a987aa1676b5..149e18ef1ab1 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -68,7 +68,7 @@ obj-$(CONFIG_USER_NS) += user_namespace.o
68obj-$(CONFIG_PID_NS) += pid_namespace.o 68obj-$(CONFIG_PID_NS) += pid_namespace.o
69obj-$(CONFIG_IKCONFIG) += configs.o 69obj-$(CONFIG_IKCONFIG) += configs.o
70obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o 70obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
71obj-$(CONFIG_STOP_MACHINE) += stop_machine.o 71obj-$(CONFIG_SMP) += stop_machine.o
72obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o 72obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
73obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o 73obj-$(CONFIG_AUDIT) += audit.o auditfilter.o audit_watch.o
74obj-$(CONFIG_AUDITSYSCALL) += auditsc.o 74obj-$(CONFIG_AUDITSYSCALL) += auditsc.o
diff --git a/kernel/capability.c b/kernel/capability.c
index 9e4697e9b276..2f05303715a5 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -15,7 +15,6 @@
15#include <linux/syscalls.h> 15#include <linux/syscalls.h>
16#include <linux/pid_namespace.h> 16#include <linux/pid_namespace.h>
17#include <asm/uaccess.h> 17#include <asm/uaccess.h>
18#include "cred-internals.h"
19 18
20/* 19/*
21 * Leveraged for setting/resetting capabilities 20 * Leveraged for setting/resetting capabilities
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e2769e13980c..4a07d057a265 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -3010,7 +3010,7 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
3010 unsigned long flags = (unsigned long)key; 3010 unsigned long flags = (unsigned long)key;
3011 3011
3012 if (flags & POLLHUP) { 3012 if (flags & POLLHUP) {
3013 remove_wait_queue_locked(event->wqh, &event->wait); 3013 __remove_wait_queue(event->wqh, &event->wait);
3014 spin_lock(&cgrp->event_list_lock); 3014 spin_lock(&cgrp->event_list_lock);
3015 list_del(&event->list); 3015 list_del(&event->list);
3016 spin_unlock(&cgrp->event_list_lock); 3016 spin_unlock(&cgrp->event_list_lock);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 25bba73b1be3..545777574779 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -164,6 +164,7 @@ static inline void check_for_tasks(int cpu)
164} 164}
165 165
166struct take_cpu_down_param { 166struct take_cpu_down_param {
167 struct task_struct *caller;
167 unsigned long mod; 168 unsigned long mod;
168 void *hcpu; 169 void *hcpu;
169}; 170};
@@ -172,6 +173,7 @@ struct take_cpu_down_param {
172static int __ref take_cpu_down(void *_param) 173static int __ref take_cpu_down(void *_param)
173{ 174{
174 struct take_cpu_down_param *param = _param; 175 struct take_cpu_down_param *param = _param;
176 unsigned int cpu = (unsigned long)param->hcpu;
175 int err; 177 int err;
176 178
177 /* Ensure this CPU doesn't handle any more interrupts. */ 179 /* Ensure this CPU doesn't handle any more interrupts. */
@@ -182,6 +184,8 @@ static int __ref take_cpu_down(void *_param)
182 raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod, 184 raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
183 param->hcpu); 185 param->hcpu);
184 186
187 if (task_cpu(param->caller) == cpu)
188 move_task_off_dead_cpu(cpu, param->caller);
185 /* Force idle task to run as soon as we yield: it should 189 /* Force idle task to run as soon as we yield: it should
186 immediately notice cpu is offline and die quickly. */ 190 immediately notice cpu is offline and die quickly. */
187 sched_idle_next(); 191 sched_idle_next();
@@ -192,10 +196,10 @@ static int __ref take_cpu_down(void *_param)
192static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) 196static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
193{ 197{
194 int err, nr_calls = 0; 198 int err, nr_calls = 0;
195 cpumask_var_t old_allowed;
196 void *hcpu = (void *)(long)cpu; 199 void *hcpu = (void *)(long)cpu;
197 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 200 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
198 struct take_cpu_down_param tcd_param = { 201 struct take_cpu_down_param tcd_param = {
202 .caller = current,
199 .mod = mod, 203 .mod = mod,
200 .hcpu = hcpu, 204 .hcpu = hcpu,
201 }; 205 };
@@ -206,9 +210,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
206 if (!cpu_online(cpu)) 210 if (!cpu_online(cpu))
207 return -EINVAL; 211 return -EINVAL;
208 212
209 if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
210 return -ENOMEM;
211
212 cpu_hotplug_begin(); 213 cpu_hotplug_begin();
213 set_cpu_active(cpu, false); 214 set_cpu_active(cpu, false);
214 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, 215 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
@@ -225,10 +226,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
225 goto out_release; 226 goto out_release;
226 } 227 }
227 228
228 /* Ensure that we are not runnable on dying cpu */
229 cpumask_copy(old_allowed, &current->cpus_allowed);
230 set_cpus_allowed_ptr(current, cpu_active_mask);
231
232 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); 229 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
233 if (err) { 230 if (err) {
234 set_cpu_active(cpu, true); 231 set_cpu_active(cpu, true);
@@ -237,7 +234,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
237 hcpu) == NOTIFY_BAD) 234 hcpu) == NOTIFY_BAD)
238 BUG(); 235 BUG();
239 236
240 goto out_allowed; 237 goto out_release;
241 } 238 }
242 BUG_ON(cpu_online(cpu)); 239 BUG_ON(cpu_online(cpu));
243 240
@@ -255,8 +252,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
255 252
256 check_for_tasks(cpu); 253 check_for_tasks(cpu);
257 254
258out_allowed:
259 set_cpus_allowed_ptr(current, old_allowed);
260out_release: 255out_release:
261 cpu_hotplug_done(); 256 cpu_hotplug_done();
262 if (!err) { 257 if (!err) {
@@ -264,7 +259,6 @@ out_release:
264 hcpu) == NOTIFY_BAD) 259 hcpu) == NOTIFY_BAD)
265 BUG(); 260 BUG();
266 } 261 }
267 free_cpumask_var(old_allowed);
268 return err; 262 return err;
269} 263}
270 264
@@ -272,9 +266,6 @@ int __ref cpu_down(unsigned int cpu)
272{ 266{
273 int err; 267 int err;
274 268
275 err = stop_machine_create();
276 if (err)
277 return err;
278 cpu_maps_update_begin(); 269 cpu_maps_update_begin();
279 270
280 if (cpu_hotplug_disabled) { 271 if (cpu_hotplug_disabled) {
@@ -286,7 +277,6 @@ int __ref cpu_down(unsigned int cpu)
286 277
287out: 278out:
288 cpu_maps_update_done(); 279 cpu_maps_update_done();
289 stop_machine_destroy();
290 return err; 280 return err;
291} 281}
292EXPORT_SYMBOL(cpu_down); 282EXPORT_SYMBOL(cpu_down);
@@ -367,9 +357,6 @@ int disable_nonboot_cpus(void)
367{ 357{
368 int cpu, first_cpu, error; 358 int cpu, first_cpu, error;
369 359
370 error = stop_machine_create();
371 if (error)
372 return error;
373 cpu_maps_update_begin(); 360 cpu_maps_update_begin();
374 first_cpu = cpumask_first(cpu_online_mask); 361 first_cpu = cpumask_first(cpu_online_mask);
375 /* 362 /*
@@ -400,7 +387,6 @@ int disable_nonboot_cpus(void)
400 printk(KERN_ERR "Non-boot CPUs are not disabled\n"); 387 printk(KERN_ERR "Non-boot CPUs are not disabled\n");
401 } 388 }
402 cpu_maps_update_done(); 389 cpu_maps_update_done();
403 stop_machine_destroy();
404 return error; 390 return error;
405} 391}
406 392
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d10946748ec2..9a50c5f6e727 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2182,19 +2182,52 @@ void __init cpuset_init_smp(void)
2182void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) 2182void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2183{ 2183{
2184 mutex_lock(&callback_mutex); 2184 mutex_lock(&callback_mutex);
2185 cpuset_cpus_allowed_locked(tsk, pmask); 2185 task_lock(tsk);
2186 guarantee_online_cpus(task_cs(tsk), pmask);
2187 task_unlock(tsk);
2186 mutex_unlock(&callback_mutex); 2188 mutex_unlock(&callback_mutex);
2187} 2189}
2188 2190
2189/** 2191int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2190 * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
2191 * Must be called with callback_mutex held.
2192 **/
2193void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
2194{ 2192{
2195 task_lock(tsk); 2193 const struct cpuset *cs;
2196 guarantee_online_cpus(task_cs(tsk), pmask); 2194 int cpu;
2197 task_unlock(tsk); 2195
2196 rcu_read_lock();
2197 cs = task_cs(tsk);
2198 if (cs)
2199 cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
2200 rcu_read_unlock();
2201
2202 /*
2203 * We own tsk->cpus_allowed, nobody can change it under us.
2204 *
2205 * But we used cs && cs->cpus_allowed lockless and thus can
2206 * race with cgroup_attach_task() or update_cpumask() and get
2207 * the wrong tsk->cpus_allowed. However, both cases imply the
2208 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2209 * which takes task_rq_lock().
2210 *
2211 * If we are called after it dropped the lock we must see all
2212 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
2213 * set any mask even if it is not right from task_cs() pov,
2214 * the pending set_cpus_allowed_ptr() will fix things.
2215 */
2216
2217 cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
2218 if (cpu >= nr_cpu_ids) {
2219 /*
2220 * Either tsk->cpus_allowed is wrong (see above) or it
2221 * is actually empty. The latter case is only possible
2222 * if we are racing with remove_tasks_in_empty_cpuset().
2223 * Like above we can temporary set any mask and rely on
2224 * set_cpus_allowed_ptr() as synchronization point.
2225 */
2226 cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
2227 cpu = cpumask_any(cpu_active_mask);
2228 }
2229
2230 return cpu;
2198} 2231}
2199 2232
2200void cpuset_init_current_mems_allowed(void) 2233void cpuset_init_current_mems_allowed(void)
@@ -2383,22 +2416,6 @@ int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
2383} 2416}
2384 2417
2385/** 2418/**
2386 * cpuset_lock - lock out any changes to cpuset structures
2387 *
2388 * The out of memory (oom) code needs to mutex_lock cpusets
2389 * from being changed while it scans the tasklist looking for a
2390 * task in an overlapping cpuset. Expose callback_mutex via this
2391 * cpuset_lock() routine, so the oom code can lock it, before
2392 * locking the task list. The tasklist_lock is a spinlock, so
2393 * must be taken inside callback_mutex.
2394 */
2395
2396void cpuset_lock(void)
2397{
2398 mutex_lock(&callback_mutex);
2399}
2400
2401/**
2402 * cpuset_unlock - release lock on cpuset changes 2419 * cpuset_unlock - release lock on cpuset changes
2403 * 2420 *
2404 * Undo the lock taken in a previous cpuset_lock() call. 2421 * Undo the lock taken in a previous cpuset_lock() call.
diff --git a/kernel/cred-internals.h b/kernel/cred-internals.h
deleted file mode 100644
index 2dc4fc2d0bf1..000000000000
--- a/kernel/cred-internals.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/* Internal credentials stuff
2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12/*
13 * user.c
14 */
15static inline void sched_switch_user(struct task_struct *p)
16{
17#ifdef CONFIG_USER_SCHED
18 sched_move_task(p);
19#endif /* CONFIG_USER_SCHED */
20}
21
diff --git a/kernel/cred.c b/kernel/cred.c
index e1dbe9eef800..8f3672a58a1e 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -17,7 +17,6 @@
17#include <linux/init_task.h> 17#include <linux/init_task.h>
18#include <linux/security.h> 18#include <linux/security.h>
19#include <linux/cn_proc.h> 19#include <linux/cn_proc.h>
20#include "cred-internals.h"
21 20
22#if 0 21#if 0
23#define kdebug(FMT, ...) \ 22#define kdebug(FMT, ...) \
@@ -398,6 +397,8 @@ struct cred *prepare_usermodehelper_creds(void)
398 397
399error: 398error:
400 put_cred(new); 399 put_cred(new);
400 return NULL;
401
401free_tgcred: 402free_tgcred:
402#ifdef CONFIG_KEYS 403#ifdef CONFIG_KEYS
403 kfree(tgcred); 404 kfree(tgcred);
@@ -558,8 +559,6 @@ int commit_creds(struct cred *new)
558 atomic_dec(&old->user->processes); 559 atomic_dec(&old->user->processes);
559 alter_cred_subscribers(old, -2); 560 alter_cred_subscribers(old, -2);
560 561
561 sched_switch_user(task);
562
563 /* send notifications */ 562 /* send notifications */
564 if (new->uid != old->uid || 563 if (new->uid != old->uid ||
565 new->euid != old->euid || 564 new->euid != old->euid ||
@@ -791,8 +790,6 @@ bool creds_are_invalid(const struct cred *cred)
791{ 790{
792 if (cred->magic != CRED_MAGIC) 791 if (cred->magic != CRED_MAGIC)
793 return true; 792 return true;
794 if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
795 return true;
796#ifdef CONFIG_SECURITY_SELINUX 793#ifdef CONFIG_SECURITY_SELINUX
797 if (selinux_is_enabled()) { 794 if (selinux_is_enabled()) {
798 if ((unsigned long) cred->security < PAGE_SIZE) 795 if ((unsigned long) cred->security < PAGE_SIZE)
diff --git a/kernel/exit.c b/kernel/exit.c
index 7f2683a10ac4..eabca5a73a85 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -55,7 +55,6 @@
55#include <asm/unistd.h> 55#include <asm/unistd.h>
56#include <asm/pgtable.h> 56#include <asm/pgtable.h>
57#include <asm/mmu_context.h> 57#include <asm/mmu_context.h>
58#include "cred-internals.h"
59 58
60static void exit_mm(struct task_struct * tsk); 59static void exit_mm(struct task_struct * tsk);
61 60
diff --git a/kernel/module.c b/kernel/module.c
index b8a1e313448c..e2564580f3f1 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -724,16 +724,8 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
724 return -EFAULT; 724 return -EFAULT;
725 name[MODULE_NAME_LEN-1] = '\0'; 725 name[MODULE_NAME_LEN-1] = '\0';
726 726
727 /* Create stop_machine threads since free_module relies on 727 if (mutex_lock_interruptible(&module_mutex) != 0)
728 * a non-failing stop_machine call. */ 728 return -EINTR;
729 ret = stop_machine_create();
730 if (ret)
731 return ret;
732
733 if (mutex_lock_interruptible(&module_mutex) != 0) {
734 ret = -EINTR;
735 goto out_stop;
736 }
737 729
738 mod = find_module(name); 730 mod = find_module(name);
739 if (!mod) { 731 if (!mod) {
@@ -793,8 +785,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
793 785
794 out: 786 out:
795 mutex_unlock(&module_mutex); 787 mutex_unlock(&module_mutex);
796out_stop:
797 stop_machine_destroy();
798 return ret; 788 return ret;
799} 789}
800 790
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 58df55bf83ed..2b676f3a0f26 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -669,7 +669,7 @@ static struct rcu_torture_ops sched_expedited_ops = {
669 .sync = synchronize_sched_expedited, 669 .sync = synchronize_sched_expedited,
670 .cb_barrier = NULL, 670 .cb_barrier = NULL,
671 .fqs = rcu_sched_force_quiescent_state, 671 .fqs = rcu_sched_force_quiescent_state,
672 .stats = rcu_expedited_torture_stats, 672 .stats = NULL,
673 .irq_capable = 1, 673 .irq_capable = 1,
674 .name = "sched_expedited" 674 .name = "sched_expedited"
675}; 675};
diff --git a/kernel/sched.c b/kernel/sched.c
index 6af210a7de70..b531d7934083 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -55,9 +55,9 @@
55#include <linux/cpu.h> 55#include <linux/cpu.h>
56#include <linux/cpuset.h> 56#include <linux/cpuset.h>
57#include <linux/percpu.h> 57#include <linux/percpu.h>
58#include <linux/kthread.h>
59#include <linux/proc_fs.h> 58#include <linux/proc_fs.h>
60#include <linux/seq_file.h> 59#include <linux/seq_file.h>
60#include <linux/stop_machine.h>
61#include <linux/sysctl.h> 61#include <linux/sysctl.h>
62#include <linux/syscalls.h> 62#include <linux/syscalls.h>
63#include <linux/times.h> 63#include <linux/times.h>
@@ -493,8 +493,11 @@ struct rq {
493 #define CPU_LOAD_IDX_MAX 5 493 #define CPU_LOAD_IDX_MAX 5
494 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 494 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
495#ifdef CONFIG_NO_HZ 495#ifdef CONFIG_NO_HZ
496 u64 nohz_stamp;
496 unsigned char in_nohz_recently; 497 unsigned char in_nohz_recently;
497#endif 498#endif
499 unsigned int skip_clock_update;
500
498 /* capture load from *all* tasks on this cpu: */ 501 /* capture load from *all* tasks on this cpu: */
499 struct load_weight load; 502 struct load_weight load;
500 unsigned long nr_load_updates; 503 unsigned long nr_load_updates;
@@ -536,15 +539,13 @@ struct rq {
536 int post_schedule; 539 int post_schedule;
537 int active_balance; 540 int active_balance;
538 int push_cpu; 541 int push_cpu;
542 struct cpu_stop_work active_balance_work;
539 /* cpu of this runqueue: */ 543 /* cpu of this runqueue: */
540 int cpu; 544 int cpu;
541 int online; 545 int online;
542 546
543 unsigned long avg_load_per_task; 547 unsigned long avg_load_per_task;
544 548
545 struct task_struct *migration_thread;
546 struct list_head migration_queue;
547
548 u64 rt_avg; 549 u64 rt_avg;
549 u64 age_stamp; 550 u64 age_stamp;
550 u64 idle_stamp; 551 u64 idle_stamp;
@@ -592,6 +593,13 @@ static inline
592void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) 593void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
593{ 594{
594 rq->curr->sched_class->check_preempt_curr(rq, p, flags); 595 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
596
597 /*
598 * A queue event has occurred, and we're going to schedule. In
599 * this case, we can save a useless back to back clock update.
600 */
601 if (test_tsk_need_resched(p))
602 rq->skip_clock_update = 1;
595} 603}
596 604
597static inline int cpu_of(struct rq *rq) 605static inline int cpu_of(struct rq *rq)
@@ -626,7 +634,8 @@ static inline int cpu_of(struct rq *rq)
626 634
627inline void update_rq_clock(struct rq *rq) 635inline void update_rq_clock(struct rq *rq)
628{ 636{
629 rq->clock = sched_clock_cpu(cpu_of(rq)); 637 if (!rq->skip_clock_update)
638 rq->clock = sched_clock_cpu(cpu_of(rq));
630} 639}
631 640
632/* 641/*
@@ -904,16 +913,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
904#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ 913#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
905 914
906/* 915/*
907 * Check whether the task is waking, we use this to synchronize against 916 * Check whether the task is waking, we use this to synchronize ->cpus_allowed
908 * ttwu() so that task_cpu() reports a stable number. 917 * against ttwu().
909 *
910 * We need to make an exception for PF_STARTING tasks because the fork
911 * path might require task_rq_lock() to work, eg. it can call
912 * set_cpus_allowed_ptr() from the cpuset clone_ns code.
913 */ 918 */
914static inline int task_is_waking(struct task_struct *p) 919static inline int task_is_waking(struct task_struct *p)
915{ 920{
916 return unlikely((p->state == TASK_WAKING) && !(p->flags & PF_STARTING)); 921 return unlikely(p->state == TASK_WAKING);
917} 922}
918 923
919/* 924/*
@@ -926,11 +931,9 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
926 struct rq *rq; 931 struct rq *rq;
927 932
928 for (;;) { 933 for (;;) {
929 while (task_is_waking(p))
930 cpu_relax();
931 rq = task_rq(p); 934 rq = task_rq(p);
932 raw_spin_lock(&rq->lock); 935 raw_spin_lock(&rq->lock);
933 if (likely(rq == task_rq(p) && !task_is_waking(p))) 936 if (likely(rq == task_rq(p)))
934 return rq; 937 return rq;
935 raw_spin_unlock(&rq->lock); 938 raw_spin_unlock(&rq->lock);
936 } 939 }
@@ -947,12 +950,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
947 struct rq *rq; 950 struct rq *rq;
948 951
949 for (;;) { 952 for (;;) {
950 while (task_is_waking(p))
951 cpu_relax();
952 local_irq_save(*flags); 953 local_irq_save(*flags);
953 rq = task_rq(p); 954 rq = task_rq(p);
954 raw_spin_lock(&rq->lock); 955 raw_spin_lock(&rq->lock);
955 if (likely(rq == task_rq(p) && !task_is_waking(p))) 956 if (likely(rq == task_rq(p)))
956 return rq; 957 return rq;
957 raw_spin_unlock_irqrestore(&rq->lock, *flags); 958 raw_spin_unlock_irqrestore(&rq->lock, *flags);
958 } 959 }
@@ -1229,6 +1230,17 @@ void wake_up_idle_cpu(int cpu)
1229 if (!tsk_is_polling(rq->idle)) 1230 if (!tsk_is_polling(rq->idle))
1230 smp_send_reschedule(cpu); 1231 smp_send_reschedule(cpu);
1231} 1232}
1233
1234int nohz_ratelimit(int cpu)
1235{
1236 struct rq *rq = cpu_rq(cpu);
1237 u64 diff = rq->clock - rq->nohz_stamp;
1238
1239 rq->nohz_stamp = rq->clock;
1240
1241 return diff < (NSEC_PER_SEC / HZ) >> 1;
1242}
1243
1232#endif /* CONFIG_NO_HZ */ 1244#endif /* CONFIG_NO_HZ */
1233 1245
1234static u64 sched_avg_period(void) 1246static u64 sched_avg_period(void)
@@ -1771,8 +1783,6 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1771 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 1783 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1772 } 1784 }
1773 } 1785 }
1774 update_rq_clock(rq1);
1775 update_rq_clock(rq2);
1776} 1786}
1777 1787
1778/* 1788/*
@@ -1803,7 +1813,7 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
1803} 1813}
1804#endif 1814#endif
1805 1815
1806static void calc_load_account_active(struct rq *this_rq); 1816static void calc_load_account_idle(struct rq *this_rq);
1807static void update_sysctl(void); 1817static void update_sysctl(void);
1808static int get_update_sysctl_factor(void); 1818static int get_update_sysctl_factor(void);
1809 1819
@@ -1860,62 +1870,43 @@ static void set_load_weight(struct task_struct *p)
1860 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO]; 1870 p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
1861} 1871}
1862 1872
1863static void update_avg(u64 *avg, u64 sample) 1873static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
1864{ 1874{
1865 s64 diff = sample - *avg; 1875 update_rq_clock(rq);
1866 *avg += diff >> 3;
1867}
1868
1869static void
1870enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
1871{
1872 if (wakeup)
1873 p->se.start_runtime = p->se.sum_exec_runtime;
1874
1875 sched_info_queued(p); 1876 sched_info_queued(p);
1876 p->sched_class->enqueue_task(rq, p, wakeup, head); 1877 p->sched_class->enqueue_task(rq, p, flags);
1877 p->se.on_rq = 1; 1878 p->se.on_rq = 1;
1878} 1879}
1879 1880
1880static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) 1881static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
1881{ 1882{
1882 if (sleep) { 1883 update_rq_clock(rq);
1883 if (p->se.last_wakeup) {
1884 update_avg(&p->se.avg_overlap,
1885 p->se.sum_exec_runtime - p->se.last_wakeup);
1886 p->se.last_wakeup = 0;
1887 } else {
1888 update_avg(&p->se.avg_wakeup,
1889 sysctl_sched_wakeup_granularity);
1890 }
1891 }
1892
1893 sched_info_dequeued(p); 1884 sched_info_dequeued(p);
1894 p->sched_class->dequeue_task(rq, p, sleep); 1885 p->sched_class->dequeue_task(rq, p, flags);
1895 p->se.on_rq = 0; 1886 p->se.on_rq = 0;
1896} 1887}
1897 1888
1898/* 1889/*
1899 * activate_task - move a task to the runqueue. 1890 * activate_task - move a task to the runqueue.
1900 */ 1891 */
1901static void activate_task(struct rq *rq, struct task_struct *p, int wakeup) 1892static void activate_task(struct rq *rq, struct task_struct *p, int flags)
1902{ 1893{
1903 if (task_contributes_to_load(p)) 1894 if (task_contributes_to_load(p))
1904 rq->nr_uninterruptible--; 1895 rq->nr_uninterruptible--;
1905 1896
1906 enqueue_task(rq, p, wakeup, false); 1897 enqueue_task(rq, p, flags);
1907 inc_nr_running(rq); 1898 inc_nr_running(rq);
1908} 1899}
1909 1900
1910/* 1901/*
1911 * deactivate_task - remove a task from the runqueue. 1902 * deactivate_task - remove a task from the runqueue.
1912 */ 1903 */
1913static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep) 1904static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1914{ 1905{
1915 if (task_contributes_to_load(p)) 1906 if (task_contributes_to_load(p))
1916 rq->nr_uninterruptible++; 1907 rq->nr_uninterruptible++;
1917 1908
1918 dequeue_task(rq, p, sleep); 1909 dequeue_task(rq, p, flags);
1919 dec_nr_running(rq); 1910 dec_nr_running(rq);
1920} 1911}
1921 1912
@@ -2044,21 +2035,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2044 __set_task_cpu(p, new_cpu); 2035 __set_task_cpu(p, new_cpu);
2045} 2036}
2046 2037
2047struct migration_req { 2038struct migration_arg {
2048 struct list_head list;
2049
2050 struct task_struct *task; 2039 struct task_struct *task;
2051 int dest_cpu; 2040 int dest_cpu;
2052
2053 struct completion done;
2054}; 2041};
2055 2042
2043static int migration_cpu_stop(void *data);
2044
2056/* 2045/*
2057 * The task's runqueue lock must be held. 2046 * The task's runqueue lock must be held.
2058 * Returns true if you have to wait for migration thread. 2047 * Returns true if you have to wait for migration thread.
2059 */ 2048 */
2060static int 2049static bool migrate_task(struct task_struct *p, int dest_cpu)
2061migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
2062{ 2050{
2063 struct rq *rq = task_rq(p); 2051 struct rq *rq = task_rq(p);
2064 2052
@@ -2066,15 +2054,7 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
2066 * If the task is not on a runqueue (and not running), then 2054 * If the task is not on a runqueue (and not running), then
2067 * the next wake-up will properly place the task. 2055 * the next wake-up will properly place the task.
2068 */ 2056 */
2069 if (!p->se.on_rq && !task_running(rq, p)) 2057 return p->se.on_rq || task_running(rq, p);
2070 return 0;
2071
2072 init_completion(&req->done);
2073 req->task = p;
2074 req->dest_cpu = dest_cpu;
2075 list_add(&req->list, &rq->migration_queue);
2076
2077 return 1;
2078} 2058}
2079 2059
2080/* 2060/*
@@ -2175,7 +2155,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2175 * just go back and repeat. 2155 * just go back and repeat.
2176 */ 2156 */
2177 rq = task_rq_lock(p, &flags); 2157 rq = task_rq_lock(p, &flags);
2178 trace_sched_wait_task(rq, p); 2158 trace_sched_wait_task(p);
2179 running = task_running(rq, p); 2159 running = task_running(rq, p);
2180 on_rq = p->se.on_rq; 2160 on_rq = p->se.on_rq;
2181 ncsw = 0; 2161 ncsw = 0;
@@ -2273,6 +2253,9 @@ void task_oncpu_function_call(struct task_struct *p,
2273} 2253}
2274 2254
2275#ifdef CONFIG_SMP 2255#ifdef CONFIG_SMP
2256/*
2257 * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
2258 */
2276static int select_fallback_rq(int cpu, struct task_struct *p) 2259static int select_fallback_rq(int cpu, struct task_struct *p)
2277{ 2260{
2278 int dest_cpu; 2261 int dest_cpu;
@@ -2289,12 +2272,8 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
2289 return dest_cpu; 2272 return dest_cpu;
2290 2273
2291 /* No more Mr. Nice Guy. */ 2274 /* No more Mr. Nice Guy. */
2292 if (dest_cpu >= nr_cpu_ids) { 2275 if (unlikely(dest_cpu >= nr_cpu_ids)) {
2293 rcu_read_lock(); 2276 dest_cpu = cpuset_cpus_allowed_fallback(p);
2294 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
2295 rcu_read_unlock();
2296 dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
2297
2298 /* 2277 /*
2299 * Don't tell them about moving exiting tasks or 2278 * Don't tell them about moving exiting tasks or
2300 * kernel threads (both mm NULL), since they never 2279 * kernel threads (both mm NULL), since they never
@@ -2311,17 +2290,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
2311} 2290}
2312 2291
2313/* 2292/*
2314 * Gets called from 3 sites (exec, fork, wakeup), since it is called without 2293 * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
2315 * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
2316 * by:
2317 *
2318 * exec: is unstable, retry loop
2319 * fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
2320 */ 2294 */
2321static inline 2295static inline
2322int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) 2296int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
2323{ 2297{
2324 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags); 2298 int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
2325 2299
2326 /* 2300 /*
2327 * In order not to call set_task_cpu() on a blocking task we need 2301 * In order not to call set_task_cpu() on a blocking task we need
@@ -2339,6 +2313,12 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
2339 2313
2340 return cpu; 2314 return cpu;
2341} 2315}
2316
2317static void update_avg(u64 *avg, u64 sample)
2318{
2319 s64 diff = sample - *avg;
2320 *avg += diff >> 3;
2321}
2342#endif 2322#endif
2343 2323
2344/*** 2324/***
@@ -2360,16 +2340,13 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2360{ 2340{
2361 int cpu, orig_cpu, this_cpu, success = 0; 2341 int cpu, orig_cpu, this_cpu, success = 0;
2362 unsigned long flags; 2342 unsigned long flags;
2343 unsigned long en_flags = ENQUEUE_WAKEUP;
2363 struct rq *rq; 2344 struct rq *rq;
2364 2345
2365 if (!sched_feat(SYNC_WAKEUPS))
2366 wake_flags &= ~WF_SYNC;
2367
2368 this_cpu = get_cpu(); 2346 this_cpu = get_cpu();
2369 2347
2370 smp_wmb(); 2348 smp_wmb();
2371 rq = task_rq_lock(p, &flags); 2349 rq = task_rq_lock(p, &flags);
2372 update_rq_clock(rq);
2373 if (!(p->state & state)) 2350 if (!(p->state & state))
2374 goto out; 2351 goto out;
2375 2352
@@ -2389,28 +2366,26 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2389 * 2366 *
2390 * First fix up the nr_uninterruptible count: 2367 * First fix up the nr_uninterruptible count:
2391 */ 2368 */
2392 if (task_contributes_to_load(p)) 2369 if (task_contributes_to_load(p)) {
2393 rq->nr_uninterruptible--; 2370 if (likely(cpu_online(orig_cpu)))
2371 rq->nr_uninterruptible--;
2372 else
2373 this_rq()->nr_uninterruptible--;
2374 }
2394 p->state = TASK_WAKING; 2375 p->state = TASK_WAKING;
2395 2376
2396 if (p->sched_class->task_waking) 2377 if (p->sched_class->task_waking) {
2397 p->sched_class->task_waking(rq, p); 2378 p->sched_class->task_waking(rq, p);
2379 en_flags |= ENQUEUE_WAKING;
2380 }
2398 2381
2399 __task_rq_unlock(rq); 2382 cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
2400 2383 if (cpu != orig_cpu)
2401 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
2402 if (cpu != orig_cpu) {
2403 /*
2404 * Since we migrate the task without holding any rq->lock,
2405 * we need to be careful with task_rq_lock(), since that
2406 * might end up locking an invalid rq.
2407 */
2408 set_task_cpu(p, cpu); 2384 set_task_cpu(p, cpu);
2409 } 2385 __task_rq_unlock(rq);
2410 2386
2411 rq = cpu_rq(cpu); 2387 rq = cpu_rq(cpu);
2412 raw_spin_lock(&rq->lock); 2388 raw_spin_lock(&rq->lock);
2413 update_rq_clock(rq);
2414 2389
2415 /* 2390 /*
2416 * We migrated the task without holding either rq->lock, however 2391 * We migrated the task without holding either rq->lock, however
@@ -2438,36 +2413,20 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2438 2413
2439out_activate: 2414out_activate:
2440#endif /* CONFIG_SMP */ 2415#endif /* CONFIG_SMP */
2441 schedstat_inc(p, se.nr_wakeups); 2416 schedstat_inc(p, se.statistics.nr_wakeups);
2442 if (wake_flags & WF_SYNC) 2417 if (wake_flags & WF_SYNC)
2443 schedstat_inc(p, se.nr_wakeups_sync); 2418 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2444 if (orig_cpu != cpu) 2419 if (orig_cpu != cpu)
2445 schedstat_inc(p, se.nr_wakeups_migrate); 2420 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2446 if (cpu == this_cpu) 2421 if (cpu == this_cpu)
2447 schedstat_inc(p, se.nr_wakeups_local); 2422 schedstat_inc(p, se.statistics.nr_wakeups_local);
2448 else 2423 else
2449 schedstat_inc(p, se.nr_wakeups_remote); 2424 schedstat_inc(p, se.statistics.nr_wakeups_remote);
2450 activate_task(rq, p, 1); 2425 activate_task(rq, p, en_flags);
2451 success = 1; 2426 success = 1;
2452 2427
2453 /*
2454 * Only attribute actual wakeups done by this task.
2455 */
2456 if (!in_interrupt()) {
2457 struct sched_entity *se = &current->se;
2458 u64 sample = se->sum_exec_runtime;
2459
2460 if (se->last_wakeup)
2461 sample -= se->last_wakeup;
2462 else
2463 sample -= se->start_runtime;
2464 update_avg(&se->avg_wakeup, sample);
2465
2466 se->last_wakeup = se->sum_exec_runtime;
2467 }
2468
2469out_running: 2428out_running:
2470 trace_sched_wakeup(rq, p, success); 2429 trace_sched_wakeup(p, success);
2471 check_preempt_curr(rq, p, wake_flags); 2430 check_preempt_curr(rq, p, wake_flags);
2472 2431
2473 p->state = TASK_RUNNING; 2432 p->state = TASK_RUNNING;
@@ -2527,42 +2486,9 @@ static void __sched_fork(struct task_struct *p)
2527 p->se.sum_exec_runtime = 0; 2486 p->se.sum_exec_runtime = 0;
2528 p->se.prev_sum_exec_runtime = 0; 2487 p->se.prev_sum_exec_runtime = 0;
2529 p->se.nr_migrations = 0; 2488 p->se.nr_migrations = 0;
2530 p->se.last_wakeup = 0;
2531 p->se.avg_overlap = 0;
2532 p->se.start_runtime = 0;
2533 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
2534 2489
2535#ifdef CONFIG_SCHEDSTATS 2490#ifdef CONFIG_SCHEDSTATS
2536 p->se.wait_start = 0; 2491 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2537 p->se.wait_max = 0;
2538 p->se.wait_count = 0;
2539 p->se.wait_sum = 0;
2540
2541 p->se.sleep_start = 0;
2542 p->se.sleep_max = 0;
2543 p->se.sum_sleep_runtime = 0;
2544
2545 p->se.block_start = 0;
2546 p->se.block_max = 0;
2547 p->se.exec_max = 0;
2548 p->se.slice_max = 0;
2549
2550 p->se.nr_migrations_cold = 0;
2551 p->se.nr_failed_migrations_affine = 0;
2552 p->se.nr_failed_migrations_running = 0;
2553 p->se.nr_failed_migrations_hot = 0;
2554 p->se.nr_forced_migrations = 0;
2555
2556 p->se.nr_wakeups = 0;
2557 p->se.nr_wakeups_sync = 0;
2558 p->se.nr_wakeups_migrate = 0;
2559 p->se.nr_wakeups_local = 0;
2560 p->se.nr_wakeups_remote = 0;
2561 p->se.nr_wakeups_affine = 0;
2562 p->se.nr_wakeups_affine_attempts = 0;
2563 p->se.nr_wakeups_passive = 0;
2564 p->se.nr_wakeups_idle = 0;
2565
2566#endif 2492#endif
2567 2493
2568 INIT_LIST_HEAD(&p->rt.run_list); 2494 INIT_LIST_HEAD(&p->rt.run_list);
@@ -2583,11 +2509,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
2583 2509
2584 __sched_fork(p); 2510 __sched_fork(p);
2585 /* 2511 /*
2586 * We mark the process as waking here. This guarantees that 2512 * We mark the process as running here. This guarantees that
2587 * nobody will actually run it, and a signal or other external 2513 * nobody will actually run it, and a signal or other external
2588 * event cannot wake it up and insert it on the runqueue either. 2514 * event cannot wake it up and insert it on the runqueue either.
2589 */ 2515 */
2590 p->state = TASK_WAKING; 2516 p->state = TASK_RUNNING;
2591 2517
2592 /* 2518 /*
2593 * Revert to default priority/policy on fork if requested. 2519 * Revert to default priority/policy on fork if requested.
@@ -2654,31 +2580,27 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2654 int cpu __maybe_unused = get_cpu(); 2580 int cpu __maybe_unused = get_cpu();
2655 2581
2656#ifdef CONFIG_SMP 2582#ifdef CONFIG_SMP
2583 rq = task_rq_lock(p, &flags);
2584 p->state = TASK_WAKING;
2585
2657 /* 2586 /*
2658 * Fork balancing, do it here and not earlier because: 2587 * Fork balancing, do it here and not earlier because:
2659 * - cpus_allowed can change in the fork path 2588 * - cpus_allowed can change in the fork path
2660 * - any previously selected cpu might disappear through hotplug 2589 * - any previously selected cpu might disappear through hotplug
2661 * 2590 *
2662 * We still have TASK_WAKING but PF_STARTING is gone now, meaning 2591 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
2663 * ->cpus_allowed is stable, we have preemption disabled, meaning 2592 * without people poking at ->cpus_allowed.
2664 * cpu_online_mask is stable.
2665 */ 2593 */
2666 cpu = select_task_rq(p, SD_BALANCE_FORK, 0); 2594 cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
2667 set_task_cpu(p, cpu); 2595 set_task_cpu(p, cpu);
2668#endif
2669 2596
2670 /*
2671 * Since the task is not on the rq and we still have TASK_WAKING set
2672 * nobody else will migrate this task.
2673 */
2674 rq = cpu_rq(cpu);
2675 raw_spin_lock_irqsave(&rq->lock, flags);
2676
2677 BUG_ON(p->state != TASK_WAKING);
2678 p->state = TASK_RUNNING; 2597 p->state = TASK_RUNNING;
2679 update_rq_clock(rq); 2598 task_rq_unlock(rq, &flags);
2599#endif
2600
2601 rq = task_rq_lock(p, &flags);
2680 activate_task(rq, p, 0); 2602 activate_task(rq, p, 0);
2681 trace_sched_wakeup_new(rq, p, 1); 2603 trace_sched_wakeup_new(p, 1);
2682 check_preempt_curr(rq, p, WF_FORK); 2604 check_preempt_curr(rq, p, WF_FORK);
2683#ifdef CONFIG_SMP 2605#ifdef CONFIG_SMP
2684 if (p->sched_class->task_woken) 2606 if (p->sched_class->task_woken)
@@ -2898,7 +2820,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
2898 struct mm_struct *mm, *oldmm; 2820 struct mm_struct *mm, *oldmm;
2899 2821
2900 prepare_task_switch(rq, prev, next); 2822 prepare_task_switch(rq, prev, next);
2901 trace_sched_switch(rq, prev, next); 2823 trace_sched_switch(prev, next);
2902 mm = next->mm; 2824 mm = next->mm;
2903 oldmm = prev->active_mm; 2825 oldmm = prev->active_mm;
2904 /* 2826 /*
@@ -3015,6 +2937,61 @@ static unsigned long calc_load_update;
3015unsigned long avenrun[3]; 2937unsigned long avenrun[3];
3016EXPORT_SYMBOL(avenrun); 2938EXPORT_SYMBOL(avenrun);
3017 2939
2940static long calc_load_fold_active(struct rq *this_rq)
2941{
2942 long nr_active, delta = 0;
2943
2944 nr_active = this_rq->nr_running;
2945 nr_active += (long) this_rq->nr_uninterruptible;
2946
2947 if (nr_active != this_rq->calc_load_active) {
2948 delta = nr_active - this_rq->calc_load_active;
2949 this_rq->calc_load_active = nr_active;
2950 }
2951
2952 return delta;
2953}
2954
2955#ifdef CONFIG_NO_HZ
2956/*
2957 * For NO_HZ we delay the active fold to the next LOAD_FREQ update.
2958 *
2959 * When making the ILB scale, we should try to pull this in as well.
2960 */
2961static atomic_long_t calc_load_tasks_idle;
2962
2963static void calc_load_account_idle(struct rq *this_rq)
2964{
2965 long delta;
2966
2967 delta = calc_load_fold_active(this_rq);
2968 if (delta)
2969 atomic_long_add(delta, &calc_load_tasks_idle);
2970}
2971
2972static long calc_load_fold_idle(void)
2973{
2974 long delta = 0;
2975
2976 /*
2977 * Its got a race, we don't care...
2978 */
2979 if (atomic_long_read(&calc_load_tasks_idle))
2980 delta = atomic_long_xchg(&calc_load_tasks_idle, 0);
2981
2982 return delta;
2983}
2984#else
2985static void calc_load_account_idle(struct rq *this_rq)
2986{
2987}
2988
2989static inline long calc_load_fold_idle(void)
2990{
2991 return 0;
2992}
2993#endif
2994
3018/** 2995/**
3019 * get_avenrun - get the load average array 2996 * get_avenrun - get the load average array
3020 * @loads: pointer to dest load array 2997 * @loads: pointer to dest load array
@@ -3061,20 +3038,22 @@ void calc_global_load(void)
3061} 3038}
3062 3039
3063/* 3040/*
3064 * Either called from update_cpu_load() or from a cpu going idle 3041 * Called from update_cpu_load() to periodically update this CPU's
3042 * active count.
3065 */ 3043 */
3066static void calc_load_account_active(struct rq *this_rq) 3044static void calc_load_account_active(struct rq *this_rq)
3067{ 3045{
3068 long nr_active, delta; 3046 long delta;
3069 3047
3070 nr_active = this_rq->nr_running; 3048 if (time_before(jiffies, this_rq->calc_load_update))
3071 nr_active += (long) this_rq->nr_uninterruptible; 3049 return;
3072 3050
3073 if (nr_active != this_rq->calc_load_active) { 3051 delta = calc_load_fold_active(this_rq);
3074 delta = nr_active - this_rq->calc_load_active; 3052 delta += calc_load_fold_idle();
3075 this_rq->calc_load_active = nr_active; 3053 if (delta)
3076 atomic_long_add(delta, &calc_load_tasks); 3054 atomic_long_add(delta, &calc_load_tasks);
3077 } 3055
3056 this_rq->calc_load_update += LOAD_FREQ;
3078} 3057}
3079 3058
3080/* 3059/*
@@ -3106,10 +3085,7 @@ static void update_cpu_load(struct rq *this_rq)
3106 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i; 3085 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
3107 } 3086 }
3108 3087
3109 if (time_after_eq(jiffies, this_rq->calc_load_update)) { 3088 calc_load_account_active(this_rq);
3110 this_rq->calc_load_update += LOAD_FREQ;
3111 calc_load_account_active(this_rq);
3112 }
3113} 3089}
3114 3090
3115#ifdef CONFIG_SMP 3091#ifdef CONFIG_SMP
@@ -3121,44 +3097,27 @@ static void update_cpu_load(struct rq *this_rq)
3121void sched_exec(void) 3097void sched_exec(void)
3122{ 3098{
3123 struct task_struct *p = current; 3099 struct task_struct *p = current;
3124 struct migration_req req;
3125 int dest_cpu, this_cpu;
3126 unsigned long flags; 3100 unsigned long flags;
3127 struct rq *rq; 3101 struct rq *rq;
3128 3102 int dest_cpu;
3129again:
3130 this_cpu = get_cpu();
3131 dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
3132 if (dest_cpu == this_cpu) {
3133 put_cpu();
3134 return;
3135 }
3136 3103
3137 rq = task_rq_lock(p, &flags); 3104 rq = task_rq_lock(p, &flags);
3138 put_cpu(); 3105 dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
3106 if (dest_cpu == smp_processor_id())
3107 goto unlock;
3139 3108
3140 /* 3109 /*
3141 * select_task_rq() can race against ->cpus_allowed 3110 * select_task_rq() can race against ->cpus_allowed
3142 */ 3111 */
3143 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) 3112 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
3144 || unlikely(!cpu_active(dest_cpu))) { 3113 likely(cpu_active(dest_cpu)) && migrate_task(p, dest_cpu)) {
3145 task_rq_unlock(rq, &flags); 3114 struct migration_arg arg = { p, dest_cpu };
3146 goto again;
3147 }
3148 3115
3149 /* force the process onto the specified CPU */
3150 if (migrate_task(p, dest_cpu, &req)) {
3151 /* Need to wait for migration thread (might exit: take ref). */
3152 struct task_struct *mt = rq->migration_thread;
3153
3154 get_task_struct(mt);
3155 task_rq_unlock(rq, &flags); 3116 task_rq_unlock(rq, &flags);
3156 wake_up_process(mt); 3117 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
3157 put_task_struct(mt);
3158 wait_for_completion(&req.done);
3159
3160 return; 3118 return;
3161 } 3119 }
3120unlock:
3162 task_rq_unlock(rq, &flags); 3121 task_rq_unlock(rq, &flags);
3163} 3122}
3164 3123
@@ -3630,23 +3589,9 @@ static inline void schedule_debug(struct task_struct *prev)
3630 3589
3631static void put_prev_task(struct rq *rq, struct task_struct *prev) 3590static void put_prev_task(struct rq *rq, struct task_struct *prev)
3632{ 3591{
3633 if (prev->state == TASK_RUNNING) { 3592 if (prev->se.on_rq)
3634 u64 runtime = prev->se.sum_exec_runtime; 3593 update_rq_clock(rq);
3635 3594 rq->skip_clock_update = 0;
3636 runtime -= prev->se.prev_sum_exec_runtime;
3637 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
3638
3639 /*
3640 * In order to avoid avg_overlap growing stale when we are
3641 * indeed overlapping and hence not getting put to sleep, grow
3642 * the avg_overlap on preemption.
3643 *
3644 * We use the average preemption runtime because that
3645 * correlates to the amount of cache footprint a task can
3646 * build up.
3647 */
3648 update_avg(&prev->se.avg_overlap, runtime);
3649 }
3650 prev->sched_class->put_prev_task(rq, prev); 3595 prev->sched_class->put_prev_task(rq, prev);
3651} 3596}
3652 3597
@@ -3709,14 +3654,13 @@ need_resched_nonpreemptible:
3709 hrtick_clear(rq); 3654 hrtick_clear(rq);
3710 3655
3711 raw_spin_lock_irq(&rq->lock); 3656 raw_spin_lock_irq(&rq->lock);
3712 update_rq_clock(rq);
3713 clear_tsk_need_resched(prev); 3657 clear_tsk_need_resched(prev);
3714 3658
3715 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { 3659 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3716 if (unlikely(signal_pending_state(prev->state, prev))) 3660 if (unlikely(signal_pending_state(prev->state, prev)))
3717 prev->state = TASK_RUNNING; 3661 prev->state = TASK_RUNNING;
3718 else 3662 else
3719 deactivate_task(rq, prev, 1); 3663 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3720 switch_count = &prev->nvcsw; 3664 switch_count = &prev->nvcsw;
3721 } 3665 }
3722 3666
@@ -4039,8 +3983,7 @@ do_wait_for_common(struct completion *x, long timeout, int state)
4039 if (!x->done) { 3983 if (!x->done) {
4040 DECLARE_WAITQUEUE(wait, current); 3984 DECLARE_WAITQUEUE(wait, current);
4041 3985
4042 wait.flags |= WQ_FLAG_EXCLUSIVE; 3986 __add_wait_queue_tail_exclusive(&x->wait, &wait);
4043 __add_wait_queue_tail(&x->wait, &wait);
4044 do { 3987 do {
4045 if (signal_pending_state(state, current)) { 3988 if (signal_pending_state(state, current)) {
4046 timeout = -ERESTARTSYS; 3989 timeout = -ERESTARTSYS;
@@ -4266,7 +4209,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
4266 BUG_ON(prio < 0 || prio > MAX_PRIO); 4209 BUG_ON(prio < 0 || prio > MAX_PRIO);
4267 4210
4268 rq = task_rq_lock(p, &flags); 4211 rq = task_rq_lock(p, &flags);
4269 update_rq_clock(rq);
4270 4212
4271 oldprio = p->prio; 4213 oldprio = p->prio;
4272 prev_class = p->sched_class; 4214 prev_class = p->sched_class;
@@ -4287,7 +4229,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
4287 if (running) 4229 if (running)
4288 p->sched_class->set_curr_task(rq); 4230 p->sched_class->set_curr_task(rq);
4289 if (on_rq) { 4231 if (on_rq) {
4290 enqueue_task(rq, p, 0, oldprio < prio); 4232 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
4291 4233
4292 check_class_changed(rq, p, prev_class, oldprio, running); 4234 check_class_changed(rq, p, prev_class, oldprio, running);
4293 } 4235 }
@@ -4309,7 +4251,6 @@ void set_user_nice(struct task_struct *p, long nice)
4309 * the task might be in the middle of scheduling on another CPU. 4251 * the task might be in the middle of scheduling on another CPU.
4310 */ 4252 */
4311 rq = task_rq_lock(p, &flags); 4253 rq = task_rq_lock(p, &flags);
4312 update_rq_clock(rq);
4313 /* 4254 /*
4314 * The RT priorities are set via sched_setscheduler(), but we still 4255 * The RT priorities are set via sched_setscheduler(), but we still
4315 * allow the 'normal' nice value to be set - but as expected 4256 * allow the 'normal' nice value to be set - but as expected
@@ -4331,7 +4272,7 @@ void set_user_nice(struct task_struct *p, long nice)
4331 delta = p->prio - old_prio; 4272 delta = p->prio - old_prio;
4332 4273
4333 if (on_rq) { 4274 if (on_rq) {
4334 enqueue_task(rq, p, 0, false); 4275 enqueue_task(rq, p, 0);
4335 /* 4276 /*
4336 * If the task increased its priority or is running and 4277 * If the task increased its priority or is running and
4337 * lowered its priority, then reschedule its CPU: 4278 * lowered its priority, then reschedule its CPU:
@@ -4592,7 +4533,6 @@ recheck:
4592 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4533 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4593 goto recheck; 4534 goto recheck;
4594 } 4535 }
4595 update_rq_clock(rq);
4596 on_rq = p->se.on_rq; 4536 on_rq = p->se.on_rq;
4597 running = task_current(rq, p); 4537 running = task_current(rq, p);
4598 if (on_rq) 4538 if (on_rq)
@@ -5329,17 +5269,15 @@ static inline void sched_init_granularity(void)
5329/* 5269/*
5330 * This is how migration works: 5270 * This is how migration works:
5331 * 5271 *
5332 * 1) we queue a struct migration_req structure in the source CPU's 5272 * 1) we invoke migration_cpu_stop() on the target CPU using
5333 * runqueue and wake up that CPU's migration thread. 5273 * stop_one_cpu().
5334 * 2) we down() the locked semaphore => thread blocks. 5274 * 2) stopper starts to run (implicitly forcing the migrated thread
5335 * 3) migration thread wakes up (implicitly it forces the migrated 5275 * off the CPU)
5336 * thread off the CPU) 5276 * 3) it checks whether the migrated task is still in the wrong runqueue.
5337 * 4) it gets the migration request and checks whether the migrated 5277 * 4) if it's in the wrong runqueue then the migration thread removes
5338 * task is still in the wrong runqueue.
5339 * 5) if it's in the wrong runqueue then the migration thread removes
5340 * it and puts it into the right queue. 5278 * it and puts it into the right queue.
5341 * 6) migration thread up()s the semaphore. 5279 * 5) stopper completes and stop_one_cpu() returns and the migration
5342 * 7) we wake up and the migration is done. 5280 * is done.
5343 */ 5281 */
5344 5282
5345/* 5283/*
@@ -5353,12 +5291,23 @@ static inline void sched_init_granularity(void)
5353 */ 5291 */
5354int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) 5292int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5355{ 5293{
5356 struct migration_req req;
5357 unsigned long flags; 5294 unsigned long flags;
5358 struct rq *rq; 5295 struct rq *rq;
5296 unsigned int dest_cpu;
5359 int ret = 0; 5297 int ret = 0;
5360 5298
5299 /*
5300 * Serialize against TASK_WAKING so that ttwu() and wunt() can
5301 * drop the rq->lock and still rely on ->cpus_allowed.
5302 */
5303again:
5304 while (task_is_waking(p))
5305 cpu_relax();
5361 rq = task_rq_lock(p, &flags); 5306 rq = task_rq_lock(p, &flags);
5307 if (task_is_waking(p)) {
5308 task_rq_unlock(rq, &flags);
5309 goto again;
5310 }
5362 5311
5363 if (!cpumask_intersects(new_mask, cpu_active_mask)) { 5312 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
5364 ret = -EINVAL; 5313 ret = -EINVAL;
@@ -5382,15 +5331,12 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5382 if (cpumask_test_cpu(task_cpu(p), new_mask)) 5331 if (cpumask_test_cpu(task_cpu(p), new_mask))
5383 goto out; 5332 goto out;
5384 5333
5385 if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) { 5334 dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
5335 if (migrate_task(p, dest_cpu)) {
5336 struct migration_arg arg = { p, dest_cpu };
5386 /* Need help from migration thread: drop lock and wait. */ 5337 /* Need help from migration thread: drop lock and wait. */
5387 struct task_struct *mt = rq->migration_thread;
5388
5389 get_task_struct(mt);
5390 task_rq_unlock(rq, &flags); 5338 task_rq_unlock(rq, &flags);
5391 wake_up_process(mt); 5339 stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
5392 put_task_struct(mt);
5393 wait_for_completion(&req.done);
5394 tlb_migrate_finish(p->mm); 5340 tlb_migrate_finish(p->mm);
5395 return 0; 5341 return 0;
5396 } 5342 }
@@ -5448,98 +5394,49 @@ fail:
5448 return ret; 5394 return ret;
5449} 5395}
5450 5396
5451#define RCU_MIGRATION_IDLE 0
5452#define RCU_MIGRATION_NEED_QS 1
5453#define RCU_MIGRATION_GOT_QS 2
5454#define RCU_MIGRATION_MUST_SYNC 3
5455
5456/* 5397/*
5457 * migration_thread - this is a highprio system thread that performs 5398 * migration_cpu_stop - this will be executed by a highprio stopper thread
5458 * thread migration by bumping thread off CPU then 'pushing' onto 5399 * and performs thread migration by bumping thread off CPU then
5459 * another runqueue. 5400 * 'pushing' onto another runqueue.
5460 */ 5401 */
5461static int migration_thread(void *data) 5402static int migration_cpu_stop(void *data)
5462{ 5403{
5463 int badcpu; 5404 struct migration_arg *arg = data;
5464 int cpu = (long)data;
5465 struct rq *rq;
5466
5467 rq = cpu_rq(cpu);
5468 BUG_ON(rq->migration_thread != current);
5469
5470 set_current_state(TASK_INTERRUPTIBLE);
5471 while (!kthread_should_stop()) {
5472 struct migration_req *req;
5473 struct list_head *head;
5474
5475 raw_spin_lock_irq(&rq->lock);
5476
5477 if (cpu_is_offline(cpu)) {
5478 raw_spin_unlock_irq(&rq->lock);
5479 break;
5480 }
5481
5482 if (rq->active_balance) {
5483 active_load_balance(rq, cpu);
5484 rq->active_balance = 0;
5485 }
5486
5487 head = &rq->migration_queue;
5488
5489 if (list_empty(head)) {
5490 raw_spin_unlock_irq(&rq->lock);
5491 schedule();
5492 set_current_state(TASK_INTERRUPTIBLE);
5493 continue;
5494 }
5495 req = list_entry(head->next, struct migration_req, list);
5496 list_del_init(head->next);
5497
5498 if (req->task != NULL) {
5499 raw_spin_unlock(&rq->lock);
5500 __migrate_task(req->task, cpu, req->dest_cpu);
5501 } else if (likely(cpu == (badcpu = smp_processor_id()))) {
5502 req->dest_cpu = RCU_MIGRATION_GOT_QS;
5503 raw_spin_unlock(&rq->lock);
5504 } else {
5505 req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
5506 raw_spin_unlock(&rq->lock);
5507 WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
5508 }
5509 local_irq_enable();
5510
5511 complete(&req->done);
5512 }
5513 __set_current_state(TASK_RUNNING);
5514
5515 return 0;
5516}
5517
5518#ifdef CONFIG_HOTPLUG_CPU
5519
5520static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
5521{
5522 int ret;
5523 5405
5406 /*
5407 * The original target cpu might have gone down and we might
5408 * be on another cpu but it doesn't matter.
5409 */
5524 local_irq_disable(); 5410 local_irq_disable();
5525 ret = __migrate_task(p, src_cpu, dest_cpu); 5411 __migrate_task(arg->task, raw_smp_processor_id(), arg->dest_cpu);
5526 local_irq_enable(); 5412 local_irq_enable();
5527 return ret; 5413 return 0;
5528} 5414}
5529 5415
5416#ifdef CONFIG_HOTPLUG_CPU
5530/* 5417/*
5531 * Figure out where task on dead CPU should go, use force if necessary. 5418 * Figure out where task on dead CPU should go, use force if necessary.
5532 */ 5419 */
5533static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 5420void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
5534{ 5421{
5535 int dest_cpu; 5422 struct rq *rq = cpu_rq(dead_cpu);
5423 int needs_cpu, uninitialized_var(dest_cpu);
5424 unsigned long flags;
5536 5425
5537again: 5426 local_irq_save(flags);
5538 dest_cpu = select_fallback_rq(dead_cpu, p);
5539 5427
5540 /* It can have affinity changed while we were choosing. */ 5428 raw_spin_lock(&rq->lock);
5541 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) 5429 needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
5542 goto again; 5430 if (needs_cpu)
5431 dest_cpu = select_fallback_rq(dead_cpu, p);
5432 raw_spin_unlock(&rq->lock);
5433 /*
5434 * It can only fail if we race with set_cpus_allowed(),
5435 * in the racer should migrate the task anyway.
5436 */
5437 if (needs_cpu)
5438 __migrate_task(p, dead_cpu, dest_cpu);
5439 local_irq_restore(flags);
5543} 5440}
5544 5441
5545/* 5442/*
@@ -5603,7 +5500,6 @@ void sched_idle_next(void)
5603 5500
5604 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); 5501 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
5605 5502
5606 update_rq_clock(rq);
5607 activate_task(rq, p, 0); 5503 activate_task(rq, p, 0);
5608 5504
5609 raw_spin_unlock_irqrestore(&rq->lock, flags); 5505 raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -5658,7 +5554,6 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
5658 for ( ; ; ) { 5554 for ( ; ; ) {
5659 if (!rq->nr_running) 5555 if (!rq->nr_running)
5660 break; 5556 break;
5661 update_rq_clock(rq);
5662 next = pick_next_task(rq); 5557 next = pick_next_task(rq);
5663 if (!next) 5558 if (!next)
5664 break; 5559 break;
@@ -5881,35 +5776,20 @@ static void set_rq_offline(struct rq *rq)
5881static int __cpuinit 5776static int __cpuinit
5882migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) 5777migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5883{ 5778{
5884 struct task_struct *p;
5885 int cpu = (long)hcpu; 5779 int cpu = (long)hcpu;
5886 unsigned long flags; 5780 unsigned long flags;
5887 struct rq *rq; 5781 struct rq *rq = cpu_rq(cpu);
5888 5782
5889 switch (action) { 5783 switch (action) {
5890 5784
5891 case CPU_UP_PREPARE: 5785 case CPU_UP_PREPARE:
5892 case CPU_UP_PREPARE_FROZEN: 5786 case CPU_UP_PREPARE_FROZEN:
5893 p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
5894 if (IS_ERR(p))
5895 return NOTIFY_BAD;
5896 kthread_bind(p, cpu);
5897 /* Must be high prio: stop_machine expects to yield to it. */
5898 rq = task_rq_lock(p, &flags);
5899 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
5900 task_rq_unlock(rq, &flags);
5901 get_task_struct(p);
5902 cpu_rq(cpu)->migration_thread = p;
5903 rq->calc_load_update = calc_load_update; 5787 rq->calc_load_update = calc_load_update;
5904 break; 5788 break;
5905 5789
5906 case CPU_ONLINE: 5790 case CPU_ONLINE:
5907 case CPU_ONLINE_FROZEN: 5791 case CPU_ONLINE_FROZEN:
5908 /* Strictly unnecessary, as first user will wake it. */
5909 wake_up_process(cpu_rq(cpu)->migration_thread);
5910
5911 /* Update our root-domain */ 5792 /* Update our root-domain */
5912 rq = cpu_rq(cpu);
5913 raw_spin_lock_irqsave(&rq->lock, flags); 5793 raw_spin_lock_irqsave(&rq->lock, flags);
5914 if (rq->rd) { 5794 if (rq->rd) {
5915 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5795 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
@@ -5920,61 +5800,24 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5920 break; 5800 break;
5921 5801
5922#ifdef CONFIG_HOTPLUG_CPU 5802#ifdef CONFIG_HOTPLUG_CPU
5923 case CPU_UP_CANCELED:
5924 case CPU_UP_CANCELED_FROZEN:
5925 if (!cpu_rq(cpu)->migration_thread)
5926 break;
5927 /* Unbind it from offline cpu so it can run. Fall thru. */
5928 kthread_bind(cpu_rq(cpu)->migration_thread,
5929 cpumask_any(cpu_online_mask));
5930 kthread_stop(cpu_rq(cpu)->migration_thread);
5931 put_task_struct(cpu_rq(cpu)->migration_thread);
5932 cpu_rq(cpu)->migration_thread = NULL;
5933 break;
5934
5935 case CPU_DEAD: 5803 case CPU_DEAD:
5936 case CPU_DEAD_FROZEN: 5804 case CPU_DEAD_FROZEN:
5937 cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
5938 migrate_live_tasks(cpu); 5805 migrate_live_tasks(cpu);
5939 rq = cpu_rq(cpu);
5940 kthread_stop(rq->migration_thread);
5941 put_task_struct(rq->migration_thread);
5942 rq->migration_thread = NULL;
5943 /* Idle task back to normal (off runqueue, low prio) */ 5806 /* Idle task back to normal (off runqueue, low prio) */
5944 raw_spin_lock_irq(&rq->lock); 5807 raw_spin_lock_irq(&rq->lock);
5945 update_rq_clock(rq);
5946 deactivate_task(rq, rq->idle, 0); 5808 deactivate_task(rq, rq->idle, 0);
5947 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); 5809 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
5948 rq->idle->sched_class = &idle_sched_class; 5810 rq->idle->sched_class = &idle_sched_class;
5949 migrate_dead_tasks(cpu); 5811 migrate_dead_tasks(cpu);
5950 raw_spin_unlock_irq(&rq->lock); 5812 raw_spin_unlock_irq(&rq->lock);
5951 cpuset_unlock();
5952 migrate_nr_uninterruptible(rq); 5813 migrate_nr_uninterruptible(rq);
5953 BUG_ON(rq->nr_running != 0); 5814 BUG_ON(rq->nr_running != 0);
5954 calc_global_load_remove(rq); 5815 calc_global_load_remove(rq);
5955 /*
5956 * No need to migrate the tasks: it was best-effort if
5957 * they didn't take sched_hotcpu_mutex. Just wake up
5958 * the requestors.
5959 */
5960 raw_spin_lock_irq(&rq->lock);
5961 while (!list_empty(&rq->migration_queue)) {
5962 struct migration_req *req;
5963
5964 req = list_entry(rq->migration_queue.next,
5965 struct migration_req, list);
5966 list_del_init(&req->list);
5967 raw_spin_unlock_irq(&rq->lock);
5968 complete(&req->done);
5969 raw_spin_lock_irq(&rq->lock);
5970 }
5971 raw_spin_unlock_irq(&rq->lock);
5972 break; 5816 break;
5973 5817
5974 case CPU_DYING: 5818 case CPU_DYING:
5975 case CPU_DYING_FROZEN: 5819 case CPU_DYING_FROZEN:
5976 /* Update our root-domain */ 5820 /* Update our root-domain */
5977 rq = cpu_rq(cpu);
5978 raw_spin_lock_irqsave(&rq->lock, flags); 5821 raw_spin_lock_irqsave(&rq->lock, flags);
5979 if (rq->rd) { 5822 if (rq->rd) {
5980 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 5823 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
@@ -6305,6 +6148,9 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
6305 struct rq *rq = cpu_rq(cpu); 6148 struct rq *rq = cpu_rq(cpu);
6306 struct sched_domain *tmp; 6149 struct sched_domain *tmp;
6307 6150
6151 for (tmp = sd; tmp; tmp = tmp->parent)
6152 tmp->span_weight = cpumask_weight(sched_domain_span(tmp));
6153
6308 /* Remove the sched domains which do not contribute to scheduling. */ 6154 /* Remove the sched domains which do not contribute to scheduling. */
6309 for (tmp = sd; tmp; ) { 6155 for (tmp = sd; tmp; ) {
6310 struct sched_domain *parent = tmp->parent; 6156 struct sched_domain *parent = tmp->parent;
@@ -7788,10 +7634,8 @@ void __init sched_init(void)
7788 rq->push_cpu = 0; 7634 rq->push_cpu = 0;
7789 rq->cpu = i; 7635 rq->cpu = i;
7790 rq->online = 0; 7636 rq->online = 0;
7791 rq->migration_thread = NULL;
7792 rq->idle_stamp = 0; 7637 rq->idle_stamp = 0;
7793 rq->avg_idle = 2*sysctl_sched_migration_cost; 7638 rq->avg_idle = 2*sysctl_sched_migration_cost;
7794 INIT_LIST_HEAD(&rq->migration_queue);
7795 rq_attach_root(rq, &def_root_domain); 7639 rq_attach_root(rq, &def_root_domain);
7796#endif 7640#endif
7797 init_rq_hrtick(rq); 7641 init_rq_hrtick(rq);
@@ -7892,7 +7736,6 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
7892{ 7736{
7893 int on_rq; 7737 int on_rq;
7894 7738
7895 update_rq_clock(rq);
7896 on_rq = p->se.on_rq; 7739 on_rq = p->se.on_rq;
7897 if (on_rq) 7740 if (on_rq)
7898 deactivate_task(rq, p, 0); 7741 deactivate_task(rq, p, 0);
@@ -7919,9 +7762,9 @@ void normalize_rt_tasks(void)
7919 7762
7920 p->se.exec_start = 0; 7763 p->se.exec_start = 0;
7921#ifdef CONFIG_SCHEDSTATS 7764#ifdef CONFIG_SCHEDSTATS
7922 p->se.wait_start = 0; 7765 p->se.statistics.wait_start = 0;
7923 p->se.sleep_start = 0; 7766 p->se.statistics.sleep_start = 0;
7924 p->se.block_start = 0; 7767 p->se.statistics.block_start = 0;
7925#endif 7768#endif
7926 7769
7927 if (!rt_task(p)) { 7770 if (!rt_task(p)) {
@@ -8254,8 +8097,6 @@ void sched_move_task(struct task_struct *tsk)
8254 8097
8255 rq = task_rq_lock(tsk, &flags); 8098 rq = task_rq_lock(tsk, &flags);
8256 8099
8257 update_rq_clock(rq);
8258
8259 running = task_current(rq, tsk); 8100 running = task_current(rq, tsk);
8260 on_rq = tsk->se.on_rq; 8101 on_rq = tsk->se.on_rq;
8261 8102
@@ -8274,7 +8115,7 @@ void sched_move_task(struct task_struct *tsk)
8274 if (unlikely(running)) 8115 if (unlikely(running))
8275 tsk->sched_class->set_curr_task(rq); 8116 tsk->sched_class->set_curr_task(rq);
8276 if (on_rq) 8117 if (on_rq)
8277 enqueue_task(rq, tsk, 0, false); 8118 enqueue_task(rq, tsk, 0);
8278 8119
8279 task_rq_unlock(rq, &flags); 8120 task_rq_unlock(rq, &flags);
8280} 8121}
@@ -9088,43 +8929,32 @@ struct cgroup_subsys cpuacct_subsys = {
9088 8929
9089#ifndef CONFIG_SMP 8930#ifndef CONFIG_SMP
9090 8931
9091int rcu_expedited_torture_stats(char *page)
9092{
9093 return 0;
9094}
9095EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
9096
9097void synchronize_sched_expedited(void) 8932void synchronize_sched_expedited(void)
9098{ 8933{
8934 barrier();
9099} 8935}
9100EXPORT_SYMBOL_GPL(synchronize_sched_expedited); 8936EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
9101 8937
9102#else /* #ifndef CONFIG_SMP */ 8938#else /* #ifndef CONFIG_SMP */
9103 8939
9104static DEFINE_PER_CPU(struct migration_req, rcu_migration_req); 8940static atomic_t synchronize_sched_expedited_count = ATOMIC_INIT(0);
9105static DEFINE_MUTEX(rcu_sched_expedited_mutex);
9106
9107#define RCU_EXPEDITED_STATE_POST -2
9108#define RCU_EXPEDITED_STATE_IDLE -1
9109
9110static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
9111 8941
9112int rcu_expedited_torture_stats(char *page) 8942static int synchronize_sched_expedited_cpu_stop(void *data)
9113{ 8943{
9114 int cnt = 0; 8944 /*
9115 int cpu; 8945 * There must be a full memory barrier on each affected CPU
9116 8946 * between the time that try_stop_cpus() is called and the
9117 cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state); 8947 * time that it returns.
9118 for_each_online_cpu(cpu) { 8948 *
9119 cnt += sprintf(&page[cnt], " %d:%d", 8949 * In the current initial implementation of cpu_stop, the
9120 cpu, per_cpu(rcu_migration_req, cpu).dest_cpu); 8950 * above condition is already met when the control reaches
9121 } 8951 * this point and the following smp_mb() is not strictly
9122 cnt += sprintf(&page[cnt], "\n"); 8952 * necessary. Do smp_mb() anyway for documentation and
9123 return cnt; 8953 * robustness against future implementation changes.
8954 */
8955 smp_mb(); /* See above comment block. */
8956 return 0;
9124} 8957}
9125EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);
9126
9127static long synchronize_sched_expedited_count;
9128 8958
9129/* 8959/*
9130 * Wait for an rcu-sched grace period to elapse, but use "big hammer" 8960 * Wait for an rcu-sched grace period to elapse, but use "big hammer"
@@ -9138,18 +8968,14 @@ static long synchronize_sched_expedited_count;
9138 */ 8968 */
9139void synchronize_sched_expedited(void) 8969void synchronize_sched_expedited(void)
9140{ 8970{
9141 int cpu; 8971 int snap, trycount = 0;
9142 unsigned long flags;
9143 bool need_full_sync = 0;
9144 struct rq *rq;
9145 struct migration_req *req;
9146 long snap;
9147 int trycount = 0;
9148 8972
9149 smp_mb(); /* ensure prior mod happens before capturing snap. */ 8973 smp_mb(); /* ensure prior mod happens before capturing snap. */
9150 snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1; 8974 snap = atomic_read(&synchronize_sched_expedited_count) + 1;
9151 get_online_cpus(); 8975 get_online_cpus();
9152 while (!mutex_trylock(&rcu_sched_expedited_mutex)) { 8976 while (try_stop_cpus(cpu_online_mask,
8977 synchronize_sched_expedited_cpu_stop,
8978 NULL) == -EAGAIN) {
9153 put_online_cpus(); 8979 put_online_cpus();
9154 if (trycount++ < 10) 8980 if (trycount++ < 10)
9155 udelay(trycount * num_online_cpus()); 8981 udelay(trycount * num_online_cpus());
@@ -9157,41 +8983,15 @@ void synchronize_sched_expedited(void)
9157 synchronize_sched(); 8983 synchronize_sched();
9158 return; 8984 return;
9159 } 8985 }
9160 if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) { 8986 if (atomic_read(&synchronize_sched_expedited_count) - snap > 0) {
9161 smp_mb(); /* ensure test happens before caller kfree */ 8987 smp_mb(); /* ensure test happens before caller kfree */
9162 return; 8988 return;
9163 } 8989 }
9164 get_online_cpus(); 8990 get_online_cpus();
9165 } 8991 }
9166 rcu_expedited_state = RCU_EXPEDITED_STATE_POST; 8992 atomic_inc(&synchronize_sched_expedited_count);
9167 for_each_online_cpu(cpu) { 8993 smp_mb__after_atomic_inc(); /* ensure post-GP actions seen after GP. */
9168 rq = cpu_rq(cpu);
9169 req = &per_cpu(rcu_migration_req, cpu);
9170 init_completion(&req->done);
9171 req->task = NULL;
9172 req->dest_cpu = RCU_MIGRATION_NEED_QS;
9173 raw_spin_lock_irqsave(&rq->lock, flags);
9174 list_add(&req->list, &rq->migration_queue);
9175 raw_spin_unlock_irqrestore(&rq->lock, flags);
9176 wake_up_process(rq->migration_thread);
9177 }
9178 for_each_online_cpu(cpu) {
9179 rcu_expedited_state = cpu;
9180 req = &per_cpu(rcu_migration_req, cpu);
9181 rq = cpu_rq(cpu);
9182 wait_for_completion(&req->done);
9183 raw_spin_lock_irqsave(&rq->lock, flags);
9184 if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
9185 need_full_sync = 1;
9186 req->dest_cpu = RCU_MIGRATION_IDLE;
9187 raw_spin_unlock_irqrestore(&rq->lock, flags);
9188 }
9189 rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
9190 synchronize_sched_expedited_count++;
9191 mutex_unlock(&rcu_sched_expedited_mutex);
9192 put_online_cpus(); 8994 put_online_cpus();
9193 if (need_full_sync)
9194 synchronize_sched();
9195} 8995}
9196EXPORT_SYMBOL_GPL(synchronize_sched_expedited); 8996EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
9197 8997
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 9b49db144037..9cf1baf6616a 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -70,16 +70,16 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu,
70 PN(se->vruntime); 70 PN(se->vruntime);
71 PN(se->sum_exec_runtime); 71 PN(se->sum_exec_runtime);
72#ifdef CONFIG_SCHEDSTATS 72#ifdef CONFIG_SCHEDSTATS
73 PN(se->wait_start); 73 PN(se->statistics.wait_start);
74 PN(se->sleep_start); 74 PN(se->statistics.sleep_start);
75 PN(se->block_start); 75 PN(se->statistics.block_start);
76 PN(se->sleep_max); 76 PN(se->statistics.sleep_max);
77 PN(se->block_max); 77 PN(se->statistics.block_max);
78 PN(se->exec_max); 78 PN(se->statistics.exec_max);
79 PN(se->slice_max); 79 PN(se->statistics.slice_max);
80 PN(se->wait_max); 80 PN(se->statistics.wait_max);
81 PN(se->wait_sum); 81 PN(se->statistics.wait_sum);
82 P(se->wait_count); 82 P(se->statistics.wait_count);
83#endif 83#endif
84 P(se->load.weight); 84 P(se->load.weight);
85#undef PN 85#undef PN
@@ -104,7 +104,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
104 SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", 104 SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
105 SPLIT_NS(p->se.vruntime), 105 SPLIT_NS(p->se.vruntime),
106 SPLIT_NS(p->se.sum_exec_runtime), 106 SPLIT_NS(p->se.sum_exec_runtime),
107 SPLIT_NS(p->se.sum_sleep_runtime)); 107 SPLIT_NS(p->se.statistics.sum_sleep_runtime));
108#else 108#else
109 SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld", 109 SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
110 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L); 110 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
@@ -173,11 +173,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
173 task_group_path(tg, path, sizeof(path)); 173 task_group_path(tg, path, sizeof(path));
174 174
175 SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path); 175 SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
176#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
177 {
178 uid_t uid = cfs_rq->tg->uid;
179 SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
180 }
181#else 176#else
182 SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu); 177 SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
183#endif 178#endif
@@ -407,40 +402,38 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
407 PN(se.exec_start); 402 PN(se.exec_start);
408 PN(se.vruntime); 403 PN(se.vruntime);
409 PN(se.sum_exec_runtime); 404 PN(se.sum_exec_runtime);
410 PN(se.avg_overlap);
411 PN(se.avg_wakeup);
412 405
413 nr_switches = p->nvcsw + p->nivcsw; 406 nr_switches = p->nvcsw + p->nivcsw;
414 407
415#ifdef CONFIG_SCHEDSTATS 408#ifdef CONFIG_SCHEDSTATS
416 PN(se.wait_start); 409 PN(se.statistics.wait_start);
417 PN(se.sleep_start); 410 PN(se.statistics.sleep_start);
418 PN(se.block_start); 411 PN(se.statistics.block_start);
419 PN(se.sleep_max); 412 PN(se.statistics.sleep_max);
420 PN(se.block_max); 413 PN(se.statistics.block_max);
421 PN(se.exec_max); 414 PN(se.statistics.exec_max);
422 PN(se.slice_max); 415 PN(se.statistics.slice_max);
423 PN(se.wait_max); 416 PN(se.statistics.wait_max);
424 PN(se.wait_sum); 417 PN(se.statistics.wait_sum);
425 P(se.wait_count); 418 P(se.statistics.wait_count);
426 PN(se.iowait_sum); 419 PN(se.statistics.iowait_sum);
427 P(se.iowait_count); 420 P(se.statistics.iowait_count);
428 P(sched_info.bkl_count); 421 P(sched_info.bkl_count);
429 P(se.nr_migrations); 422 P(se.nr_migrations);
430 P(se.nr_migrations_cold); 423 P(se.statistics.nr_migrations_cold);
431 P(se.nr_failed_migrations_affine); 424 P(se.statistics.nr_failed_migrations_affine);
432 P(se.nr_failed_migrations_running); 425 P(se.statistics.nr_failed_migrations_running);
433 P(se.nr_failed_migrations_hot); 426 P(se.statistics.nr_failed_migrations_hot);
434 P(se.nr_forced_migrations); 427 P(se.statistics.nr_forced_migrations);
435 P(se.nr_wakeups); 428 P(se.statistics.nr_wakeups);
436 P(se.nr_wakeups_sync); 429 P(se.statistics.nr_wakeups_sync);
437 P(se.nr_wakeups_migrate); 430 P(se.statistics.nr_wakeups_migrate);
438 P(se.nr_wakeups_local); 431 P(se.statistics.nr_wakeups_local);
439 P(se.nr_wakeups_remote); 432 P(se.statistics.nr_wakeups_remote);
440 P(se.nr_wakeups_affine); 433 P(se.statistics.nr_wakeups_affine);
441 P(se.nr_wakeups_affine_attempts); 434 P(se.statistics.nr_wakeups_affine_attempts);
442 P(se.nr_wakeups_passive); 435 P(se.statistics.nr_wakeups_passive);
443 P(se.nr_wakeups_idle); 436 P(se.statistics.nr_wakeups_idle);
444 437
445 { 438 {
446 u64 avg_atom, avg_per_cpu; 439 u64 avg_atom, avg_per_cpu;
@@ -491,31 +484,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
491void proc_sched_set_task(struct task_struct *p) 484void proc_sched_set_task(struct task_struct *p)
492{ 485{
493#ifdef CONFIG_SCHEDSTATS 486#ifdef CONFIG_SCHEDSTATS
494 p->se.wait_max = 0; 487 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
495 p->se.wait_sum = 0;
496 p->se.wait_count = 0;
497 p->se.iowait_sum = 0;
498 p->se.iowait_count = 0;
499 p->se.sleep_max = 0;
500 p->se.sum_sleep_runtime = 0;
501 p->se.block_max = 0;
502 p->se.exec_max = 0;
503 p->se.slice_max = 0;
504 p->se.nr_migrations = 0;
505 p->se.nr_migrations_cold = 0;
506 p->se.nr_failed_migrations_affine = 0;
507 p->se.nr_failed_migrations_running = 0;
508 p->se.nr_failed_migrations_hot = 0;
509 p->se.nr_forced_migrations = 0;
510 p->se.nr_wakeups = 0;
511 p->se.nr_wakeups_sync = 0;
512 p->se.nr_wakeups_migrate = 0;
513 p->se.nr_wakeups_local = 0;
514 p->se.nr_wakeups_remote = 0;
515 p->se.nr_wakeups_affine = 0;
516 p->se.nr_wakeups_affine_attempts = 0;
517 p->se.nr_wakeups_passive = 0;
518 p->se.nr_wakeups_idle = 0;
519 p->sched_info.bkl_count = 0;
520#endif 488#endif
521} 489}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5a5ea2cd924f..217e4a9393e4 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -35,8 +35,8 @@
35 * (to see the precise effective timeslice length of your workload, 35 * (to see the precise effective timeslice length of your workload,
36 * run vmstat and monitor the context-switches (cs) field) 36 * run vmstat and monitor the context-switches (cs) field)
37 */ 37 */
38unsigned int sysctl_sched_latency = 5000000ULL; 38unsigned int sysctl_sched_latency = 6000000ULL;
39unsigned int normalized_sysctl_sched_latency = 5000000ULL; 39unsigned int normalized_sysctl_sched_latency = 6000000ULL;
40 40
41/* 41/*
42 * The initial- and re-scaling of tunables is configurable 42 * The initial- and re-scaling of tunables is configurable
@@ -52,15 +52,15 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
52 52
53/* 53/*
54 * Minimal preemption granularity for CPU-bound tasks: 54 * Minimal preemption granularity for CPU-bound tasks:
55 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) 55 * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
56 */ 56 */
57unsigned int sysctl_sched_min_granularity = 1000000ULL; 57unsigned int sysctl_sched_min_granularity = 2000000ULL;
58unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL; 58unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
59 59
60/* 60/*
61 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity 61 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
62 */ 62 */
63static unsigned int sched_nr_latency = 5; 63static unsigned int sched_nr_latency = 3;
64 64
65/* 65/*
66 * After fork, child runs first. If set to 0 (default) then 66 * After fork, child runs first. If set to 0 (default) then
@@ -505,7 +505,8 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
505{ 505{
506 unsigned long delta_exec_weighted; 506 unsigned long delta_exec_weighted;
507 507
508 schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max)); 508 schedstat_set(curr->statistics.exec_max,
509 max((u64)delta_exec, curr->statistics.exec_max));
509 510
510 curr->sum_exec_runtime += delta_exec; 511 curr->sum_exec_runtime += delta_exec;
511 schedstat_add(cfs_rq, exec_clock, delta_exec); 512 schedstat_add(cfs_rq, exec_clock, delta_exec);
@@ -548,7 +549,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
548static inline void 549static inline void
549update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) 550update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
550{ 551{
551 schedstat_set(se->wait_start, rq_of(cfs_rq)->clock); 552 schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
552} 553}
553 554
554/* 555/*
@@ -567,18 +568,18 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
567static void 568static void
568update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) 569update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
569{ 570{
570 schedstat_set(se->wait_max, max(se->wait_max, 571 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
571 rq_of(cfs_rq)->clock - se->wait_start)); 572 rq_of(cfs_rq)->clock - se->statistics.wait_start));
572 schedstat_set(se->wait_count, se->wait_count + 1); 573 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
573 schedstat_set(se->wait_sum, se->wait_sum + 574 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
574 rq_of(cfs_rq)->clock - se->wait_start); 575 rq_of(cfs_rq)->clock - se->statistics.wait_start);
575#ifdef CONFIG_SCHEDSTATS 576#ifdef CONFIG_SCHEDSTATS
576 if (entity_is_task(se)) { 577 if (entity_is_task(se)) {
577 trace_sched_stat_wait(task_of(se), 578 trace_sched_stat_wait(task_of(se),
578 rq_of(cfs_rq)->clock - se->wait_start); 579 rq_of(cfs_rq)->clock - se->statistics.wait_start);
579 } 580 }
580#endif 581#endif
581 schedstat_set(se->wait_start, 0); 582 schedstat_set(se->statistics.wait_start, 0);
582} 583}
583 584
584static inline void 585static inline void
@@ -657,39 +658,39 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
657 if (entity_is_task(se)) 658 if (entity_is_task(se))
658 tsk = task_of(se); 659 tsk = task_of(se);
659 660
660 if (se->sleep_start) { 661 if (se->statistics.sleep_start) {
661 u64 delta = rq_of(cfs_rq)->clock - se->sleep_start; 662 u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
662 663
663 if ((s64)delta < 0) 664 if ((s64)delta < 0)
664 delta = 0; 665 delta = 0;
665 666
666 if (unlikely(delta > se->sleep_max)) 667 if (unlikely(delta > se->statistics.sleep_max))
667 se->sleep_max = delta; 668 se->statistics.sleep_max = delta;
668 669
669 se->sleep_start = 0; 670 se->statistics.sleep_start = 0;
670 se->sum_sleep_runtime += delta; 671 se->statistics.sum_sleep_runtime += delta;
671 672
672 if (tsk) { 673 if (tsk) {
673 account_scheduler_latency(tsk, delta >> 10, 1); 674 account_scheduler_latency(tsk, delta >> 10, 1);
674 trace_sched_stat_sleep(tsk, delta); 675 trace_sched_stat_sleep(tsk, delta);
675 } 676 }
676 } 677 }
677 if (se->block_start) { 678 if (se->statistics.block_start) {
678 u64 delta = rq_of(cfs_rq)->clock - se->block_start; 679 u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
679 680
680 if ((s64)delta < 0) 681 if ((s64)delta < 0)
681 delta = 0; 682 delta = 0;
682 683
683 if (unlikely(delta > se->block_max)) 684 if (unlikely(delta > se->statistics.block_max))
684 se->block_max = delta; 685 se->statistics.block_max = delta;
685 686
686 se->block_start = 0; 687 se->statistics.block_start = 0;
687 se->sum_sleep_runtime += delta; 688 se->statistics.sum_sleep_runtime += delta;
688 689
689 if (tsk) { 690 if (tsk) {
690 if (tsk->in_iowait) { 691 if (tsk->in_iowait) {
691 se->iowait_sum += delta; 692 se->statistics.iowait_sum += delta;
692 se->iowait_count++; 693 se->statistics.iowait_count++;
693 trace_sched_stat_iowait(tsk, delta); 694 trace_sched_stat_iowait(tsk, delta);
694 } 695 }
695 696
@@ -737,20 +738,10 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
737 vruntime += sched_vslice(cfs_rq, se); 738 vruntime += sched_vslice(cfs_rq, se);
738 739
739 /* sleeps up to a single latency don't count. */ 740 /* sleeps up to a single latency don't count. */
740 if (!initial && sched_feat(FAIR_SLEEPERS)) { 741 if (!initial) {
741 unsigned long thresh = sysctl_sched_latency; 742 unsigned long thresh = sysctl_sched_latency;
742 743
743 /* 744 /*
744 * Convert the sleeper threshold into virtual time.
745 * SCHED_IDLE is a special sub-class. We care about
746 * fairness only relative to other SCHED_IDLE tasks,
747 * all of which have the same weight.
748 */
749 if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
750 task_of(se)->policy != SCHED_IDLE))
751 thresh = calc_delta_fair(thresh, se);
752
753 /*
754 * Halve their sleep time's effect, to allow 745 * Halve their sleep time's effect, to allow
755 * for a gentler effect of sleepers: 746 * for a gentler effect of sleepers:
756 */ 747 */
@@ -766,9 +757,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
766 se->vruntime = vruntime; 757 se->vruntime = vruntime;
767} 758}
768 759
769#define ENQUEUE_WAKEUP 1
770#define ENQUEUE_MIGRATE 2
771
772static void 760static void
773enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 761enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
774{ 762{
@@ -776,7 +764,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
776 * Update the normalized vruntime before updating min_vruntime 764 * Update the normalized vruntime before updating min_vruntime
777 * through callig update_curr(). 765 * through callig update_curr().
778 */ 766 */
779 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE)) 767 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
780 se->vruntime += cfs_rq->min_vruntime; 768 se->vruntime += cfs_rq->min_vruntime;
781 769
782 /* 770 /*
@@ -812,7 +800,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
812} 800}
813 801
814static void 802static void
815dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep) 803dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
816{ 804{
817 /* 805 /*
818 * Update run-time statistics of the 'current'. 806 * Update run-time statistics of the 'current'.
@@ -820,15 +808,15 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
820 update_curr(cfs_rq); 808 update_curr(cfs_rq);
821 809
822 update_stats_dequeue(cfs_rq, se); 810 update_stats_dequeue(cfs_rq, se);
823 if (sleep) { 811 if (flags & DEQUEUE_SLEEP) {
824#ifdef CONFIG_SCHEDSTATS 812#ifdef CONFIG_SCHEDSTATS
825 if (entity_is_task(se)) { 813 if (entity_is_task(se)) {
826 struct task_struct *tsk = task_of(se); 814 struct task_struct *tsk = task_of(se);
827 815
828 if (tsk->state & TASK_INTERRUPTIBLE) 816 if (tsk->state & TASK_INTERRUPTIBLE)
829 se->sleep_start = rq_of(cfs_rq)->clock; 817 se->statistics.sleep_start = rq_of(cfs_rq)->clock;
830 if (tsk->state & TASK_UNINTERRUPTIBLE) 818 if (tsk->state & TASK_UNINTERRUPTIBLE)
831 se->block_start = rq_of(cfs_rq)->clock; 819 se->statistics.block_start = rq_of(cfs_rq)->clock;
832 } 820 }
833#endif 821#endif
834 } 822 }
@@ -845,7 +833,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
845 * update can refer to the ->curr item and we need to reflect this 833 * update can refer to the ->curr item and we need to reflect this
846 * movement in our normalized position. 834 * movement in our normalized position.
847 */ 835 */
848 if (!sleep) 836 if (!(flags & DEQUEUE_SLEEP))
849 se->vruntime -= cfs_rq->min_vruntime; 837 se->vruntime -= cfs_rq->min_vruntime;
850} 838}
851 839
@@ -912,7 +900,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
912 * when there are only lesser-weight tasks around): 900 * when there are only lesser-weight tasks around):
913 */ 901 */
914 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { 902 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
915 se->slice_max = max(se->slice_max, 903 se->statistics.slice_max = max(se->statistics.slice_max,
916 se->sum_exec_runtime - se->prev_sum_exec_runtime); 904 se->sum_exec_runtime - se->prev_sum_exec_runtime);
917 } 905 }
918#endif 906#endif
@@ -1054,16 +1042,10 @@ static inline void hrtick_update(struct rq *rq)
1054 * then put the task into the rbtree: 1042 * then put the task into the rbtree:
1055 */ 1043 */
1056static void 1044static void
1057enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head) 1045enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
1058{ 1046{
1059 struct cfs_rq *cfs_rq; 1047 struct cfs_rq *cfs_rq;
1060 struct sched_entity *se = &p->se; 1048 struct sched_entity *se = &p->se;
1061 int flags = 0;
1062
1063 if (wakeup)
1064 flags |= ENQUEUE_WAKEUP;
1065 if (p->state == TASK_WAKING)
1066 flags |= ENQUEUE_MIGRATE;
1067 1049
1068 for_each_sched_entity(se) { 1050 for_each_sched_entity(se) {
1069 if (se->on_rq) 1051 if (se->on_rq)
@@ -1081,18 +1063,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
1081 * decreased. We remove the task from the rbtree and 1063 * decreased. We remove the task from the rbtree and
1082 * update the fair scheduling stats: 1064 * update the fair scheduling stats:
1083 */ 1065 */
1084static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep) 1066static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
1085{ 1067{
1086 struct cfs_rq *cfs_rq; 1068 struct cfs_rq *cfs_rq;
1087 struct sched_entity *se = &p->se; 1069 struct sched_entity *se = &p->se;
1088 1070
1089 for_each_sched_entity(se) { 1071 for_each_sched_entity(se) {
1090 cfs_rq = cfs_rq_of(se); 1072 cfs_rq = cfs_rq_of(se);
1091 dequeue_entity(cfs_rq, se, sleep); 1073 dequeue_entity(cfs_rq, se, flags);
1092 /* Don't dequeue parent if it has other entities besides us */ 1074 /* Don't dequeue parent if it has other entities besides us */
1093 if (cfs_rq->load.weight) 1075 if (cfs_rq->load.weight)
1094 break; 1076 break;
1095 sleep = 1; 1077 flags |= DEQUEUE_SLEEP;
1096 } 1078 }
1097 1079
1098 hrtick_update(rq); 1080 hrtick_update(rq);
@@ -1240,7 +1222,6 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
1240 1222
1241static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) 1223static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1242{ 1224{
1243 struct task_struct *curr = current;
1244 unsigned long this_load, load; 1225 unsigned long this_load, load;
1245 int idx, this_cpu, prev_cpu; 1226 int idx, this_cpu, prev_cpu;
1246 unsigned long tl_per_task; 1227 unsigned long tl_per_task;
@@ -1255,18 +1236,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1255 load = source_load(prev_cpu, idx); 1236 load = source_load(prev_cpu, idx);
1256 this_load = target_load(this_cpu, idx); 1237 this_load = target_load(this_cpu, idx);
1257 1238
1258 if (sync) {
1259 if (sched_feat(SYNC_LESS) &&
1260 (curr->se.avg_overlap > sysctl_sched_migration_cost ||
1261 p->se.avg_overlap > sysctl_sched_migration_cost))
1262 sync = 0;
1263 } else {
1264 if (sched_feat(SYNC_MORE) &&
1265 (curr->se.avg_overlap < sysctl_sched_migration_cost &&
1266 p->se.avg_overlap < sysctl_sched_migration_cost))
1267 sync = 1;
1268 }
1269
1270 /* 1239 /*
1271 * If sync wakeup then subtract the (maximum possible) 1240 * If sync wakeup then subtract the (maximum possible)
1272 * effect of the currently running task from the load 1241 * effect of the currently running task from the load
@@ -1306,7 +1275,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1306 if (sync && balanced) 1275 if (sync && balanced)
1307 return 1; 1276 return 1;
1308 1277
1309 schedstat_inc(p, se.nr_wakeups_affine_attempts); 1278 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
1310 tl_per_task = cpu_avg_load_per_task(this_cpu); 1279 tl_per_task = cpu_avg_load_per_task(this_cpu);
1311 1280
1312 if (balanced || 1281 if (balanced ||
@@ -1318,7 +1287,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1318 * there is no bad imbalance. 1287 * there is no bad imbalance.
1319 */ 1288 */
1320 schedstat_inc(sd, ttwu_move_affine); 1289 schedstat_inc(sd, ttwu_move_affine);
1321 schedstat_inc(p, se.nr_wakeups_affine); 1290 schedstat_inc(p, se.statistics.nr_wakeups_affine);
1322 1291
1323 return 1; 1292 return 1;
1324 } 1293 }
@@ -1406,29 +1375,48 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1406/* 1375/*
1407 * Try and locate an idle CPU in the sched_domain. 1376 * Try and locate an idle CPU in the sched_domain.
1408 */ 1377 */
1409static int 1378static int select_idle_sibling(struct task_struct *p, int target)
1410select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
1411{ 1379{
1412 int cpu = smp_processor_id(); 1380 int cpu = smp_processor_id();
1413 int prev_cpu = task_cpu(p); 1381 int prev_cpu = task_cpu(p);
1382 struct sched_domain *sd;
1414 int i; 1383 int i;
1415 1384
1416 /* 1385 /*
1417 * If this domain spans both cpu and prev_cpu (see the SD_WAKE_AFFINE 1386 * If the task is going to be woken-up on this cpu and if it is
1418 * test in select_task_rq_fair) and the prev_cpu is idle then that's 1387 * already idle, then it is the right target.
1419 * always a better target than the current cpu.
1420 */ 1388 */
1421 if (target == cpu && !cpu_rq(prev_cpu)->cfs.nr_running) 1389 if (target == cpu && idle_cpu(cpu))
1390 return cpu;
1391
1392 /*
1393 * If the task is going to be woken-up on the cpu where it previously
1394 * ran and if it is currently idle, then it the right target.
1395 */
1396 if (target == prev_cpu && idle_cpu(prev_cpu))
1422 return prev_cpu; 1397 return prev_cpu;
1423 1398
1424 /* 1399 /*
1425 * Otherwise, iterate the domain and find an elegible idle cpu. 1400 * Otherwise, iterate the domains and find an elegible idle cpu.
1426 */ 1401 */
1427 for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) { 1402 for_each_domain(target, sd) {
1428 if (!cpu_rq(i)->cfs.nr_running) { 1403 if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
1429 target = i;
1430 break; 1404 break;
1405
1406 for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
1407 if (idle_cpu(i)) {
1408 target = i;
1409 break;
1410 }
1431 } 1411 }
1412
1413 /*
1414 * Lets stop looking for an idle sibling when we reached
1415 * the domain that spans the current cpu and prev_cpu.
1416 */
1417 if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
1418 cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
1419 break;
1432 } 1420 }
1433 1421
1434 return target; 1422 return target;
@@ -1445,7 +1433,8 @@ select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
1445 * 1433 *
1446 * preempt must be disabled. 1434 * preempt must be disabled.
1447 */ 1435 */
1448static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) 1436static int
1437select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
1449{ 1438{
1450 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; 1439 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
1451 int cpu = smp_processor_id(); 1440 int cpu = smp_processor_id();
@@ -1456,8 +1445,7 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
1456 int sync = wake_flags & WF_SYNC; 1445 int sync = wake_flags & WF_SYNC;
1457 1446
1458 if (sd_flag & SD_BALANCE_WAKE) { 1447 if (sd_flag & SD_BALANCE_WAKE) {
1459 if (sched_feat(AFFINE_WAKEUPS) && 1448 if (cpumask_test_cpu(cpu, &p->cpus_allowed))
1460 cpumask_test_cpu(cpu, &p->cpus_allowed))
1461 want_affine = 1; 1449 want_affine = 1;
1462 new_cpu = prev_cpu; 1450 new_cpu = prev_cpu;
1463 } 1451 }
@@ -1491,34 +1479,13 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
1491 } 1479 }
1492 1480
1493 /* 1481 /*
1494 * While iterating the domains looking for a spanning 1482 * If both cpu and prev_cpu are part of this domain,
1495 * WAKE_AFFINE domain, adjust the affine target to any idle cpu 1483 * cpu is a valid SD_WAKE_AFFINE target.
1496 * in cache sharing domains along the way.
1497 */ 1484 */
1498 if (want_affine) { 1485 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
1499 int target = -1; 1486 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
1500 1487 affine_sd = tmp;
1501 /* 1488 want_affine = 0;
1502 * If both cpu and prev_cpu are part of this domain,
1503 * cpu is a valid SD_WAKE_AFFINE target.
1504 */
1505 if (cpumask_test_cpu(prev_cpu, sched_domain_span(tmp)))
1506 target = cpu;
1507
1508 /*
1509 * If there's an idle sibling in this domain, make that
1510 * the wake_affine target instead of the current cpu.
1511 */
1512 if (tmp->flags & SD_SHARE_PKG_RESOURCES)
1513 target = select_idle_sibling(p, tmp, target);
1514
1515 if (target >= 0) {
1516 if (tmp->flags & SD_WAKE_AFFINE) {
1517 affine_sd = tmp;
1518 want_affine = 0;
1519 }
1520 cpu = target;
1521 }
1522 } 1489 }
1523 1490
1524 if (!want_sd && !want_affine) 1491 if (!want_sd && !want_affine)
@@ -1531,22 +1498,29 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
1531 sd = tmp; 1498 sd = tmp;
1532 } 1499 }
1533 1500
1501#ifdef CONFIG_FAIR_GROUP_SCHED
1534 if (sched_feat(LB_SHARES_UPDATE)) { 1502 if (sched_feat(LB_SHARES_UPDATE)) {
1535 /* 1503 /*
1536 * Pick the largest domain to update shares over 1504 * Pick the largest domain to update shares over
1537 */ 1505 */
1538 tmp = sd; 1506 tmp = sd;
1539 if (affine_sd && (!tmp || 1507 if (affine_sd && (!tmp || affine_sd->span_weight > sd->span_weight))
1540 cpumask_weight(sched_domain_span(affine_sd)) >
1541 cpumask_weight(sched_domain_span(sd))))
1542 tmp = affine_sd; 1508 tmp = affine_sd;
1543 1509
1544 if (tmp) 1510 if (tmp) {
1511 raw_spin_unlock(&rq->lock);
1545 update_shares(tmp); 1512 update_shares(tmp);
1513 raw_spin_lock(&rq->lock);
1514 }
1546 } 1515 }
1516#endif
1547 1517
1548 if (affine_sd && wake_affine(affine_sd, p, sync)) 1518 if (affine_sd) {
1549 return cpu; 1519 if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
1520 return select_idle_sibling(p, cpu);
1521 else
1522 return select_idle_sibling(p, prev_cpu);
1523 }
1550 1524
1551 while (sd) { 1525 while (sd) {
1552 int load_idx = sd->forkexec_idx; 1526 int load_idx = sd->forkexec_idx;
@@ -1576,10 +1550,10 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
1576 1550
1577 /* Now try balancing at a lower domain level of new_cpu */ 1551 /* Now try balancing at a lower domain level of new_cpu */
1578 cpu = new_cpu; 1552 cpu = new_cpu;
1579 weight = cpumask_weight(sched_domain_span(sd)); 1553 weight = sd->span_weight;
1580 sd = NULL; 1554 sd = NULL;
1581 for_each_domain(cpu, tmp) { 1555 for_each_domain(cpu, tmp) {
1582 if (weight <= cpumask_weight(sched_domain_span(tmp))) 1556 if (weight <= tmp->span_weight)
1583 break; 1557 break;
1584 if (tmp->flags & sd_flag) 1558 if (tmp->flags & sd_flag)
1585 sd = tmp; 1559 sd = tmp;
@@ -1591,63 +1565,26 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
1591} 1565}
1592#endif /* CONFIG_SMP */ 1566#endif /* CONFIG_SMP */
1593 1567
1594/*
1595 * Adaptive granularity
1596 *
1597 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
1598 * with the limit of wakeup_gran -- when it never does a wakeup.
1599 *
1600 * So the smaller avg_wakeup is the faster we want this task to preempt,
1601 * but we don't want to treat the preemptee unfairly and therefore allow it
1602 * to run for at least the amount of time we'd like to run.
1603 *
1604 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
1605 *
1606 * NOTE: we use *nr_running to scale with load, this nicely matches the
1607 * degrading latency on load.
1608 */
1609static unsigned long
1610adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
1611{
1612 u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1613 u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
1614 u64 gran = 0;
1615
1616 if (this_run < expected_wakeup)
1617 gran = expected_wakeup - this_run;
1618
1619 return min_t(s64, gran, sysctl_sched_wakeup_granularity);
1620}
1621
1622static unsigned long 1568static unsigned long
1623wakeup_gran(struct sched_entity *curr, struct sched_entity *se) 1569wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1624{ 1570{
1625 unsigned long gran = sysctl_sched_wakeup_granularity; 1571 unsigned long gran = sysctl_sched_wakeup_granularity;
1626 1572
1627 if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
1628 gran = adaptive_gran(curr, se);
1629
1630 /* 1573 /*
1631 * Since its curr running now, convert the gran from real-time 1574 * Since its curr running now, convert the gran from real-time
1632 * to virtual-time in his units. 1575 * to virtual-time in his units.
1576 *
1577 * By using 'se' instead of 'curr' we penalize light tasks, so
1578 * they get preempted easier. That is, if 'se' < 'curr' then
1579 * the resulting gran will be larger, therefore penalizing the
1580 * lighter, if otoh 'se' > 'curr' then the resulting gran will
1581 * be smaller, again penalizing the lighter task.
1582 *
1583 * This is especially important for buddies when the leftmost
1584 * task is higher priority than the buddy.
1633 */ 1585 */
1634 if (sched_feat(ASYM_GRAN)) { 1586 if (unlikely(se->load.weight != NICE_0_LOAD))
1635 /* 1587 gran = calc_delta_fair(gran, se);
1636 * By using 'se' instead of 'curr' we penalize light tasks, so
1637 * they get preempted easier. That is, if 'se' < 'curr' then
1638 * the resulting gran will be larger, therefore penalizing the
1639 * lighter, if otoh 'se' > 'curr' then the resulting gran will
1640 * be smaller, again penalizing the lighter task.
1641 *
1642 * This is especially important for buddies when the leftmost
1643 * task is higher priority than the buddy.
1644 */
1645 if (unlikely(se->load.weight != NICE_0_LOAD))
1646 gran = calc_delta_fair(gran, se);
1647 } else {
1648 if (unlikely(curr->load.weight != NICE_0_LOAD))
1649 gran = calc_delta_fair(gran, curr);
1650 }
1651 1588
1652 return gran; 1589 return gran;
1653} 1590}
@@ -1705,7 +1642,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
1705 struct task_struct *curr = rq->curr; 1642 struct task_struct *curr = rq->curr;
1706 struct sched_entity *se = &curr->se, *pse = &p->se; 1643 struct sched_entity *se = &curr->se, *pse = &p->se;
1707 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 1644 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1708 int sync = wake_flags & WF_SYNC;
1709 int scale = cfs_rq->nr_running >= sched_nr_latency; 1645 int scale = cfs_rq->nr_running >= sched_nr_latency;
1710 1646
1711 if (unlikely(rt_prio(p->prio))) 1647 if (unlikely(rt_prio(p->prio)))
@@ -1738,14 +1674,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
1738 if (unlikely(curr->policy == SCHED_IDLE)) 1674 if (unlikely(curr->policy == SCHED_IDLE))
1739 goto preempt; 1675 goto preempt;
1740 1676
1741 if (sched_feat(WAKEUP_SYNC) && sync)
1742 goto preempt;
1743
1744 if (sched_feat(WAKEUP_OVERLAP) &&
1745 se->avg_overlap < sysctl_sched_migration_cost &&
1746 pse->avg_overlap < sysctl_sched_migration_cost)
1747 goto preempt;
1748
1749 if (!sched_feat(WAKEUP_PREEMPT)) 1677 if (!sched_feat(WAKEUP_PREEMPT))
1750 return; 1678 return;
1751 1679
@@ -1844,13 +1772,13 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
1844 * 3) are cache-hot on their current CPU. 1772 * 3) are cache-hot on their current CPU.
1845 */ 1773 */
1846 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { 1774 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
1847 schedstat_inc(p, se.nr_failed_migrations_affine); 1775 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
1848 return 0; 1776 return 0;
1849 } 1777 }
1850 *all_pinned = 0; 1778 *all_pinned = 0;
1851 1779
1852 if (task_running(rq, p)) { 1780 if (task_running(rq, p)) {
1853 schedstat_inc(p, se.nr_failed_migrations_running); 1781 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
1854 return 0; 1782 return 0;
1855 } 1783 }
1856 1784
@@ -1866,14 +1794,14 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
1866#ifdef CONFIG_SCHEDSTATS 1794#ifdef CONFIG_SCHEDSTATS
1867 if (tsk_cache_hot) { 1795 if (tsk_cache_hot) {
1868 schedstat_inc(sd, lb_hot_gained[idle]); 1796 schedstat_inc(sd, lb_hot_gained[idle]);
1869 schedstat_inc(p, se.nr_forced_migrations); 1797 schedstat_inc(p, se.statistics.nr_forced_migrations);
1870 } 1798 }
1871#endif 1799#endif
1872 return 1; 1800 return 1;
1873 } 1801 }
1874 1802
1875 if (tsk_cache_hot) { 1803 if (tsk_cache_hot) {
1876 schedstat_inc(p, se.nr_failed_migrations_hot); 1804 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
1877 return 0; 1805 return 0;
1878 } 1806 }
1879 return 1; 1807 return 1;
@@ -2311,7 +2239,7 @@ unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
2311 2239
2312unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu) 2240unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
2313{ 2241{
2314 unsigned long weight = cpumask_weight(sched_domain_span(sd)); 2242 unsigned long weight = sd->span_weight;
2315 unsigned long smt_gain = sd->smt_gain; 2243 unsigned long smt_gain = sd->smt_gain;
2316 2244
2317 smt_gain /= weight; 2245 smt_gain /= weight;
@@ -2344,7 +2272,7 @@ unsigned long scale_rt_power(int cpu)
2344 2272
2345static void update_cpu_power(struct sched_domain *sd, int cpu) 2273static void update_cpu_power(struct sched_domain *sd, int cpu)
2346{ 2274{
2347 unsigned long weight = cpumask_weight(sched_domain_span(sd)); 2275 unsigned long weight = sd->span_weight;
2348 unsigned long power = SCHED_LOAD_SCALE; 2276 unsigned long power = SCHED_LOAD_SCALE;
2349 struct sched_group *sdg = sd->groups; 2277 struct sched_group *sdg = sd->groups;
2350 2278
@@ -2870,6 +2798,8 @@ static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
2870 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2); 2798 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
2871} 2799}
2872 2800
2801static int active_load_balance_cpu_stop(void *data);
2802
2873/* 2803/*
2874 * Check this_cpu to ensure it is balanced within domain. Attempt to move 2804 * Check this_cpu to ensure it is balanced within domain. Attempt to move
2875 * tasks if there is an imbalance. 2805 * tasks if there is an imbalance.
@@ -2959,8 +2889,9 @@ redo:
2959 if (need_active_balance(sd, sd_idle, idle)) { 2889 if (need_active_balance(sd, sd_idle, idle)) {
2960 raw_spin_lock_irqsave(&busiest->lock, flags); 2890 raw_spin_lock_irqsave(&busiest->lock, flags);
2961 2891
2962 /* don't kick the migration_thread, if the curr 2892 /* don't kick the active_load_balance_cpu_stop,
2963 * task on busiest cpu can't be moved to this_cpu 2893 * if the curr task on busiest cpu can't be
2894 * moved to this_cpu
2964 */ 2895 */
2965 if (!cpumask_test_cpu(this_cpu, 2896 if (!cpumask_test_cpu(this_cpu,
2966 &busiest->curr->cpus_allowed)) { 2897 &busiest->curr->cpus_allowed)) {
@@ -2970,14 +2901,22 @@ redo:
2970 goto out_one_pinned; 2901 goto out_one_pinned;
2971 } 2902 }
2972 2903
2904 /*
2905 * ->active_balance synchronizes accesses to
2906 * ->active_balance_work. Once set, it's cleared
2907 * only after active load balance is finished.
2908 */
2973 if (!busiest->active_balance) { 2909 if (!busiest->active_balance) {
2974 busiest->active_balance = 1; 2910 busiest->active_balance = 1;
2975 busiest->push_cpu = this_cpu; 2911 busiest->push_cpu = this_cpu;
2976 active_balance = 1; 2912 active_balance = 1;
2977 } 2913 }
2978 raw_spin_unlock_irqrestore(&busiest->lock, flags); 2914 raw_spin_unlock_irqrestore(&busiest->lock, flags);
2915
2979 if (active_balance) 2916 if (active_balance)
2980 wake_up_process(busiest->migration_thread); 2917 stop_one_cpu_nowait(cpu_of(busiest),
2918 active_load_balance_cpu_stop, busiest,
2919 &busiest->active_balance_work);
2981 2920
2982 /* 2921 /*
2983 * We've kicked active balancing, reset the failure 2922 * We've kicked active balancing, reset the failure
@@ -3084,24 +3023,29 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
3084} 3023}
3085 3024
3086/* 3025/*
3087 * active_load_balance is run by migration threads. It pushes running tasks 3026 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
3088 * off the busiest CPU onto idle CPUs. It requires at least 1 task to be 3027 * running tasks off the busiest CPU onto idle CPUs. It requires at
3089 * running on each physical CPU where possible, and avoids physical / 3028 * least 1 task to be running on each physical CPU where possible, and
3090 * logical imbalances. 3029 * avoids physical / logical imbalances.
3091 *
3092 * Called with busiest_rq locked.
3093 */ 3030 */
3094static void active_load_balance(struct rq *busiest_rq, int busiest_cpu) 3031static int active_load_balance_cpu_stop(void *data)
3095{ 3032{
3033 struct rq *busiest_rq = data;
3034 int busiest_cpu = cpu_of(busiest_rq);
3096 int target_cpu = busiest_rq->push_cpu; 3035 int target_cpu = busiest_rq->push_cpu;
3036 struct rq *target_rq = cpu_rq(target_cpu);
3097 struct sched_domain *sd; 3037 struct sched_domain *sd;
3098 struct rq *target_rq; 3038
3039 raw_spin_lock_irq(&busiest_rq->lock);
3040
3041 /* make sure the requested cpu hasn't gone down in the meantime */
3042 if (unlikely(busiest_cpu != smp_processor_id() ||
3043 !busiest_rq->active_balance))
3044 goto out_unlock;
3099 3045
3100 /* Is there any task to move? */ 3046 /* Is there any task to move? */
3101 if (busiest_rq->nr_running <= 1) 3047 if (busiest_rq->nr_running <= 1)
3102 return; 3048 goto out_unlock;
3103
3104 target_rq = cpu_rq(target_cpu);
3105 3049
3106 /* 3050 /*
3107 * This condition is "impossible", if it occurs 3051 * This condition is "impossible", if it occurs
@@ -3112,8 +3056,6 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3112 3056
3113 /* move a task from busiest_rq to target_rq */ 3057 /* move a task from busiest_rq to target_rq */
3114 double_lock_balance(busiest_rq, target_rq); 3058 double_lock_balance(busiest_rq, target_rq);
3115 update_rq_clock(busiest_rq);
3116 update_rq_clock(target_rq);
3117 3059
3118 /* Search for an sd spanning us and the target CPU. */ 3060 /* Search for an sd spanning us and the target CPU. */
3119 for_each_domain(target_cpu, sd) { 3061 for_each_domain(target_cpu, sd) {
@@ -3132,6 +3074,10 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3132 schedstat_inc(sd, alb_failed); 3074 schedstat_inc(sd, alb_failed);
3133 } 3075 }
3134 double_unlock_balance(busiest_rq, target_rq); 3076 double_unlock_balance(busiest_rq, target_rq);
3077out_unlock:
3078 busiest_rq->active_balance = 0;
3079 raw_spin_unlock_irq(&busiest_rq->lock);
3080 return 0;
3135} 3081}
3136 3082
3137#ifdef CONFIG_NO_HZ 3083#ifdef CONFIG_NO_HZ
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index d5059fd761d9..83c66e8ad3ee 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,11 +1,4 @@
1/* 1/*
2 * Disregards a certain amount of sleep time (sched_latency_ns) and
3 * considers the task to be running during that period. This gives it
4 * a service deficit on wakeup, allowing it to run sooner.
5 */
6SCHED_FEAT(FAIR_SLEEPERS, 1)
7
8/*
9 * Only give sleepers 50% of their service deficit. This allows 2 * Only give sleepers 50% of their service deficit. This allows
10 * them to run sooner, but does not allow tons of sleepers to 3 * them to run sooner, but does not allow tons of sleepers to
11 * rip the spread apart. 4 * rip the spread apart.
@@ -13,13 +6,6 @@ SCHED_FEAT(FAIR_SLEEPERS, 1)
13SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1) 6SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
14 7
15/* 8/*
16 * By not normalizing the sleep time, heavy tasks get an effective
17 * longer period, and lighter task an effective shorter period they
18 * are considered running.
19 */
20SCHED_FEAT(NORMALIZED_SLEEPER, 0)
21
22/*
23 * Place new tasks ahead so that they do not starve already running 9 * Place new tasks ahead so that they do not starve already running
24 * tasks 10 * tasks
25 */ 11 */
@@ -31,37 +17,6 @@ SCHED_FEAT(START_DEBIT, 1)
31SCHED_FEAT(WAKEUP_PREEMPT, 1) 17SCHED_FEAT(WAKEUP_PREEMPT, 1)
32 18
33/* 19/*
34 * Compute wakeup_gran based on task behaviour, clipped to
35 * [0, sched_wakeup_gran_ns]
36 */
37SCHED_FEAT(ADAPTIVE_GRAN, 1)
38
39/*
40 * When converting the wakeup granularity to virtual time, do it such
41 * that heavier tasks preempting a lighter task have an edge.
42 */
43SCHED_FEAT(ASYM_GRAN, 1)
44
45/*
46 * Always wakeup-preempt SYNC wakeups, see SYNC_WAKEUPS.
47 */
48SCHED_FEAT(WAKEUP_SYNC, 0)
49
50/*
51 * Wakeup preempt based on task behaviour. Tasks that do not overlap
52 * don't get preempted.
53 */
54SCHED_FEAT(WAKEUP_OVERLAP, 0)
55
56/*
57 * Use the SYNC wakeup hint, pipes and the likes use this to indicate
58 * the remote end is likely to consume the data we just wrote, and
59 * therefore has cache benefit from being placed on the same cpu, see
60 * also AFFINE_WAKEUPS.
61 */
62SCHED_FEAT(SYNC_WAKEUPS, 1)
63
64/*
65 * Based on load and program behaviour, see if it makes sense to place 20 * Based on load and program behaviour, see if it makes sense to place
66 * a newly woken task on the same cpu as the task that woke it -- 21 * a newly woken task on the same cpu as the task that woke it --
67 * improve cache locality. Typically used with SYNC wakeups as 22 * improve cache locality. Typically used with SYNC wakeups as
@@ -70,16 +25,6 @@ SCHED_FEAT(SYNC_WAKEUPS, 1)
70SCHED_FEAT(AFFINE_WAKEUPS, 1) 25SCHED_FEAT(AFFINE_WAKEUPS, 1)
71 26
72/* 27/*
73 * Weaken SYNC hint based on overlap
74 */
75SCHED_FEAT(SYNC_LESS, 1)
76
77/*
78 * Add SYNC hint based on overlap
79 */
80SCHED_FEAT(SYNC_MORE, 0)
81
82/*
83 * Prefer to schedule the task we woke last (assuming it failed 28 * Prefer to schedule the task we woke last (assuming it failed
84 * wakeup-preemption), since its likely going to consume data we 29 * wakeup-preemption), since its likely going to consume data we
85 * touched, increases cache locality. 30 * touched, increases cache locality.
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index a8a6d8a50947..9fa0f402c87c 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -6,7 +6,8 @@
6 */ 6 */
7 7
8#ifdef CONFIG_SMP 8#ifdef CONFIG_SMP
9static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags) 9static int
10select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
10{ 11{
11 return task_cpu(p); /* IDLE tasks as never migrated */ 12 return task_cpu(p); /* IDLE tasks as never migrated */
12} 13}
@@ -22,8 +23,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
22static struct task_struct *pick_next_task_idle(struct rq *rq) 23static struct task_struct *pick_next_task_idle(struct rq *rq)
23{ 24{
24 schedstat_inc(rq, sched_goidle); 25 schedstat_inc(rq, sched_goidle);
25 /* adjust the active tasks as we might go into a long sleep */ 26 calc_load_account_idle(rq);
26 calc_load_account_active(rq);
27 return rq->idle; 27 return rq->idle;
28} 28}
29 29
@@ -32,7 +32,7 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
32 * message if some code attempts to do it: 32 * message if some code attempts to do it:
33 */ 33 */
34static void 34static void
35dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep) 35dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
36{ 36{
37 raw_spin_unlock_irq(&rq->lock); 37 raw_spin_unlock_irq(&rq->lock);
38 printk(KERN_ERR "bad: scheduling from the idle thread!\n"); 38 printk(KERN_ERR "bad: scheduling from the idle thread!\n");
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b5b920ae2ea7..8afb953e31c6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -613,7 +613,7 @@ static void update_curr_rt(struct rq *rq)
613 if (unlikely((s64)delta_exec < 0)) 613 if (unlikely((s64)delta_exec < 0))
614 delta_exec = 0; 614 delta_exec = 0;
615 615
616 schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec)); 616 schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
617 617
618 curr->se.sum_exec_runtime += delta_exec; 618 curr->se.sum_exec_runtime += delta_exec;
619 account_group_exec_runtime(curr, delta_exec); 619 account_group_exec_runtime(curr, delta_exec);
@@ -888,20 +888,20 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
888 * Adding/removing a task to/from a priority array: 888 * Adding/removing a task to/from a priority array:
889 */ 889 */
890static void 890static void
891enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head) 891enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
892{ 892{
893 struct sched_rt_entity *rt_se = &p->rt; 893 struct sched_rt_entity *rt_se = &p->rt;
894 894
895 if (wakeup) 895 if (flags & ENQUEUE_WAKEUP)
896 rt_se->timeout = 0; 896 rt_se->timeout = 0;
897 897
898 enqueue_rt_entity(rt_se, head); 898 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
899 899
900 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1) 900 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
901 enqueue_pushable_task(rq, p); 901 enqueue_pushable_task(rq, p);
902} 902}
903 903
904static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) 904static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
905{ 905{
906 struct sched_rt_entity *rt_se = &p->rt; 906 struct sched_rt_entity *rt_se = &p->rt;
907 907
@@ -948,10 +948,9 @@ static void yield_task_rt(struct rq *rq)
948#ifdef CONFIG_SMP 948#ifdef CONFIG_SMP
949static int find_lowest_rq(struct task_struct *task); 949static int find_lowest_rq(struct task_struct *task);
950 950
951static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) 951static int
952select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
952{ 953{
953 struct rq *rq = task_rq(p);
954
955 if (sd_flag != SD_BALANCE_WAKE) 954 if (sd_flag != SD_BALANCE_WAKE)
956 return smp_processor_id(); 955 return smp_processor_id();
957 956
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 9bb9fb1bd79c..ef51d1fcf5e6 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -1,17 +1,381 @@
1/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation. 1/*
2 * GPL v2 and any later version. 2 * kernel/stop_machine.c
3 *
4 * Copyright (C) 2008, 2005 IBM Corporation.
5 * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au
6 * Copyright (C) 2010 SUSE Linux Products GmbH
7 * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
8 *
9 * This file is released under the GPLv2 and any later version.
3 */ 10 */
11#include <linux/completion.h>
4#include <linux/cpu.h> 12#include <linux/cpu.h>
5#include <linux/err.h> 13#include <linux/init.h>
6#include <linux/kthread.h> 14#include <linux/kthread.h>
7#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/percpu.h>
8#include <linux/sched.h> 17#include <linux/sched.h>
9#include <linux/stop_machine.h> 18#include <linux/stop_machine.h>
10#include <linux/syscalls.h>
11#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/kallsyms.h>
12 21
13#include <asm/atomic.h> 22#include <asm/atomic.h>
14#include <asm/uaccess.h> 23
24/*
25 * Structure to determine completion condition and record errors. May
26 * be shared by works on different cpus.
27 */
28struct cpu_stop_done {
29 atomic_t nr_todo; /* nr left to execute */
30 bool executed; /* actually executed? */
31 int ret; /* collected return value */
32 struct completion completion; /* fired if nr_todo reaches 0 */
33};
34
35/* the actual stopper, one per every possible cpu, enabled on online cpus */
36struct cpu_stopper {
37 spinlock_t lock;
38 struct list_head works; /* list of pending works */
39 struct task_struct *thread; /* stopper thread */
40 bool enabled; /* is this stopper enabled? */
41};
42
43static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
44
45static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
46{
47 memset(done, 0, sizeof(*done));
48 atomic_set(&done->nr_todo, nr_todo);
49 init_completion(&done->completion);
50}
51
52/* signal completion unless @done is NULL */
53static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed)
54{
55 if (done) {
56 if (executed)
57 done->executed = true;
58 if (atomic_dec_and_test(&done->nr_todo))
59 complete(&done->completion);
60 }
61}
62
63/* queue @work to @stopper. if offline, @work is completed immediately */
64static void cpu_stop_queue_work(struct cpu_stopper *stopper,
65 struct cpu_stop_work *work)
66{
67 unsigned long flags;
68
69 spin_lock_irqsave(&stopper->lock, flags);
70
71 if (stopper->enabled) {
72 list_add_tail(&work->list, &stopper->works);
73 wake_up_process(stopper->thread);
74 } else
75 cpu_stop_signal_done(work->done, false);
76
77 spin_unlock_irqrestore(&stopper->lock, flags);
78}
79
80/**
81 * stop_one_cpu - stop a cpu
82 * @cpu: cpu to stop
83 * @fn: function to execute
84 * @arg: argument to @fn
85 *
86 * Execute @fn(@arg) on @cpu. @fn is run in a process context with
87 * the highest priority preempting any task on the cpu and
88 * monopolizing it. This function returns after the execution is
89 * complete.
90 *
91 * This function doesn't guarantee @cpu stays online till @fn
92 * completes. If @cpu goes down in the middle, execution may happen
93 * partially or fully on different cpus. @fn should either be ready
94 * for that or the caller should ensure that @cpu stays online until
95 * this function completes.
96 *
97 * CONTEXT:
98 * Might sleep.
99 *
100 * RETURNS:
101 * -ENOENT if @fn(@arg) was not executed because @cpu was offline;
102 * otherwise, the return value of @fn.
103 */
104int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
105{
106 struct cpu_stop_done done;
107 struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done };
108
109 cpu_stop_init_done(&done, 1);
110 cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work);
111 wait_for_completion(&done.completion);
112 return done.executed ? done.ret : -ENOENT;
113}
114
115/**
116 * stop_one_cpu_nowait - stop a cpu but don't wait for completion
117 * @cpu: cpu to stop
118 * @fn: function to execute
119 * @arg: argument to @fn
120 *
121 * Similar to stop_one_cpu() but doesn't wait for completion. The
122 * caller is responsible for ensuring @work_buf is currently unused
123 * and will remain untouched until stopper starts executing @fn.
124 *
125 * CONTEXT:
126 * Don't care.
127 */
128void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
129 struct cpu_stop_work *work_buf)
130{
131 *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
132 cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf);
133}
134
135/* static data for stop_cpus */
136static DEFINE_MUTEX(stop_cpus_mutex);
137static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
138
139int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
140{
141 struct cpu_stop_work *work;
142 struct cpu_stop_done done;
143 unsigned int cpu;
144
145 /* initialize works and done */
146 for_each_cpu(cpu, cpumask) {
147 work = &per_cpu(stop_cpus_work, cpu);
148 work->fn = fn;
149 work->arg = arg;
150 work->done = &done;
151 }
152 cpu_stop_init_done(&done, cpumask_weight(cpumask));
153
154 /*
155 * Disable preemption while queueing to avoid getting
156 * preempted by a stopper which might wait for other stoppers
157 * to enter @fn which can lead to deadlock.
158 */
159 preempt_disable();
160 for_each_cpu(cpu, cpumask)
161 cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
162 &per_cpu(stop_cpus_work, cpu));
163 preempt_enable();
164
165 wait_for_completion(&done.completion);
166 return done.executed ? done.ret : -ENOENT;
167}
168
169/**
170 * stop_cpus - stop multiple cpus
171 * @cpumask: cpus to stop
172 * @fn: function to execute
173 * @arg: argument to @fn
174 *
175 * Execute @fn(@arg) on online cpus in @cpumask. On each target cpu,
176 * @fn is run in a process context with the highest priority
177 * preempting any task on the cpu and monopolizing it. This function
178 * returns after all executions are complete.
179 *
180 * This function doesn't guarantee the cpus in @cpumask stay online
181 * till @fn completes. If some cpus go down in the middle, execution
182 * on the cpu may happen partially or fully on different cpus. @fn
183 * should either be ready for that or the caller should ensure that
184 * the cpus stay online until this function completes.
185 *
186 * All stop_cpus() calls are serialized making it safe for @fn to wait
187 * for all cpus to start executing it.
188 *
189 * CONTEXT:
190 * Might sleep.
191 *
192 * RETURNS:
193 * -ENOENT if @fn(@arg) was not executed at all because all cpus in
194 * @cpumask were offline; otherwise, 0 if all executions of @fn
195 * returned 0, any non zero return value if any returned non zero.
196 */
197int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
198{
199 int ret;
200
201 /* static works are used, process one request at a time */
202 mutex_lock(&stop_cpus_mutex);
203 ret = __stop_cpus(cpumask, fn, arg);
204 mutex_unlock(&stop_cpus_mutex);
205 return ret;
206}
207
208/**
209 * try_stop_cpus - try to stop multiple cpus
210 * @cpumask: cpus to stop
211 * @fn: function to execute
212 * @arg: argument to @fn
213 *
214 * Identical to stop_cpus() except that it fails with -EAGAIN if
215 * someone else is already using the facility.
216 *
217 * CONTEXT:
218 * Might sleep.
219 *
220 * RETURNS:
221 * -EAGAIN if someone else is already stopping cpus, -ENOENT if
222 * @fn(@arg) was not executed at all because all cpus in @cpumask were
223 * offline; otherwise, 0 if all executions of @fn returned 0, any non
224 * zero return value if any returned non zero.
225 */
226int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
227{
228 int ret;
229
230 /* static works are used, process one request at a time */
231 if (!mutex_trylock(&stop_cpus_mutex))
232 return -EAGAIN;
233 ret = __stop_cpus(cpumask, fn, arg);
234 mutex_unlock(&stop_cpus_mutex);
235 return ret;
236}
237
238static int cpu_stopper_thread(void *data)
239{
240 struct cpu_stopper *stopper = data;
241 struct cpu_stop_work *work;
242 int ret;
243
244repeat:
245 set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
246
247 if (kthread_should_stop()) {
248 __set_current_state(TASK_RUNNING);
249 return 0;
250 }
251
252 work = NULL;
253 spin_lock_irq(&stopper->lock);
254 if (!list_empty(&stopper->works)) {
255 work = list_first_entry(&stopper->works,
256 struct cpu_stop_work, list);
257 list_del_init(&work->list);
258 }
259 spin_unlock_irq(&stopper->lock);
260
261 if (work) {
262 cpu_stop_fn_t fn = work->fn;
263 void *arg = work->arg;
264 struct cpu_stop_done *done = work->done;
265 char ksym_buf[KSYM_NAME_LEN];
266
267 __set_current_state(TASK_RUNNING);
268
269 /* cpu stop callbacks are not allowed to sleep */
270 preempt_disable();
271
272 ret = fn(arg);
273 if (ret)
274 done->ret = ret;
275
276 /* restore preemption and check it's still balanced */
277 preempt_enable();
278 WARN_ONCE(preempt_count(),
279 "cpu_stop: %s(%p) leaked preempt count\n",
280 kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
281 ksym_buf), arg);
282
283 cpu_stop_signal_done(done, true);
284 } else
285 schedule();
286
287 goto repeat;
288}
289
290/* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
291static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
292 unsigned long action, void *hcpu)
293{
294 struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
295 unsigned int cpu = (unsigned long)hcpu;
296 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
297 struct cpu_stop_work *work;
298 struct task_struct *p;
299
300 switch (action & ~CPU_TASKS_FROZEN) {
301 case CPU_UP_PREPARE:
302 BUG_ON(stopper->thread || stopper->enabled ||
303 !list_empty(&stopper->works));
304 p = kthread_create(cpu_stopper_thread, stopper, "migration/%d",
305 cpu);
306 if (IS_ERR(p))
307 return NOTIFY_BAD;
308 sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
309 get_task_struct(p);
310 stopper->thread = p;
311 break;
312
313 case CPU_ONLINE:
314 kthread_bind(stopper->thread, cpu);
315 /* strictly unnecessary, as first user will wake it */
316 wake_up_process(stopper->thread);
317 /* mark enabled */
318 spin_lock_irq(&stopper->lock);
319 stopper->enabled = true;
320 spin_unlock_irq(&stopper->lock);
321 break;
322
323#ifdef CONFIG_HOTPLUG_CPU
324 case CPU_UP_CANCELED:
325 case CPU_DEAD:
326 /* kill the stopper */
327 kthread_stop(stopper->thread);
328 /* drain remaining works */
329 spin_lock_irq(&stopper->lock);
330 list_for_each_entry(work, &stopper->works, list)
331 cpu_stop_signal_done(work->done, false);
332 stopper->enabled = false;
333 spin_unlock_irq(&stopper->lock);
334 /* release the stopper */
335 put_task_struct(stopper->thread);
336 stopper->thread = NULL;
337 break;
338#endif
339 }
340
341 return NOTIFY_OK;
342}
343
344/*
345 * Give it a higher priority so that cpu stopper is available to other
346 * cpu notifiers. It currently shares the same priority as sched
347 * migration_notifier.
348 */
349static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
350 .notifier_call = cpu_stop_cpu_callback,
351 .priority = 10,
352};
353
354static int __init cpu_stop_init(void)
355{
356 void *bcpu = (void *)(long)smp_processor_id();
357 unsigned int cpu;
358 int err;
359
360 for_each_possible_cpu(cpu) {
361 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
362
363 spin_lock_init(&stopper->lock);
364 INIT_LIST_HEAD(&stopper->works);
365 }
366
367 /* start one for the boot cpu */
368 err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
369 bcpu);
370 BUG_ON(err == NOTIFY_BAD);
371 cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
372 register_cpu_notifier(&cpu_stop_cpu_notifier);
373
374 return 0;
375}
376early_initcall(cpu_stop_init);
377
378#ifdef CONFIG_STOP_MACHINE
15 379
16/* This controls the threads on each CPU. */ 380/* This controls the threads on each CPU. */
17enum stopmachine_state { 381enum stopmachine_state {
@@ -26,174 +390,94 @@ enum stopmachine_state {
26 /* Exit */ 390 /* Exit */
27 STOPMACHINE_EXIT, 391 STOPMACHINE_EXIT,
28}; 392};
29static enum stopmachine_state state;
30 393
31struct stop_machine_data { 394struct stop_machine_data {
32 int (*fn)(void *); 395 int (*fn)(void *);
33 void *data; 396 void *data;
34 int fnret; 397 /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
398 unsigned int num_threads;
399 const struct cpumask *active_cpus;
400
401 enum stopmachine_state state;
402 atomic_t thread_ack;
35}; 403};
36 404
37/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */ 405static void set_state(struct stop_machine_data *smdata,
38static unsigned int num_threads; 406 enum stopmachine_state newstate)
39static atomic_t thread_ack;
40static DEFINE_MUTEX(lock);
41/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
42static DEFINE_MUTEX(setup_lock);
43/* Users of stop_machine. */
44static int refcount;
45static struct workqueue_struct *stop_machine_wq;
46static struct stop_machine_data active, idle;
47static const struct cpumask *active_cpus;
48static void __percpu *stop_machine_work;
49
50static void set_state(enum stopmachine_state newstate)
51{ 407{
52 /* Reset ack counter. */ 408 /* Reset ack counter. */
53 atomic_set(&thread_ack, num_threads); 409 atomic_set(&smdata->thread_ack, smdata->num_threads);
54 smp_wmb(); 410 smp_wmb();
55 state = newstate; 411 smdata->state = newstate;
56} 412}
57 413
58/* Last one to ack a state moves to the next state. */ 414/* Last one to ack a state moves to the next state. */
59static void ack_state(void) 415static void ack_state(struct stop_machine_data *smdata)
60{ 416{
61 if (atomic_dec_and_test(&thread_ack)) 417 if (atomic_dec_and_test(&smdata->thread_ack))
62 set_state(state + 1); 418 set_state(smdata, smdata->state + 1);
63} 419}
64 420
65/* This is the actual function which stops the CPU. It runs 421/* This is the cpu_stop function which stops the CPU. */
66 * in the context of a dedicated stopmachine workqueue. */ 422static int stop_machine_cpu_stop(void *data)
67static void stop_cpu(struct work_struct *unused)
68{ 423{
424 struct stop_machine_data *smdata = data;
69 enum stopmachine_state curstate = STOPMACHINE_NONE; 425 enum stopmachine_state curstate = STOPMACHINE_NONE;
70 struct stop_machine_data *smdata = &idle; 426 int cpu = smp_processor_id(), err = 0;
71 int cpu = smp_processor_id(); 427 bool is_active;
72 int err; 428
429 if (!smdata->active_cpus)
430 is_active = cpu == cpumask_first(cpu_online_mask);
431 else
432 is_active = cpumask_test_cpu(cpu, smdata->active_cpus);
73 433
74 if (!active_cpus) {
75 if (cpu == cpumask_first(cpu_online_mask))
76 smdata = &active;
77 } else {
78 if (cpumask_test_cpu(cpu, active_cpus))
79 smdata = &active;
80 }
81 /* Simple state machine */ 434 /* Simple state machine */
82 do { 435 do {
83 /* Chill out and ensure we re-read stopmachine_state. */ 436 /* Chill out and ensure we re-read stopmachine_state. */
84 cpu_relax(); 437 cpu_relax();
85 if (state != curstate) { 438 if (smdata->state != curstate) {
86 curstate = state; 439 curstate = smdata->state;
87 switch (curstate) { 440 switch (curstate) {
88 case STOPMACHINE_DISABLE_IRQ: 441 case STOPMACHINE_DISABLE_IRQ:
89 local_irq_disable(); 442 local_irq_disable();
90 hard_irq_disable(); 443 hard_irq_disable();
91 break; 444 break;
92 case STOPMACHINE_RUN: 445 case STOPMACHINE_RUN:
93 /* On multiple CPUs only a single error code 446 if (is_active)
94 * is needed to tell that something failed. */ 447 err = smdata->fn(smdata->data);
95 err = smdata->fn(smdata->data);
96 if (err)
97 smdata->fnret = err;
98 break; 448 break;
99 default: 449 default:
100 break; 450 break;
101 } 451 }
102 ack_state(); 452 ack_state(smdata);
103 } 453 }
104 } while (curstate != STOPMACHINE_EXIT); 454 } while (curstate != STOPMACHINE_EXIT);
105 455
106 local_irq_enable(); 456 local_irq_enable();
457 return err;
107} 458}
108 459
109/* Callback for CPUs which aren't supposed to do anything. */
110static int chill(void *unused)
111{
112 return 0;
113}
114
115int stop_machine_create(void)
116{
117 mutex_lock(&setup_lock);
118 if (refcount)
119 goto done;
120 stop_machine_wq = create_rt_workqueue("kstop");
121 if (!stop_machine_wq)
122 goto err_out;
123 stop_machine_work = alloc_percpu(struct work_struct);
124 if (!stop_machine_work)
125 goto err_out;
126done:
127 refcount++;
128 mutex_unlock(&setup_lock);
129 return 0;
130
131err_out:
132 if (stop_machine_wq)
133 destroy_workqueue(stop_machine_wq);
134 mutex_unlock(&setup_lock);
135 return -ENOMEM;
136}
137EXPORT_SYMBOL_GPL(stop_machine_create);
138
139void stop_machine_destroy(void)
140{
141 mutex_lock(&setup_lock);
142 refcount--;
143 if (refcount)
144 goto done;
145 destroy_workqueue(stop_machine_wq);
146 free_percpu(stop_machine_work);
147done:
148 mutex_unlock(&setup_lock);
149}
150EXPORT_SYMBOL_GPL(stop_machine_destroy);
151
152int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) 460int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
153{ 461{
154 struct work_struct *sm_work; 462 struct stop_machine_data smdata = { .fn = fn, .data = data,
155 int i, ret; 463 .num_threads = num_online_cpus(),
156 464 .active_cpus = cpus };
157 /* Set up initial state. */ 465
158 mutex_lock(&lock); 466 /* Set the initial state and stop all online cpus. */
159 num_threads = num_online_cpus(); 467 set_state(&smdata, STOPMACHINE_PREPARE);
160 active_cpus = cpus; 468 return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
161 active.fn = fn;
162 active.data = data;
163 active.fnret = 0;
164 idle.fn = chill;
165 idle.data = NULL;
166
167 set_state(STOPMACHINE_PREPARE);
168
169 /* Schedule the stop_cpu work on all cpus: hold this CPU so one
170 * doesn't hit this CPU until we're ready. */
171 get_cpu();
172 for_each_online_cpu(i) {
173 sm_work = per_cpu_ptr(stop_machine_work, i);
174 INIT_WORK(sm_work, stop_cpu);
175 queue_work_on(i, stop_machine_wq, sm_work);
176 }
177 /* This will release the thread on our CPU. */
178 put_cpu();
179 flush_workqueue(stop_machine_wq);
180 ret = active.fnret;
181 mutex_unlock(&lock);
182 return ret;
183} 469}
184 470
185int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) 471int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
186{ 472{
187 int ret; 473 int ret;
188 474
189 ret = stop_machine_create();
190 if (ret)
191 return ret;
192 /* No CPUs can come up or down during this. */ 475 /* No CPUs can come up or down during this. */
193 get_online_cpus(); 476 get_online_cpus();
194 ret = __stop_machine(fn, data, cpus); 477 ret = __stop_machine(fn, data, cpus);
195 put_online_cpus(); 478 put_online_cpus();
196 stop_machine_destroy();
197 return ret; 479 return ret;
198} 480}
199EXPORT_SYMBOL_GPL(stop_machine); 481EXPORT_SYMBOL_GPL(stop_machine);
482
483#endif /* CONFIG_STOP_MACHINE */
diff --git a/kernel/sys.c b/kernel/sys.c
index 6d1a7e0f9d5b..7cb426a58965 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1118,7 +1118,7 @@ DECLARE_RWSEM(uts_sem);
1118 1118
1119#ifdef COMPAT_UTS_MACHINE 1119#ifdef COMPAT_UTS_MACHINE
1120#define override_architecture(name) \ 1120#define override_architecture(name) \
1121 (current->personality == PER_LINUX32 && \ 1121 (personality(current->personality) == PER_LINUX32 && \
1122 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \ 1122 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1123 sizeof(COMPAT_UTS_MACHINE))) 1123 sizeof(COMPAT_UTS_MACHINE)))
1124#else 1124#else
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f992762d7f51..1d7b9bc1c034 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -150,14 +150,32 @@ static void tick_nohz_update_jiffies(ktime_t now)
150 touch_softlockup_watchdog(); 150 touch_softlockup_watchdog();
151} 151}
152 152
153/*
154 * Updates the per cpu time idle statistics counters
155 */
156static void
157update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
158{
159 ktime_t delta;
160
161 if (ts->idle_active) {
162 delta = ktime_sub(now, ts->idle_entrytime);
163 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
164 if (nr_iowait_cpu() > 0)
165 ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
166 ts->idle_entrytime = now;
167 }
168
169 if (last_update_time)
170 *last_update_time = ktime_to_us(now);
171
172}
173
153static void tick_nohz_stop_idle(int cpu, ktime_t now) 174static void tick_nohz_stop_idle(int cpu, ktime_t now)
154{ 175{
155 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 176 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
156 ktime_t delta;
157 177
158 delta = ktime_sub(now, ts->idle_entrytime); 178 update_ts_time_stats(ts, now, NULL);
159 ts->idle_lastupdate = now;
160 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
161 ts->idle_active = 0; 179 ts->idle_active = 0;
162 180
163 sched_clock_idle_wakeup_event(0); 181 sched_clock_idle_wakeup_event(0);
@@ -165,20 +183,32 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
165 183
166static ktime_t tick_nohz_start_idle(struct tick_sched *ts) 184static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
167{ 185{
168 ktime_t now, delta; 186 ktime_t now;
169 187
170 now = ktime_get(); 188 now = ktime_get();
171 if (ts->idle_active) { 189
172 delta = ktime_sub(now, ts->idle_entrytime); 190 update_ts_time_stats(ts, now, NULL);
173 ts->idle_lastupdate = now; 191
174 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
175 }
176 ts->idle_entrytime = now; 192 ts->idle_entrytime = now;
177 ts->idle_active = 1; 193 ts->idle_active = 1;
178 sched_clock_idle_sleep_event(); 194 sched_clock_idle_sleep_event();
179 return now; 195 return now;
180} 196}
181 197
198/**
199 * get_cpu_idle_time_us - get the total idle time of a cpu
200 * @cpu: CPU number to query
201 * @last_update_time: variable to store update time in
202 *
 203 * Return the cumulative idle time (since boot) for a given
204 * CPU, in microseconds. The idle time returned includes
205 * the iowait time (unlike what "top" and co report).
206 *
207 * This time is measured via accounting rather than sampling,
208 * and is as accurate as ktime_get() is.
209 *
210 * This function returns -1 if NOHZ is not enabled.
211 */
182u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) 212u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
183{ 213{
184 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 214 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
@@ -186,15 +216,38 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
186 if (!tick_nohz_enabled) 216 if (!tick_nohz_enabled)
187 return -1; 217 return -1;
188 218
189 if (ts->idle_active) 219 update_ts_time_stats(ts, ktime_get(), last_update_time);
190 *last_update_time = ktime_to_us(ts->idle_lastupdate);
191 else
192 *last_update_time = ktime_to_us(ktime_get());
193 220
194 return ktime_to_us(ts->idle_sleeptime); 221 return ktime_to_us(ts->idle_sleeptime);
195} 222}
196EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); 223EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
197 224
225/*
226 * get_cpu_iowait_time_us - get the total iowait time of a cpu
227 * @cpu: CPU number to query
228 * @last_update_time: variable to store update time in
229 *
 230 * Return the cumulative iowait time (since boot) for a given
231 * CPU, in microseconds.
232 *
233 * This time is measured via accounting rather than sampling,
234 * and is as accurate as ktime_get() is.
235 *
236 * This function returns -1 if NOHZ is not enabled.
237 */
238u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
239{
240 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
241
242 if (!tick_nohz_enabled)
243 return -1;
244
245 update_ts_time_stats(ts, ktime_get(), last_update_time);
246
247 return ktime_to_us(ts->iowait_sleeptime);
248}
249EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
250
198/** 251/**
199 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task 252 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
200 * 253 *
@@ -262,6 +315,9 @@ void tick_nohz_stop_sched_tick(int inidle)
262 goto end; 315 goto end;
263 } 316 }
264 317
318 if (nohz_ratelimit(cpu))
319 goto end;
320
265 ts->idle_calls++; 321 ts->idle_calls++;
266 /* Read jiffies and the time when jiffies were updated last */ 322 /* Read jiffies and the time when jiffies were updated last */
267 do { 323 do {
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 1a4a7dd78777..ab8f5e33fa92 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -176,6 +176,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
176 P_ns(idle_waketime); 176 P_ns(idle_waketime);
177 P_ns(idle_exittime); 177 P_ns(idle_exittime);
178 P_ns(idle_sleeptime); 178 P_ns(idle_sleeptime);
179 P_ns(iowait_sleeptime);
179 P(last_jiffies); 180 P(last_jiffies);
180 P(next_jiffies); 181 P(next_jiffies);
181 P_ns(idle_expires); 182 P_ns(idle_expires);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8c9c2934c45f..32837e19e3bd 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3234,8 +3234,7 @@ free:
3234} 3234}
3235 3235
3236static void 3236static void
3237ftrace_graph_probe_sched_switch(struct rq *__rq, struct task_struct *prev, 3237ftrace_graph_probe_sched_switch(struct task_struct *prev, struct task_struct *next)
3238 struct task_struct *next)
3239{ 3238{
3240 unsigned long long timestamp; 3239 unsigned long long timestamp;
3241 int index; 3240 int index;
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index 5fca0f51fde4..a55fccfede5d 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -50,8 +50,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
50} 50}
51 51
52static void 52static void
53probe_sched_switch(struct rq *__rq, struct task_struct *prev, 53probe_sched_switch(struct task_struct *prev, struct task_struct *next)
54 struct task_struct *next)
55{ 54{
56 struct trace_array_cpu *data; 55 struct trace_array_cpu *data;
57 unsigned long flags; 56 unsigned long flags;
@@ -109,7 +108,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
109} 108}
110 109
111static void 110static void
112probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success) 111probe_sched_wakeup(struct task_struct *wakee, int success)
113{ 112{
114 struct trace_array_cpu *data; 113 struct trace_array_cpu *data;
115 unsigned long flags; 114 unsigned long flags;
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 0271742abb8d..8052446ceeaa 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -107,8 +107,7 @@ static void probe_wakeup_migrate_task(struct task_struct *task, int cpu)
107} 107}
108 108
109static void notrace 109static void notrace
110probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, 110probe_wakeup_sched_switch(struct task_struct *prev, struct task_struct *next)
111 struct task_struct *next)
112{ 111{
113 struct trace_array_cpu *data; 112 struct trace_array_cpu *data;
114 cycle_t T0, T1, delta; 113 cycle_t T0, T1, delta;
@@ -200,7 +199,7 @@ static void wakeup_reset(struct trace_array *tr)
200} 199}
201 200
202static void 201static void
203probe_wakeup(struct rq *rq, struct task_struct *p, int success) 202probe_wakeup(struct task_struct *p, int success)
204{ 203{
205 struct trace_array_cpu *data; 204 struct trace_array_cpu *data;
206 int cpu = smp_processor_id(); 205 int cpu = smp_processor_id();
diff --git a/kernel/user.c b/kernel/user.c
index 766467b3bcb7..7e72614b736d 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,7 +16,6 @@
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/user_namespace.h> 18#include <linux/user_namespace.h>
19#include "cred-internals.h"
20 19
21struct user_namespace init_user_ns = { 20struct user_namespace init_user_ns = {
22 .kref = { 21 .kref = {
@@ -137,9 +136,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
137 struct hlist_head *hashent = uidhashentry(ns, uid); 136 struct hlist_head *hashent = uidhashentry(ns, uid);
138 struct user_struct *up, *new; 137 struct user_struct *up, *new;
139 138
140 /* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
141 * atomic.
142 */
143 spin_lock_irq(&uidhash_lock); 139 spin_lock_irq(&uidhash_lock);
144 up = uid_hash_find(uid, hashent); 140 up = uid_hash_find(uid, hashent);
145 spin_unlock_irq(&uidhash_lock); 141 spin_unlock_irq(&uidhash_lock);
@@ -161,11 +157,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
161 spin_lock_irq(&uidhash_lock); 157 spin_lock_irq(&uidhash_lock);
162 up = uid_hash_find(uid, hashent); 158 up = uid_hash_find(uid, hashent);
163 if (up) { 159 if (up) {
164 /* This case is not possible when CONFIG_USER_SCHED
165 * is defined, since we serialize alloc_uid() using
166 * uids_mutex. Hence no need to call
167 * sched_destroy_user() or remove_user_sysfs_dir().
168 */
169 key_put(new->uid_keyring); 160 key_put(new->uid_keyring);
170 key_put(new->session_keyring); 161 key_put(new->session_keyring);
171 kmem_cache_free(uid_cachep, new); 162 kmem_cache_free(uid_cachep, new);
@@ -178,8 +169,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
178 169
179 return up; 170 return up;
180 171
181 put_user_ns(new->user_ns);
182 kmem_cache_free(uid_cachep, new);
183out_unlock: 172out_unlock:
184 return NULL; 173 return NULL;
185} 174}