Diffstat (limited to 'kernel')
-rw-r--r--  kernel/capability.c       |    1
-rw-r--r--  kernel/cpu.c              |   18
-rw-r--r--  kernel/cpuset.c           |   67
-rw-r--r--  kernel/cred-internals.h   |   21
-rw-r--r--  kernel/cred.c             |    3
-rw-r--r--  kernel/exit.c             |    1
-rw-r--r--  kernel/sched.c            |  339
-rw-r--r--  kernel/sched_debug.c      |  108
-rw-r--r--  kernel/sched_fair.c       |  222
-rw-r--r--  kernel/sched_features.h   |   55
-rw-r--r--  kernel/sched_idletask.c   |    5
-rw-r--r--  kernel/sched_rt.c         |   15
-rw-r--r--  kernel/time/tick-sched.c  |    3
-rw-r--r--  kernel/user.c             |   12
14 files changed, 300 insertions, 570 deletions
diff --git a/kernel/capability.c b/kernel/capability.c
index 9e4697e9b276..2f05303715a5 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -15,7 +15,6 @@
15#include <linux/syscalls.h> 15#include <linux/syscalls.h>
16#include <linux/pid_namespace.h> 16#include <linux/pid_namespace.h>
17#include <asm/uaccess.h> 17#include <asm/uaccess.h>
18#include "cred-internals.h"
19 18
20/* 19/*
21 * Leveraged for setting/resetting capabilities 20 * Leveraged for setting/resetting capabilities
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 25bba73b1be3..914aedcde849 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -164,6 +164,7 @@ static inline void check_for_tasks(int cpu)
164} 164}
165 165
166struct take_cpu_down_param { 166struct take_cpu_down_param {
167 struct task_struct *caller;
167 unsigned long mod; 168 unsigned long mod;
168 void *hcpu; 169 void *hcpu;
169}; 170};
@@ -172,6 +173,7 @@ struct take_cpu_down_param {
172static int __ref take_cpu_down(void *_param) 173static int __ref take_cpu_down(void *_param)
173{ 174{
174 struct take_cpu_down_param *param = _param; 175 struct take_cpu_down_param *param = _param;
176 unsigned int cpu = (unsigned long)param->hcpu;
175 int err; 177 int err;
176 178
177 /* Ensure this CPU doesn't handle any more interrupts. */ 179 /* Ensure this CPU doesn't handle any more interrupts. */
@@ -182,6 +184,8 @@ static int __ref take_cpu_down(void *_param)
182 raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod, 184 raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
183 param->hcpu); 185 param->hcpu);
184 186
187 if (task_cpu(param->caller) == cpu)
188 move_task_off_dead_cpu(cpu, param->caller);
185 /* Force idle task to run as soon as we yield: it should 189 /* Force idle task to run as soon as we yield: it should
186 immediately notice cpu is offline and die quickly. */ 190 immediately notice cpu is offline and die quickly. */
187 sched_idle_next(); 191 sched_idle_next();
@@ -192,10 +196,10 @@ static int __ref take_cpu_down(void *_param)
192static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) 196static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
193{ 197{
194 int err, nr_calls = 0; 198 int err, nr_calls = 0;
195 cpumask_var_t old_allowed;
196 void *hcpu = (void *)(long)cpu; 199 void *hcpu = (void *)(long)cpu;
197 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; 200 unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
198 struct take_cpu_down_param tcd_param = { 201 struct take_cpu_down_param tcd_param = {
202 .caller = current,
199 .mod = mod, 203 .mod = mod,
200 .hcpu = hcpu, 204 .hcpu = hcpu,
201 }; 205 };
@@ -206,9 +210,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
206 if (!cpu_online(cpu)) 210 if (!cpu_online(cpu))
207 return -EINVAL; 211 return -EINVAL;
208 212
209 if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
210 return -ENOMEM;
211
212 cpu_hotplug_begin(); 213 cpu_hotplug_begin();
213 set_cpu_active(cpu, false); 214 set_cpu_active(cpu, false);
214 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod, 215 err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
@@ -225,10 +226,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
225 goto out_release; 226 goto out_release;
226 } 227 }
227 228
228 /* Ensure that we are not runnable on dying cpu */
229 cpumask_copy(old_allowed, &current->cpus_allowed);
230 set_cpus_allowed_ptr(current, cpu_active_mask);
231
232 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); 229 err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
233 if (err) { 230 if (err) {
234 set_cpu_active(cpu, true); 231 set_cpu_active(cpu, true);
@@ -237,7 +234,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
237 hcpu) == NOTIFY_BAD) 234 hcpu) == NOTIFY_BAD)
238 BUG(); 235 BUG();
239 236
240 goto out_allowed; 237 goto out_release;
241 } 238 }
242 BUG_ON(cpu_online(cpu)); 239 BUG_ON(cpu_online(cpu));
243 240
@@ -255,8 +252,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
255 252
256 check_for_tasks(cpu); 253 check_for_tasks(cpu);
257 254
258out_allowed:
259 set_cpus_allowed_ptr(current, old_allowed);
260out_release: 255out_release:
261 cpu_hotplug_done(); 256 cpu_hotplug_done();
262 if (!err) { 257 if (!err) {
@@ -264,7 +259,6 @@ out_release:
264 hcpu) == NOTIFY_BAD) 259 hcpu) == NOTIFY_BAD)
265 BUG(); 260 BUG();
266 } 261 }
267 free_cpumask_var(old_allowed);
268 return err; 262 return err;
269} 263}
270 264
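
The kernel/cpu.c hunks above drop the old _cpu_down() pattern of saving current->cpus_allowed, pinning the caller to the remaining active CPUs, and restoring the mask afterwards. Instead the stop_machine() callback take_cpu_down() now carries a pointer to the calling task and migrates it off the dying CPU only if it is actually running there. A minimal user-space sketch of that ordering (illustrative only, not the kernel implementation; the helper names merely mirror those in the diff):

#include <stdio.h>

struct task { int cpu; };

/* stand-in for the scheduler helper the diff exports from sched.c */
static void move_task_off_dead_cpu(int dead_cpu, struct task *t)
{
    t->cpu = (dead_cpu + 1) % 4;        /* pretend 4 CPUs: pick any other one */
}

/* runs "on" the dying CPU, like the stop_machine() callback */
static void take_cpu_down(int dying_cpu, struct task *caller)
{
    if (caller->cpu == dying_cpu)       /* only touch the caller when needed */
        move_task_off_dead_cpu(dying_cpu, caller);
}

int main(void)
{
    struct task caller = { .cpu = 2 };

    take_cpu_down(2, &caller);
    printf("caller now on cpu %d\n", caller.cpu);   /* prints 3 */
    return 0;
}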
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index d10946748ec2..9a50c5f6e727 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2182,19 +2182,52 @@ void __init cpuset_init_smp(void)
2182void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) 2182void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2183{ 2183{
2184 mutex_lock(&callback_mutex); 2184 mutex_lock(&callback_mutex);
2185 cpuset_cpus_allowed_locked(tsk, pmask); 2185 task_lock(tsk);
2186 guarantee_online_cpus(task_cs(tsk), pmask);
2187 task_unlock(tsk);
2186 mutex_unlock(&callback_mutex); 2188 mutex_unlock(&callback_mutex);
2187} 2189}
2188 2190
2189/** 2191int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
2190 * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset.
2191 * Must be called with callback_mutex held.
2192 **/
2193void cpuset_cpus_allowed_locked(struct task_struct *tsk, struct cpumask *pmask)
2194{ 2192{
2195 task_lock(tsk); 2193 const struct cpuset *cs;
2196 guarantee_online_cpus(task_cs(tsk), pmask); 2194 int cpu;
2197 task_unlock(tsk); 2195
2196 rcu_read_lock();
2197 cs = task_cs(tsk);
2198 if (cs)
2199 cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
2200 rcu_read_unlock();
2201
2202 /*
2203 * We own tsk->cpus_allowed, nobody can change it under us.
2204 *
2205 * But we used cs && cs->cpus_allowed lockless and thus can
2206 * race with cgroup_attach_task() or update_cpumask() and get
2207 * the wrong tsk->cpus_allowed. However, both cases imply the
2208 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
2209 * which takes task_rq_lock().
2210 *
2211 * If we are called after it dropped the lock we must see all
2212 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporary
2213 * set any mask even if it is not right from task_cs() pov,
2214 * the pending set_cpus_allowed_ptr() will fix things.
2215 */
2216
2217 cpu = cpumask_any_and(&tsk->cpus_allowed, cpu_active_mask);
2218 if (cpu >= nr_cpu_ids) {
2219 /*
2220 * Either tsk->cpus_allowed is wrong (see above) or it
2221 * is actually empty. The latter case is only possible
2222 * if we are racing with remove_tasks_in_empty_cpuset().
2223 * Like above we can temporary set any mask and rely on
2224 * set_cpus_allowed_ptr() as synchronization point.
2225 */
2226 cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
2227 cpu = cpumask_any(cpu_active_mask);
2228 }
2229
2230 return cpu;
2198} 2231}
2199 2232
2200void cpuset_init_current_mems_allowed(void) 2233void cpuset_init_current_mems_allowed(void)
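
The new cpuset_cpus_allowed_fallback() above copies the cpuset's mask into tsk->cpus_allowed without taking callback_mutex, then returns any CPU in its intersection with cpu_active_mask, falling back to cpu_possible_mask when that intersection is empty; as the in-code comment explains, the later set_cpus_allowed_ptr() call acts as the synchronization point. A minimal sketch of the selection logic, with CPU masks modelled as plain bit words (an assumption of this sketch, not the kernel's cpumask API):

#include <stdio.h>

#define NR_CPUS 8

static int pick_fallback_cpu(unsigned int allowed, unsigned int active)
{
    unsigned int both = allowed & active;
    unsigned int mask = both ? both : active;   /* empty intersection: any active CPU */
    int cpu;

    for (cpu = 0; cpu < NR_CPUS; cpu++)
        if (mask & (1u << cpu))
            return cpu;
    return -1;                                  /* no active CPU at all */
}

int main(void)
{
    printf("%d\n", pick_fallback_cpu(0x0c, 0x0f));  /* 2: cpuset allows {2,3} */
    printf("%d\n", pick_fallback_cpu(0x30, 0x0f));  /* 0: the cpuset's CPUs are all offline */
    return 0;
}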
@@ -2383,22 +2416,6 @@ int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
2383} 2416}
2384 2417
2385/** 2418/**
2386 * cpuset_lock - lock out any changes to cpuset structures
2387 *
2388 * The out of memory (oom) code needs to mutex_lock cpusets
2389 * from being changed while it scans the tasklist looking for a
2390 * task in an overlapping cpuset. Expose callback_mutex via this
2391 * cpuset_lock() routine, so the oom code can lock it, before
2392 * locking the task list. The tasklist_lock is a spinlock, so
2393 * must be taken inside callback_mutex.
2394 */
2395
2396void cpuset_lock(void)
2397{
2398 mutex_lock(&callback_mutex);
2399}
2400
2401/**
2402 * cpuset_unlock - release lock on cpuset changes 2419 * cpuset_unlock - release lock on cpuset changes
2403 * 2420 *
2404 * Undo the lock taken in a previous cpuset_lock() call. 2421 * Undo the lock taken in a previous cpuset_lock() call.
diff --git a/kernel/cred-internals.h b/kernel/cred-internals.h
deleted file mode 100644
index 2dc4fc2d0bf1..000000000000
--- a/kernel/cred-internals.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/* Internal credentials stuff
2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12/*
13 * user.c
14 */
15static inline void sched_switch_user(struct task_struct *p)
16{
17#ifdef CONFIG_USER_SCHED
18 sched_move_task(p);
19#endif /* CONFIG_USER_SCHED */
20}
21
diff --git a/kernel/cred.c b/kernel/cred.c
index e1dbe9eef800..4f483be5944c 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -17,7 +17,6 @@
17#include <linux/init_task.h> 17#include <linux/init_task.h>
18#include <linux/security.h> 18#include <linux/security.h>
19#include <linux/cn_proc.h> 19#include <linux/cn_proc.h>
20#include "cred-internals.h"
21 20
22#if 0 21#if 0
23#define kdebug(FMT, ...) \ 22#define kdebug(FMT, ...) \
@@ -558,8 +557,6 @@ int commit_creds(struct cred *new)
558 atomic_dec(&old->user->processes); 557 atomic_dec(&old->user->processes);
559 alter_cred_subscribers(old, -2); 558 alter_cred_subscribers(old, -2);
560 559
561 sched_switch_user(task);
562
563 /* send notifications */ 560 /* send notifications */
564 if (new->uid != old->uid || 561 if (new->uid != old->uid ||
565 new->euid != old->euid || 562 new->euid != old->euid ||
diff --git a/kernel/exit.c b/kernel/exit.c
index 7f2683a10ac4..eabca5a73a85 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -55,7 +55,6 @@
55#include <asm/unistd.h> 55#include <asm/unistd.h>
56#include <asm/pgtable.h> 56#include <asm/pgtable.h>
57#include <asm/mmu_context.h> 57#include <asm/mmu_context.h>
58#include "cred-internals.h"
59 58
60static void exit_mm(struct task_struct * tsk); 59static void exit_mm(struct task_struct * tsk);
61 60
diff --git a/kernel/sched.c b/kernel/sched.c
index 6af210a7de70..ab562ae4007c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -493,8 +493,11 @@ struct rq {
493 #define CPU_LOAD_IDX_MAX 5 493 #define CPU_LOAD_IDX_MAX 5
494 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 494 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
495#ifdef CONFIG_NO_HZ 495#ifdef CONFIG_NO_HZ
496 u64 nohz_stamp;
496 unsigned char in_nohz_recently; 497 unsigned char in_nohz_recently;
497#endif 498#endif
499 unsigned int skip_clock_update;
500
498 /* capture load from *all* tasks on this cpu: */ 501 /* capture load from *all* tasks on this cpu: */
499 struct load_weight load; 502 struct load_weight load;
500 unsigned long nr_load_updates; 503 unsigned long nr_load_updates;
@@ -592,6 +595,13 @@ static inline
592void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) 595void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
593{ 596{
594 rq->curr->sched_class->check_preempt_curr(rq, p, flags); 597 rq->curr->sched_class->check_preempt_curr(rq, p, flags);
598
599 /*
600 * A queue event has occurred, and we're going to schedule. In
601 * this case, we can save a useless back to back clock update.
602 */
603 if (test_tsk_need_resched(p))
604 rq->skip_clock_update = 1;
595} 605}
596 606
597static inline int cpu_of(struct rq *rq) 607static inline int cpu_of(struct rq *rq)
@@ -626,7 +636,8 @@ static inline int cpu_of(struct rq *rq)
626 636
627inline void update_rq_clock(struct rq *rq) 637inline void update_rq_clock(struct rq *rq)
628{ 638{
629 rq->clock = sched_clock_cpu(cpu_of(rq)); 639 if (!rq->skip_clock_update)
640 rq->clock = sched_clock_cpu(cpu_of(rq));
630} 641}
631 642
632/* 643/*
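
Taken together, these hunks introduce rq->skip_clock_update: check_preempt_curr() sets it once a reschedule is already pending, update_rq_clock() then skips the redundant sched_clock_cpu() read, and put_prev_task() (further down in this diff) clears the flag again. A user-space model of the idea with a fake clock (a sketch only, not the kernel code):

#include <stdio.h>

struct rq { unsigned long long clock; unsigned int skip_clock_update; };

static unsigned long long sched_clock_cpu(void)
{
    static unsigned long long fake_ns;
    return fake_ns += 1000;                 /* pretend each read advances 1 us */
}

static void update_rq_clock(struct rq *rq)
{
    /* skip the read when a reschedule is already pending: the switch
     * path will refresh the clock anyway */
    if (!rq->skip_clock_update)
        rq->clock = sched_clock_cpu();
}

int main(void)
{
    struct rq rq = { 0, 0 };

    update_rq_clock(&rq);                   /* reads the clock: 1000 */
    rq.skip_clock_update = 1;               /* set when need_resched is already pending */
    update_rq_clock(&rq);                   /* back-to-back update is skipped */
    printf("clock=%llu\n", rq.clock);       /* still 1000 */
    return 0;
}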
@@ -904,16 +915,12 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
904#endif /* __ARCH_WANT_UNLOCKED_CTXSW */ 915#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
905 916
906/* 917/*
907 * Check whether the task is waking, we use this to synchronize against 918 * Check whether the task is waking, we use this to synchronize ->cpus_allowed
908 * ttwu() so that task_cpu() reports a stable number. 919 * against ttwu().
909 *
910 * We need to make an exception for PF_STARTING tasks because the fork
911 * path might require task_rq_lock() to work, eg. it can call
912 * set_cpus_allowed_ptr() from the cpuset clone_ns code.
913 */ 920 */
914static inline int task_is_waking(struct task_struct *p) 921static inline int task_is_waking(struct task_struct *p)
915{ 922{
916 return unlikely((p->state == TASK_WAKING) && !(p->flags & PF_STARTING)); 923 return unlikely(p->state == TASK_WAKING);
917} 924}
918 925
919/* 926/*
@@ -926,11 +933,9 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
926 struct rq *rq; 933 struct rq *rq;
927 934
928 for (;;) { 935 for (;;) {
929 while (task_is_waking(p))
930 cpu_relax();
931 rq = task_rq(p); 936 rq = task_rq(p);
932 raw_spin_lock(&rq->lock); 937 raw_spin_lock(&rq->lock);
933 if (likely(rq == task_rq(p) && !task_is_waking(p))) 938 if (likely(rq == task_rq(p)))
934 return rq; 939 return rq;
935 raw_spin_unlock(&rq->lock); 940 raw_spin_unlock(&rq->lock);
936 } 941 }
@@ -947,12 +952,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
947 struct rq *rq; 952 struct rq *rq;
948 953
949 for (;;) { 954 for (;;) {
950 while (task_is_waking(p))
951 cpu_relax();
952 local_irq_save(*flags); 955 local_irq_save(*flags);
953 rq = task_rq(p); 956 rq = task_rq(p);
954 raw_spin_lock(&rq->lock); 957 raw_spin_lock(&rq->lock);
955 if (likely(rq == task_rq(p) && !task_is_waking(p))) 958 if (likely(rq == task_rq(p)))
956 return rq; 959 return rq;
957 raw_spin_unlock_irqrestore(&rq->lock, *flags); 960 raw_spin_unlock_irqrestore(&rq->lock, *flags);
958 } 961 }
@@ -1229,6 +1232,17 @@ void wake_up_idle_cpu(int cpu)
1229 if (!tsk_is_polling(rq->idle)) 1232 if (!tsk_is_polling(rq->idle))
1230 smp_send_reschedule(cpu); 1233 smp_send_reschedule(cpu);
1231} 1234}
1235
1236int nohz_ratelimit(int cpu)
1237{
1238 struct rq *rq = cpu_rq(cpu);
1239 u64 diff = rq->clock - rq->nohz_stamp;
1240
1241 rq->nohz_stamp = rq->clock;
1242
1243 return diff < (NSEC_PER_SEC / HZ) >> 1;
1244}
1245
1232#endif /* CONFIG_NO_HZ */ 1246#endif /* CONFIG_NO_HZ */
1233 1247
1234static u64 sched_avg_period(void) 1248static u64 sched_avg_period(void)
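
nohz_ratelimit() above refuses to let a CPU stop its tick when the previous attempt happened less than half a tick period ago ((NSEC_PER_SEC / HZ) >> 1 nanoseconds). A self-contained model of the arithmetic; note the kernel version reads the already-maintained rq->clock, while this sketch has the current time passed in explicitly:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 1000                              /* assumed tick rate for the example */

struct rq_model { uint64_t clock, nohz_stamp; };

static int nohz_ratelimited(struct rq_model *rq, uint64_t now_ns)
{
    uint64_t diff;

    rq->clock = now_ns;                      /* the kernel keeps rq->clock updated elsewhere */
    diff = rq->clock - rq->nohz_stamp;
    rq->nohz_stamp = rq->clock;

    return diff < (NSEC_PER_SEC / HZ) >> 1;  /* true: too soon, keep the tick running */
}

int main(void)
{
    struct rq_model rq = { 0, 0 };

    printf("%d\n", nohz_ratelimited(&rq, 100000));   /* 1: only 0.1 ms since last attempt */
    printf("%d\n", nohz_ratelimited(&rq, 2000000));  /* 0: 1.9 ms later, tick may stop */
    return 0;
}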
@@ -1771,8 +1785,6 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
1771 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING); 1785 raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
1772 } 1786 }
1773 } 1787 }
1774 update_rq_clock(rq1);
1775 update_rq_clock(rq2);
1776} 1788}
1777 1789
1778/* 1790/*
@@ -1866,56 +1878,43 @@ static void update_avg(u64 *avg, u64 sample)
1866 *avg += diff >> 3; 1878 *avg += diff >> 3;
1867} 1879}
1868 1880
1869static void 1881static void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
1870enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
1871{ 1882{
1872 if (wakeup) 1883 update_rq_clock(rq);
1873 p->se.start_runtime = p->se.sum_exec_runtime;
1874
1875 sched_info_queued(p); 1884 sched_info_queued(p);
1876 p->sched_class->enqueue_task(rq, p, wakeup, head); 1885 p->sched_class->enqueue_task(rq, p, flags);
1877 p->se.on_rq = 1; 1886 p->se.on_rq = 1;
1878} 1887}
1879 1888
1880static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) 1889static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
1881{ 1890{
1882 if (sleep) { 1891 update_rq_clock(rq);
1883 if (p->se.last_wakeup) {
1884 update_avg(&p->se.avg_overlap,
1885 p->se.sum_exec_runtime - p->se.last_wakeup);
1886 p->se.last_wakeup = 0;
1887 } else {
1888 update_avg(&p->se.avg_wakeup,
1889 sysctl_sched_wakeup_granularity);
1890 }
1891 }
1892
1893 sched_info_dequeued(p); 1892 sched_info_dequeued(p);
1894 p->sched_class->dequeue_task(rq, p, sleep); 1893 p->sched_class->dequeue_task(rq, p, flags);
1895 p->se.on_rq = 0; 1894 p->se.on_rq = 0;
1896} 1895}
1897 1896
1898/* 1897/*
1899 * activate_task - move a task to the runqueue. 1898 * activate_task - move a task to the runqueue.
1900 */ 1899 */
1901static void activate_task(struct rq *rq, struct task_struct *p, int wakeup) 1900static void activate_task(struct rq *rq, struct task_struct *p, int flags)
1902{ 1901{
1903 if (task_contributes_to_load(p)) 1902 if (task_contributes_to_load(p))
1904 rq->nr_uninterruptible--; 1903 rq->nr_uninterruptible--;
1905 1904
1906 enqueue_task(rq, p, wakeup, false); 1905 enqueue_task(rq, p, flags);
1907 inc_nr_running(rq); 1906 inc_nr_running(rq);
1908} 1907}
1909 1908
1910/* 1909/*
1911 * deactivate_task - remove a task from the runqueue. 1910 * deactivate_task - remove a task from the runqueue.
1912 */ 1911 */
1913static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep) 1912static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
1914{ 1913{
1915 if (task_contributes_to_load(p)) 1914 if (task_contributes_to_load(p))
1916 rq->nr_uninterruptible++; 1915 rq->nr_uninterruptible++;
1917 1916
1918 dequeue_task(rq, p, sleep); 1917 dequeue_task(rq, p, flags);
1919 dec_nr_running(rq); 1918 dec_nr_running(rq);
1920} 1919}
1921 1920
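
This hunk replaces the separate wakeup/head and sleep boolean parameters of enqueue_task()/dequeue_task() with a single flags word (ENQUEUE_WAKEUP, ENQUEUE_HEAD, ENQUEUE_WAKING, DEQUEUE_SLEEP) and moves the update_rq_clock() call into those two helpers. A toy illustration of the pattern; the numeric flag values here are assumptions, the real definitions live in the scheduler headers:

#include <stdio.h>

#define ENQUEUE_WAKEUP  0x01    /* task is being woken, not freshly forked */
#define ENQUEUE_HEAD    0x02    /* queue at the head of the runqueue list */
#define ENQUEUE_WAKING  0x04    /* vruntime still normalized by task_waking() */

static void enqueue_task(int flags)
{
    if (flags & ENQUEUE_WAKEUP)
        printf("account a wakeup\n");
    if (flags & ENQUEUE_HEAD)
        printf("queue at the head\n");
    if (flags & ENQUEUE_WAKING)
        printf("re-add min_vruntime\n");
}

int main(void)
{
    enqueue_task(ENQUEUE_WAKEUP | ENQUEUE_WAKING);  /* try_to_wake_up() style call */
    enqueue_task(ENQUEUE_HEAD);                     /* rt_mutex_setprio() style call */
    return 0;
}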
@@ -2273,6 +2272,9 @@ void task_oncpu_function_call(struct task_struct *p,
2273} 2272}
2274 2273
2275#ifdef CONFIG_SMP 2274#ifdef CONFIG_SMP
2275/*
2276 * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
2277 */
2276static int select_fallback_rq(int cpu, struct task_struct *p) 2278static int select_fallback_rq(int cpu, struct task_struct *p)
2277{ 2279{
2278 int dest_cpu; 2280 int dest_cpu;
@@ -2289,12 +2291,8 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
2289 return dest_cpu; 2291 return dest_cpu;
2290 2292
2291 /* No more Mr. Nice Guy. */ 2293 /* No more Mr. Nice Guy. */
2292 if (dest_cpu >= nr_cpu_ids) { 2294 if (unlikely(dest_cpu >= nr_cpu_ids)) {
2293 rcu_read_lock(); 2295 dest_cpu = cpuset_cpus_allowed_fallback(p);
2294 cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
2295 rcu_read_unlock();
2296 dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
2297
2298 /* 2296 /*
2299 * Don't tell them about moving exiting tasks or 2297 * Don't tell them about moving exiting tasks or
2300 * kernel threads (both mm NULL), since they never 2298 * kernel threads (both mm NULL), since they never
@@ -2311,17 +2309,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
2311} 2309}
2312 2310
2313/* 2311/*
2314 * Gets called from 3 sites (exec, fork, wakeup), since it is called without 2312 * The caller (fork, wakeup) owns TASK_WAKING, ->cpus_allowed is stable.
2315 * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
2316 * by:
2317 *
2318 * exec: is unstable, retry loop
2319 * fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
2320 */ 2313 */
2321static inline 2314static inline
2322int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) 2315int select_task_rq(struct rq *rq, struct task_struct *p, int sd_flags, int wake_flags)
2323{ 2316{
2324 int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags); 2317 int cpu = p->sched_class->select_task_rq(rq, p, sd_flags, wake_flags);
2325 2318
2326 /* 2319 /*
2327 * In order not to call set_task_cpu() on a blocking task we need 2320 * In order not to call set_task_cpu() on a blocking task we need
@@ -2360,16 +2353,13 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2360{ 2353{
2361 int cpu, orig_cpu, this_cpu, success = 0; 2354 int cpu, orig_cpu, this_cpu, success = 0;
2362 unsigned long flags; 2355 unsigned long flags;
2356 unsigned long en_flags = ENQUEUE_WAKEUP;
2363 struct rq *rq; 2357 struct rq *rq;
2364 2358
2365 if (!sched_feat(SYNC_WAKEUPS))
2366 wake_flags &= ~WF_SYNC;
2367
2368 this_cpu = get_cpu(); 2359 this_cpu = get_cpu();
2369 2360
2370 smp_wmb(); 2361 smp_wmb();
2371 rq = task_rq_lock(p, &flags); 2362 rq = task_rq_lock(p, &flags);
2372 update_rq_clock(rq);
2373 if (!(p->state & state)) 2363 if (!(p->state & state))
2374 goto out; 2364 goto out;
2375 2365
@@ -2389,28 +2379,26 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2389 * 2379 *
2390 * First fix up the nr_uninterruptible count: 2380 * First fix up the nr_uninterruptible count:
2391 */ 2381 */
2392 if (task_contributes_to_load(p)) 2382 if (task_contributes_to_load(p)) {
2393 rq->nr_uninterruptible--; 2383 if (likely(cpu_online(orig_cpu)))
2384 rq->nr_uninterruptible--;
2385 else
2386 this_rq()->nr_uninterruptible--;
2387 }
2394 p->state = TASK_WAKING; 2388 p->state = TASK_WAKING;
2395 2389
2396 if (p->sched_class->task_waking) 2390 if (p->sched_class->task_waking) {
2397 p->sched_class->task_waking(rq, p); 2391 p->sched_class->task_waking(rq, p);
2392 en_flags |= ENQUEUE_WAKING;
2393 }
2398 2394
2399 __task_rq_unlock(rq); 2395 cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags);
2400 2396 if (cpu != orig_cpu)
2401 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
2402 if (cpu != orig_cpu) {
2403 /*
2404 * Since we migrate the task without holding any rq->lock,
2405 * we need to be careful with task_rq_lock(), since that
2406 * might end up locking an invalid rq.
2407 */
2408 set_task_cpu(p, cpu); 2397 set_task_cpu(p, cpu);
2409 } 2398 __task_rq_unlock(rq);
2410 2399
2411 rq = cpu_rq(cpu); 2400 rq = cpu_rq(cpu);
2412 raw_spin_lock(&rq->lock); 2401 raw_spin_lock(&rq->lock);
2413 update_rq_clock(rq);
2414 2402
2415 /* 2403 /*
2416 * We migrated the task without holding either rq->lock, however 2404 * We migrated the task without holding either rq->lock, however
@@ -2438,34 +2426,18 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
2438 2426
2439out_activate: 2427out_activate:
2440#endif /* CONFIG_SMP */ 2428#endif /* CONFIG_SMP */
2441 schedstat_inc(p, se.nr_wakeups); 2429 schedstat_inc(p, se.statistics.nr_wakeups);
2442 if (wake_flags & WF_SYNC) 2430 if (wake_flags & WF_SYNC)
2443 schedstat_inc(p, se.nr_wakeups_sync); 2431 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2444 if (orig_cpu != cpu) 2432 if (orig_cpu != cpu)
2445 schedstat_inc(p, se.nr_wakeups_migrate); 2433 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2446 if (cpu == this_cpu) 2434 if (cpu == this_cpu)
2447 schedstat_inc(p, se.nr_wakeups_local); 2435 schedstat_inc(p, se.statistics.nr_wakeups_local);
2448 else 2436 else
2449 schedstat_inc(p, se.nr_wakeups_remote); 2437 schedstat_inc(p, se.statistics.nr_wakeups_remote);
2450 activate_task(rq, p, 1); 2438 activate_task(rq, p, en_flags);
2451 success = 1; 2439 success = 1;
2452 2440
2453 /*
2454 * Only attribute actual wakeups done by this task.
2455 */
2456 if (!in_interrupt()) {
2457 struct sched_entity *se = &current->se;
2458 u64 sample = se->sum_exec_runtime;
2459
2460 if (se->last_wakeup)
2461 sample -= se->last_wakeup;
2462 else
2463 sample -= se->start_runtime;
2464 update_avg(&se->avg_wakeup, sample);
2465
2466 se->last_wakeup = se->sum_exec_runtime;
2467 }
2468
2469out_running: 2441out_running:
2470 trace_sched_wakeup(rq, p, success); 2442 trace_sched_wakeup(rq, p, success);
2471 check_preempt_curr(rq, p, wake_flags); 2443 check_preempt_curr(rq, p, wake_flags);
@@ -2527,42 +2499,9 @@ static void __sched_fork(struct task_struct *p)
2527 p->se.sum_exec_runtime = 0; 2499 p->se.sum_exec_runtime = 0;
2528 p->se.prev_sum_exec_runtime = 0; 2500 p->se.prev_sum_exec_runtime = 0;
2529 p->se.nr_migrations = 0; 2501 p->se.nr_migrations = 0;
2530 p->se.last_wakeup = 0;
2531 p->se.avg_overlap = 0;
2532 p->se.start_runtime = 0;
2533 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
2534 2502
2535#ifdef CONFIG_SCHEDSTATS 2503#ifdef CONFIG_SCHEDSTATS
2536 p->se.wait_start = 0; 2504 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
2537 p->se.wait_max = 0;
2538 p->se.wait_count = 0;
2539 p->se.wait_sum = 0;
2540
2541 p->se.sleep_start = 0;
2542 p->se.sleep_max = 0;
2543 p->se.sum_sleep_runtime = 0;
2544
2545 p->se.block_start = 0;
2546 p->se.block_max = 0;
2547 p->se.exec_max = 0;
2548 p->se.slice_max = 0;
2549
2550 p->se.nr_migrations_cold = 0;
2551 p->se.nr_failed_migrations_affine = 0;
2552 p->se.nr_failed_migrations_running = 0;
2553 p->se.nr_failed_migrations_hot = 0;
2554 p->se.nr_forced_migrations = 0;
2555
2556 p->se.nr_wakeups = 0;
2557 p->se.nr_wakeups_sync = 0;
2558 p->se.nr_wakeups_migrate = 0;
2559 p->se.nr_wakeups_local = 0;
2560 p->se.nr_wakeups_remote = 0;
2561 p->se.nr_wakeups_affine = 0;
2562 p->se.nr_wakeups_affine_attempts = 0;
2563 p->se.nr_wakeups_passive = 0;
2564 p->se.nr_wakeups_idle = 0;
2565
2566#endif 2505#endif
2567 2506
2568 INIT_LIST_HEAD(&p->rt.run_list); 2507 INIT_LIST_HEAD(&p->rt.run_list);
@@ -2583,11 +2522,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
2583 2522
2584 __sched_fork(p); 2523 __sched_fork(p);
2585 /* 2524 /*
2586 * We mark the process as waking here. This guarantees that 2525 * We mark the process as running here. This guarantees that
2587 * nobody will actually run it, and a signal or other external 2526 * nobody will actually run it, and a signal or other external
2588 * event cannot wake it up and insert it on the runqueue either. 2527 * event cannot wake it up and insert it on the runqueue either.
2589 */ 2528 */
2590 p->state = TASK_WAKING; 2529 p->state = TASK_RUNNING;
2591 2530
2592 /* 2531 /*
2593 * Revert to default priority/policy on fork if requested. 2532 * Revert to default priority/policy on fork if requested.
@@ -2654,29 +2593,25 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2654 int cpu __maybe_unused = get_cpu(); 2593 int cpu __maybe_unused = get_cpu();
2655 2594
2656#ifdef CONFIG_SMP 2595#ifdef CONFIG_SMP
2596 rq = task_rq_lock(p, &flags);
2597 p->state = TASK_WAKING;
2598
2657 /* 2599 /*
2658 * Fork balancing, do it here and not earlier because: 2600 * Fork balancing, do it here and not earlier because:
2659 * - cpus_allowed can change in the fork path 2601 * - cpus_allowed can change in the fork path
2660 * - any previously selected cpu might disappear through hotplug 2602 * - any previously selected cpu might disappear through hotplug
2661 * 2603 *
2662 * We still have TASK_WAKING but PF_STARTING is gone now, meaning 2604 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
2663 * ->cpus_allowed is stable, we have preemption disabled, meaning 2605 * without people poking at ->cpus_allowed.
2664 * cpu_online_mask is stable.
2665 */ 2606 */
2666 cpu = select_task_rq(p, SD_BALANCE_FORK, 0); 2607 cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
2667 set_task_cpu(p, cpu); 2608 set_task_cpu(p, cpu);
2668#endif
2669
2670 /*
2671 * Since the task is not on the rq and we still have TASK_WAKING set
2672 * nobody else will migrate this task.
2673 */
2674 rq = cpu_rq(cpu);
2675 raw_spin_lock_irqsave(&rq->lock, flags);
2676 2609
2677 BUG_ON(p->state != TASK_WAKING);
2678 p->state = TASK_RUNNING; 2610 p->state = TASK_RUNNING;
2679 update_rq_clock(rq); 2611 task_rq_unlock(rq, &flags);
2612#endif
2613
2614 rq = task_rq_lock(p, &flags);
2680 activate_task(rq, p, 0); 2615 activate_task(rq, p, 0);
2681 trace_sched_wakeup_new(rq, p, 1); 2616 trace_sched_wakeup_new(rq, p, 1);
2682 check_preempt_curr(rq, p, WF_FORK); 2617 check_preempt_curr(rq, p, WF_FORK);
@@ -3122,32 +3057,21 @@ void sched_exec(void)
3122{ 3057{
3123 struct task_struct *p = current; 3058 struct task_struct *p = current;
3124 struct migration_req req; 3059 struct migration_req req;
3125 int dest_cpu, this_cpu;
3126 unsigned long flags; 3060 unsigned long flags;
3127 struct rq *rq; 3061 struct rq *rq;
3128 3062 int dest_cpu;
3129again:
3130 this_cpu = get_cpu();
3131 dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
3132 if (dest_cpu == this_cpu) {
3133 put_cpu();
3134 return;
3135 }
3136 3063
3137 rq = task_rq_lock(p, &flags); 3064 rq = task_rq_lock(p, &flags);
3138 put_cpu(); 3065 dest_cpu = p->sched_class->select_task_rq(rq, p, SD_BALANCE_EXEC, 0);
3066 if (dest_cpu == smp_processor_id())
3067 goto unlock;
3139 3068
3140 /* 3069 /*
3141 * select_task_rq() can race against ->cpus_allowed 3070 * select_task_rq() can race against ->cpus_allowed
3142 */ 3071 */
3143 if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed) 3072 if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed) &&
3144 || unlikely(!cpu_active(dest_cpu))) { 3073 likely(cpu_active(dest_cpu)) &&
3145 task_rq_unlock(rq, &flags); 3074 migrate_task(p, dest_cpu, &req)) {
3146 goto again;
3147 }
3148
3149 /* force the process onto the specified CPU */
3150 if (migrate_task(p, dest_cpu, &req)) {
3151 /* Need to wait for migration thread (might exit: take ref). */ 3075 /* Need to wait for migration thread (might exit: take ref). */
3152 struct task_struct *mt = rq->migration_thread; 3076 struct task_struct *mt = rq->migration_thread;
3153 3077
@@ -3159,6 +3083,7 @@ again:
3159 3083
3160 return; 3084 return;
3161 } 3085 }
3086unlock:
3162 task_rq_unlock(rq, &flags); 3087 task_rq_unlock(rq, &flags);
3163} 3088}
3164 3089
@@ -3630,23 +3555,9 @@ static inline void schedule_debug(struct task_struct *prev)
3630 3555
3631static void put_prev_task(struct rq *rq, struct task_struct *prev) 3556static void put_prev_task(struct rq *rq, struct task_struct *prev)
3632{ 3557{
3633 if (prev->state == TASK_RUNNING) { 3558 if (prev->se.on_rq)
3634 u64 runtime = prev->se.sum_exec_runtime; 3559 update_rq_clock(rq);
3635 3560 rq->skip_clock_update = 0;
3636 runtime -= prev->se.prev_sum_exec_runtime;
3637 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
3638
3639 /*
3640 * In order to avoid avg_overlap growing stale when we are
3641 * indeed overlapping and hence not getting put to sleep, grow
3642 * the avg_overlap on preemption.
3643 *
3644 * We use the average preemption runtime because that
3645 * correlates to the amount of cache footprint a task can
3646 * build up.
3647 */
3648 update_avg(&prev->se.avg_overlap, runtime);
3649 }
3650 prev->sched_class->put_prev_task(rq, prev); 3561 prev->sched_class->put_prev_task(rq, prev);
3651} 3562}
3652 3563
@@ -3709,14 +3620,13 @@ need_resched_nonpreemptible:
3709 hrtick_clear(rq); 3620 hrtick_clear(rq);
3710 3621
3711 raw_spin_lock_irq(&rq->lock); 3622 raw_spin_lock_irq(&rq->lock);
3712 update_rq_clock(rq);
3713 clear_tsk_need_resched(prev); 3623 clear_tsk_need_resched(prev);
3714 3624
3715 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { 3625 if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
3716 if (unlikely(signal_pending_state(prev->state, prev))) 3626 if (unlikely(signal_pending_state(prev->state, prev)))
3717 prev->state = TASK_RUNNING; 3627 prev->state = TASK_RUNNING;
3718 else 3628 else
3719 deactivate_task(rq, prev, 1); 3629 deactivate_task(rq, prev, DEQUEUE_SLEEP);
3720 switch_count = &prev->nvcsw; 3630 switch_count = &prev->nvcsw;
3721 } 3631 }
3722 3632
@@ -4266,7 +4176,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
4266 BUG_ON(prio < 0 || prio > MAX_PRIO); 4176 BUG_ON(prio < 0 || prio > MAX_PRIO);
4267 4177
4268 rq = task_rq_lock(p, &flags); 4178 rq = task_rq_lock(p, &flags);
4269 update_rq_clock(rq);
4270 4179
4271 oldprio = p->prio; 4180 oldprio = p->prio;
4272 prev_class = p->sched_class; 4181 prev_class = p->sched_class;
@@ -4287,7 +4196,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
4287 if (running) 4196 if (running)
4288 p->sched_class->set_curr_task(rq); 4197 p->sched_class->set_curr_task(rq);
4289 if (on_rq) { 4198 if (on_rq) {
4290 enqueue_task(rq, p, 0, oldprio < prio); 4199 enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
4291 4200
4292 check_class_changed(rq, p, prev_class, oldprio, running); 4201 check_class_changed(rq, p, prev_class, oldprio, running);
4293 } 4202 }
@@ -4309,7 +4218,6 @@ void set_user_nice(struct task_struct *p, long nice)
4309 * the task might be in the middle of scheduling on another CPU. 4218 * the task might be in the middle of scheduling on another CPU.
4310 */ 4219 */
4311 rq = task_rq_lock(p, &flags); 4220 rq = task_rq_lock(p, &flags);
4312 update_rq_clock(rq);
4313 /* 4221 /*
4314 * The RT priorities are set via sched_setscheduler(), but we still 4222 * The RT priorities are set via sched_setscheduler(), but we still
4315 * allow the 'normal' nice value to be set - but as expected 4223 * allow the 'normal' nice value to be set - but as expected
@@ -4331,7 +4239,7 @@ void set_user_nice(struct task_struct *p, long nice)
4331 delta = p->prio - old_prio; 4239 delta = p->prio - old_prio;
4332 4240
4333 if (on_rq) { 4241 if (on_rq) {
4334 enqueue_task(rq, p, 0, false); 4242 enqueue_task(rq, p, 0);
4335 /* 4243 /*
4336 * If the task increased its priority or is running and 4244 * If the task increased its priority or is running and
4337 * lowered its priority, then reschedule its CPU: 4245 * lowered its priority, then reschedule its CPU:
@@ -4592,7 +4500,6 @@ recheck:
4592 raw_spin_unlock_irqrestore(&p->pi_lock, flags); 4500 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
4593 goto recheck; 4501 goto recheck;
4594 } 4502 }
4595 update_rq_clock(rq);
4596 on_rq = p->se.on_rq; 4503 on_rq = p->se.on_rq;
4597 running = task_current(rq, p); 4504 running = task_current(rq, p);
4598 if (on_rq) 4505 if (on_rq)
@@ -5358,7 +5265,18 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5358 struct rq *rq; 5265 struct rq *rq;
5359 int ret = 0; 5266 int ret = 0;
5360 5267
5268 /*
5269 * Serialize against TASK_WAKING so that ttwu() and wunt() can
5270 * drop the rq->lock and still rely on ->cpus_allowed.
5271 */
5272again:
5273 while (task_is_waking(p))
5274 cpu_relax();
5361 rq = task_rq_lock(p, &flags); 5275 rq = task_rq_lock(p, &flags);
5276 if (task_is_waking(p)) {
5277 task_rq_unlock(rq, &flags);
5278 goto again;
5279 }
5362 5280
5363 if (!cpumask_intersects(new_mask, cpu_active_mask)) { 5281 if (!cpumask_intersects(new_mask, cpu_active_mask)) {
5364 ret = -EINVAL; 5282 ret = -EINVAL;
@@ -5516,30 +5434,29 @@ static int migration_thread(void *data)
5516} 5434}
5517 5435
5518#ifdef CONFIG_HOTPLUG_CPU 5436#ifdef CONFIG_HOTPLUG_CPU
5519
5520static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
5521{
5522 int ret;
5523
5524 local_irq_disable();
5525 ret = __migrate_task(p, src_cpu, dest_cpu);
5526 local_irq_enable();
5527 return ret;
5528}
5529
5530/* 5437/*
5531 * Figure out where task on dead CPU should go, use force if necessary. 5438 * Figure out where task on dead CPU should go, use force if necessary.
5532 */ 5439 */
5533static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) 5440void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
5534{ 5441{
5535 int dest_cpu; 5442 struct rq *rq = cpu_rq(dead_cpu);
5443 int needs_cpu, uninitialized_var(dest_cpu);
5444 unsigned long flags;
5536 5445
5537again: 5446 local_irq_save(flags);
5538 dest_cpu = select_fallback_rq(dead_cpu, p);
5539 5447
5540 /* It can have affinity changed while we were choosing. */ 5448 raw_spin_lock(&rq->lock);
5541 if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu))) 5449 needs_cpu = (task_cpu(p) == dead_cpu) && (p->state != TASK_WAKING);
5542 goto again; 5450 if (needs_cpu)
5451 dest_cpu = select_fallback_rq(dead_cpu, p);
5452 raw_spin_unlock(&rq->lock);
5453 /*
5454 * It can only fail if we race with set_cpus_allowed(),
5455 * in the racer should migrate the task anyway.
5456 */
5457 if (needs_cpu)
5458 __migrate_task(p, dead_cpu, dest_cpu);
5459 local_irq_restore(flags);
5543} 5460}
5544 5461
5545/* 5462/*
@@ -5603,7 +5520,6 @@ void sched_idle_next(void)
5603 5520
5604 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1); 5521 __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
5605 5522
5606 update_rq_clock(rq);
5607 activate_task(rq, p, 0); 5523 activate_task(rq, p, 0);
5608 5524
5609 raw_spin_unlock_irqrestore(&rq->lock, flags); 5525 raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -5658,7 +5574,6 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
5658 for ( ; ; ) { 5574 for ( ; ; ) {
5659 if (!rq->nr_running) 5575 if (!rq->nr_running)
5660 break; 5576 break;
5661 update_rq_clock(rq);
5662 next = pick_next_task(rq); 5577 next = pick_next_task(rq);
5663 if (!next) 5578 if (!next)
5664 break; 5579 break;
@@ -5934,7 +5849,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5934 5849
5935 case CPU_DEAD: 5850 case CPU_DEAD:
5936 case CPU_DEAD_FROZEN: 5851 case CPU_DEAD_FROZEN:
5937 cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
5938 migrate_live_tasks(cpu); 5852 migrate_live_tasks(cpu);
5939 rq = cpu_rq(cpu); 5853 rq = cpu_rq(cpu);
5940 kthread_stop(rq->migration_thread); 5854 kthread_stop(rq->migration_thread);
@@ -5942,13 +5856,11 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
5942 rq->migration_thread = NULL; 5856 rq->migration_thread = NULL;
5943 /* Idle task back to normal (off runqueue, low prio) */ 5857 /* Idle task back to normal (off runqueue, low prio) */
5944 raw_spin_lock_irq(&rq->lock); 5858 raw_spin_lock_irq(&rq->lock);
5945 update_rq_clock(rq);
5946 deactivate_task(rq, rq->idle, 0); 5859 deactivate_task(rq, rq->idle, 0);
5947 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0); 5860 __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
5948 rq->idle->sched_class = &idle_sched_class; 5861 rq->idle->sched_class = &idle_sched_class;
5949 migrate_dead_tasks(cpu); 5862 migrate_dead_tasks(cpu);
5950 raw_spin_unlock_irq(&rq->lock); 5863 raw_spin_unlock_irq(&rq->lock);
5951 cpuset_unlock();
5952 migrate_nr_uninterruptible(rq); 5864 migrate_nr_uninterruptible(rq);
5953 BUG_ON(rq->nr_running != 0); 5865 BUG_ON(rq->nr_running != 0);
5954 calc_global_load_remove(rq); 5866 calc_global_load_remove(rq);
@@ -7892,7 +7804,6 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
7892{ 7804{
7893 int on_rq; 7805 int on_rq;
7894 7806
7895 update_rq_clock(rq);
7896 on_rq = p->se.on_rq; 7807 on_rq = p->se.on_rq;
7897 if (on_rq) 7808 if (on_rq)
7898 deactivate_task(rq, p, 0); 7809 deactivate_task(rq, p, 0);
@@ -7919,9 +7830,9 @@ void normalize_rt_tasks(void)
7919 7830
7920 p->se.exec_start = 0; 7831 p->se.exec_start = 0;
7921#ifdef CONFIG_SCHEDSTATS 7832#ifdef CONFIG_SCHEDSTATS
7922 p->se.wait_start = 0; 7833 p->se.statistics.wait_start = 0;
7923 p->se.sleep_start = 0; 7834 p->se.statistics.sleep_start = 0;
7924 p->se.block_start = 0; 7835 p->se.statistics.block_start = 0;
7925#endif 7836#endif
7926 7837
7927 if (!rt_task(p)) { 7838 if (!rt_task(p)) {
@@ -8254,8 +8165,6 @@ void sched_move_task(struct task_struct *tsk)
8254 8165
8255 rq = task_rq_lock(tsk, &flags); 8166 rq = task_rq_lock(tsk, &flags);
8256 8167
8257 update_rq_clock(rq);
8258
8259 running = task_current(rq, tsk); 8168 running = task_current(rq, tsk);
8260 on_rq = tsk->se.on_rq; 8169 on_rq = tsk->se.on_rq;
8261 8170
@@ -8274,7 +8183,7 @@ void sched_move_task(struct task_struct *tsk)
8274 if (unlikely(running)) 8183 if (unlikely(running))
8275 tsk->sched_class->set_curr_task(rq); 8184 tsk->sched_class->set_curr_task(rq);
8276 if (on_rq) 8185 if (on_rq)
8277 enqueue_task(rq, tsk, 0, false); 8186 enqueue_task(rq, tsk, 0);
8278 8187
8279 task_rq_unlock(rq, &flags); 8188 task_rq_unlock(rq, &flags);
8280} 8189}
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 9b49db144037..9cf1baf6616a 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -70,16 +70,16 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu,
70 PN(se->vruntime); 70 PN(se->vruntime);
71 PN(se->sum_exec_runtime); 71 PN(se->sum_exec_runtime);
72#ifdef CONFIG_SCHEDSTATS 72#ifdef CONFIG_SCHEDSTATS
73 PN(se->wait_start); 73 PN(se->statistics.wait_start);
74 PN(se->sleep_start); 74 PN(se->statistics.sleep_start);
75 PN(se->block_start); 75 PN(se->statistics.block_start);
76 PN(se->sleep_max); 76 PN(se->statistics.sleep_max);
77 PN(se->block_max); 77 PN(se->statistics.block_max);
78 PN(se->exec_max); 78 PN(se->statistics.exec_max);
79 PN(se->slice_max); 79 PN(se->statistics.slice_max);
80 PN(se->wait_max); 80 PN(se->statistics.wait_max);
81 PN(se->wait_sum); 81 PN(se->statistics.wait_sum);
82 P(se->wait_count); 82 P(se->statistics.wait_count);
83#endif 83#endif
84 P(se->load.weight); 84 P(se->load.weight);
85#undef PN 85#undef PN
@@ -104,7 +104,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
104 SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", 104 SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
105 SPLIT_NS(p->se.vruntime), 105 SPLIT_NS(p->se.vruntime),
106 SPLIT_NS(p->se.sum_exec_runtime), 106 SPLIT_NS(p->se.sum_exec_runtime),
107 SPLIT_NS(p->se.sum_sleep_runtime)); 107 SPLIT_NS(p->se.statistics.sum_sleep_runtime));
108#else 108#else
109 SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld", 109 SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
110 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L); 110 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
@@ -173,11 +173,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
173 task_group_path(tg, path, sizeof(path)); 173 task_group_path(tg, path, sizeof(path));
174 174
175 SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path); 175 SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path);
176#elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
177 {
178 uid_t uid = cfs_rq->tg->uid;
179 SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid);
180 }
181#else 176#else
182 SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu); 177 SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
183#endif 178#endif
@@ -407,40 +402,38 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
407 PN(se.exec_start); 402 PN(se.exec_start);
408 PN(se.vruntime); 403 PN(se.vruntime);
409 PN(se.sum_exec_runtime); 404 PN(se.sum_exec_runtime);
410 PN(se.avg_overlap);
411 PN(se.avg_wakeup);
412 405
413 nr_switches = p->nvcsw + p->nivcsw; 406 nr_switches = p->nvcsw + p->nivcsw;
414 407
415#ifdef CONFIG_SCHEDSTATS 408#ifdef CONFIG_SCHEDSTATS
416 PN(se.wait_start); 409 PN(se.statistics.wait_start);
417 PN(se.sleep_start); 410 PN(se.statistics.sleep_start);
418 PN(se.block_start); 411 PN(se.statistics.block_start);
419 PN(se.sleep_max); 412 PN(se.statistics.sleep_max);
420 PN(se.block_max); 413 PN(se.statistics.block_max);
421 PN(se.exec_max); 414 PN(se.statistics.exec_max);
422 PN(se.slice_max); 415 PN(se.statistics.slice_max);
423 PN(se.wait_max); 416 PN(se.statistics.wait_max);
424 PN(se.wait_sum); 417 PN(se.statistics.wait_sum);
425 P(se.wait_count); 418 P(se.statistics.wait_count);
426 PN(se.iowait_sum); 419 PN(se.statistics.iowait_sum);
427 P(se.iowait_count); 420 P(se.statistics.iowait_count);
428 P(sched_info.bkl_count); 421 P(sched_info.bkl_count);
429 P(se.nr_migrations); 422 P(se.nr_migrations);
430 P(se.nr_migrations_cold); 423 P(se.statistics.nr_migrations_cold);
431 P(se.nr_failed_migrations_affine); 424 P(se.statistics.nr_failed_migrations_affine);
432 P(se.nr_failed_migrations_running); 425 P(se.statistics.nr_failed_migrations_running);
433 P(se.nr_failed_migrations_hot); 426 P(se.statistics.nr_failed_migrations_hot);
434 P(se.nr_forced_migrations); 427 P(se.statistics.nr_forced_migrations);
435 P(se.nr_wakeups); 428 P(se.statistics.nr_wakeups);
436 P(se.nr_wakeups_sync); 429 P(se.statistics.nr_wakeups_sync);
437 P(se.nr_wakeups_migrate); 430 P(se.statistics.nr_wakeups_migrate);
438 P(se.nr_wakeups_local); 431 P(se.statistics.nr_wakeups_local);
439 P(se.nr_wakeups_remote); 432 P(se.statistics.nr_wakeups_remote);
440 P(se.nr_wakeups_affine); 433 P(se.statistics.nr_wakeups_affine);
441 P(se.nr_wakeups_affine_attempts); 434 P(se.statistics.nr_wakeups_affine_attempts);
442 P(se.nr_wakeups_passive); 435 P(se.statistics.nr_wakeups_passive);
443 P(se.nr_wakeups_idle); 436 P(se.statistics.nr_wakeups_idle);
444 437
445 { 438 {
446 u64 avg_atom, avg_per_cpu; 439 u64 avg_atom, avg_per_cpu;
@@ -491,31 +484,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
491void proc_sched_set_task(struct task_struct *p) 484void proc_sched_set_task(struct task_struct *p)
492{ 485{
493#ifdef CONFIG_SCHEDSTATS 486#ifdef CONFIG_SCHEDSTATS
494 p->se.wait_max = 0; 487 memset(&p->se.statistics, 0, sizeof(p->se.statistics));
495 p->se.wait_sum = 0;
496 p->se.wait_count = 0;
497 p->se.iowait_sum = 0;
498 p->se.iowait_count = 0;
499 p->se.sleep_max = 0;
500 p->se.sum_sleep_runtime = 0;
501 p->se.block_max = 0;
502 p->se.exec_max = 0;
503 p->se.slice_max = 0;
504 p->se.nr_migrations = 0;
505 p->se.nr_migrations_cold = 0;
506 p->se.nr_failed_migrations_affine = 0;
507 p->se.nr_failed_migrations_running = 0;
508 p->se.nr_failed_migrations_hot = 0;
509 p->se.nr_forced_migrations = 0;
510 p->se.nr_wakeups = 0;
511 p->se.nr_wakeups_sync = 0;
512 p->se.nr_wakeups_migrate = 0;
513 p->se.nr_wakeups_local = 0;
514 p->se.nr_wakeups_remote = 0;
515 p->se.nr_wakeups_affine = 0;
516 p->se.nr_wakeups_affine_attempts = 0;
517 p->se.nr_wakeups_passive = 0;
518 p->se.nr_wakeups_idle = 0;
519 p->sched_info.bkl_count = 0;
520#endif 488#endif
521} 489}
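
The sched_debug.c hunks above, like the __sched_fork() and proc_sched_set_task() changes, rely on the schedstat fields having moved from struct sched_entity into an embedded p->se.statistics block, so a full reset becomes one memset() instead of two dozen assignments. A small sketch of why the grouping buys that (field names abridged, not the real struct layout):

#include <stdio.h>
#include <string.h>

struct sched_statistics {
    unsigned long long wait_max, wait_sum, sleep_max, block_max, exec_max;
    unsigned long nr_wakeups, nr_migrations_cold;
};

struct sched_entity_model {
    unsigned long long sum_exec_runtime;    /* not a statistic: survives the reset */
    struct sched_statistics statistics;     /* cleared as one block */
};

int main(void)
{
    struct sched_entity_model se = { 42, { .wait_max = 7, .nr_wakeups = 3 } };

    memset(&se.statistics, 0, sizeof(se.statistics));
    printf("%llu %llu %lu\n", se.sum_exec_runtime,
           se.statistics.wait_max, se.statistics.nr_wakeups);  /* 42 0 0 */
    return 0;
}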
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5a5ea2cd924f..88d3053ac7c2 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -35,8 +35,8 @@
35 * (to see the precise effective timeslice length of your workload, 35 * (to see the precise effective timeslice length of your workload,
36 * run vmstat and monitor the context-switches (cs) field) 36 * run vmstat and monitor the context-switches (cs) field)
37 */ 37 */
38unsigned int sysctl_sched_latency = 5000000ULL; 38unsigned int sysctl_sched_latency = 6000000ULL;
39unsigned int normalized_sysctl_sched_latency = 5000000ULL; 39unsigned int normalized_sysctl_sched_latency = 6000000ULL;
40 40
41/* 41/*
42 * The initial- and re-scaling of tunables is configurable 42 * The initial- and re-scaling of tunables is configurable
@@ -52,15 +52,15 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
52 52
53/* 53/*
54 * Minimal preemption granularity for CPU-bound tasks: 54 * Minimal preemption granularity for CPU-bound tasks:
55 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds) 55 * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
56 */ 56 */
57unsigned int sysctl_sched_min_granularity = 1000000ULL; 57unsigned int sysctl_sched_min_granularity = 2000000ULL;
58unsigned int normalized_sysctl_sched_min_granularity = 1000000ULL; 58unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
59 59
60/* 60/*
61 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity 61 * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
62 */ 62 */
63static unsigned int sched_nr_latency = 5; 63static unsigned int sched_nr_latency = 3;
64 64
65/* 65/*
66 * After fork, child runs first. If set to 0 (default) then 66 * After fork, child runs first. If set to 0 (default) then
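
The retuned defaults above keep the documented relationship sched_nr_latency == sysctl_sched_latency / sysctl_sched_min_granularity, now 6 ms / 2 ms = 3 instead of 5 ms / 1 ms = 5. A one-line check, purely illustrative:

#include <stdio.h>

int main(void)
{
    unsigned int latency = 6000000U;            /* sysctl_sched_latency, ns */
    unsigned int min_granularity = 2000000U;    /* sysctl_sched_min_granularity, ns */

    printf("sched_nr_latency = %u\n", latency / min_granularity);  /* 3 */
    return 0;
}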
@@ -505,7 +505,8 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
505{ 505{
506 unsigned long delta_exec_weighted; 506 unsigned long delta_exec_weighted;
507 507
508 schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max)); 508 schedstat_set(curr->statistics.exec_max,
509 max((u64)delta_exec, curr->statistics.exec_max));
509 510
510 curr->sum_exec_runtime += delta_exec; 511 curr->sum_exec_runtime += delta_exec;
511 schedstat_add(cfs_rq, exec_clock, delta_exec); 512 schedstat_add(cfs_rq, exec_clock, delta_exec);
@@ -548,7 +549,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
548static inline void 549static inline void
549update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) 550update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
550{ 551{
551 schedstat_set(se->wait_start, rq_of(cfs_rq)->clock); 552 schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
552} 553}
553 554
554/* 555/*
@@ -567,18 +568,18 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
567static void 568static void
568update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se) 569update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
569{ 570{
570 schedstat_set(se->wait_max, max(se->wait_max, 571 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
571 rq_of(cfs_rq)->clock - se->wait_start)); 572 rq_of(cfs_rq)->clock - se->statistics.wait_start));
572 schedstat_set(se->wait_count, se->wait_count + 1); 573 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
573 schedstat_set(se->wait_sum, se->wait_sum + 574 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
574 rq_of(cfs_rq)->clock - se->wait_start); 575 rq_of(cfs_rq)->clock - se->statistics.wait_start);
575#ifdef CONFIG_SCHEDSTATS 576#ifdef CONFIG_SCHEDSTATS
576 if (entity_is_task(se)) { 577 if (entity_is_task(se)) {
577 trace_sched_stat_wait(task_of(se), 578 trace_sched_stat_wait(task_of(se),
578 rq_of(cfs_rq)->clock - se->wait_start); 579 rq_of(cfs_rq)->clock - se->statistics.wait_start);
579 } 580 }
580#endif 581#endif
581 schedstat_set(se->wait_start, 0); 582 schedstat_set(se->statistics.wait_start, 0);
582} 583}
583 584
584static inline void 585static inline void
@@ -657,39 +658,39 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
657 if (entity_is_task(se)) 658 if (entity_is_task(se))
658 tsk = task_of(se); 659 tsk = task_of(se);
659 660
660 if (se->sleep_start) { 661 if (se->statistics.sleep_start) {
661 u64 delta = rq_of(cfs_rq)->clock - se->sleep_start; 662 u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
662 663
663 if ((s64)delta < 0) 664 if ((s64)delta < 0)
664 delta = 0; 665 delta = 0;
665 666
666 if (unlikely(delta > se->sleep_max)) 667 if (unlikely(delta > se->statistics.sleep_max))
667 se->sleep_max = delta; 668 se->statistics.sleep_max = delta;
668 669
669 se->sleep_start = 0; 670 se->statistics.sleep_start = 0;
670 se->sum_sleep_runtime += delta; 671 se->statistics.sum_sleep_runtime += delta;
671 672
672 if (tsk) { 673 if (tsk) {
673 account_scheduler_latency(tsk, delta >> 10, 1); 674 account_scheduler_latency(tsk, delta >> 10, 1);
674 trace_sched_stat_sleep(tsk, delta); 675 trace_sched_stat_sleep(tsk, delta);
675 } 676 }
676 } 677 }
677 if (se->block_start) { 678 if (se->statistics.block_start) {
678 u64 delta = rq_of(cfs_rq)->clock - se->block_start; 679 u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
679 680
680 if ((s64)delta < 0) 681 if ((s64)delta < 0)
681 delta = 0; 682 delta = 0;
682 683
683 if (unlikely(delta > se->block_max)) 684 if (unlikely(delta > se->statistics.block_max))
684 se->block_max = delta; 685 se->statistics.block_max = delta;
685 686
686 se->block_start = 0; 687 se->statistics.block_start = 0;
687 se->sum_sleep_runtime += delta; 688 se->statistics.sum_sleep_runtime += delta;
688 689
689 if (tsk) { 690 if (tsk) {
690 if (tsk->in_iowait) { 691 if (tsk->in_iowait) {
691 se->iowait_sum += delta; 692 se->statistics.iowait_sum += delta;
692 se->iowait_count++; 693 se->statistics.iowait_count++;
693 trace_sched_stat_iowait(tsk, delta); 694 trace_sched_stat_iowait(tsk, delta);
694 } 695 }
695 696
@@ -737,20 +738,10 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
737 vruntime += sched_vslice(cfs_rq, se); 738 vruntime += sched_vslice(cfs_rq, se);
738 739
739 /* sleeps up to a single latency don't count. */ 740 /* sleeps up to a single latency don't count. */
740 if (!initial && sched_feat(FAIR_SLEEPERS)) { 741 if (!initial) {
741 unsigned long thresh = sysctl_sched_latency; 742 unsigned long thresh = sysctl_sched_latency;
742 743
743 /* 744 /*
744 * Convert the sleeper threshold into virtual time.
745 * SCHED_IDLE is a special sub-class. We care about
746 * fairness only relative to other SCHED_IDLE tasks,
747 * all of which have the same weight.
748 */
749 if (sched_feat(NORMALIZED_SLEEPER) && (!entity_is_task(se) ||
750 task_of(se)->policy != SCHED_IDLE))
751 thresh = calc_delta_fair(thresh, se);
752
753 /*
754 * Halve their sleep time's effect, to allow 745 * Halve their sleep time's effect, to allow
755 * for a gentler effect of sleepers: 746 * for a gentler effect of sleepers:
756 */ 747 */
@@ -766,9 +757,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
766 se->vruntime = vruntime; 757 se->vruntime = vruntime;
767} 758}
768 759
769#define ENQUEUE_WAKEUP 1
770#define ENQUEUE_MIGRATE 2
771
772static void 760static void
773enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) 761enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
774{ 762{
@@ -776,7 +764,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
776 * Update the normalized vruntime before updating min_vruntime 764 * Update the normalized vruntime before updating min_vruntime
777 * through callig update_curr(). 765 * through callig update_curr().
778 */ 766 */
779 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_MIGRATE)) 767 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
780 se->vruntime += cfs_rq->min_vruntime; 768 se->vruntime += cfs_rq->min_vruntime;
781 769
782 /* 770 /*
@@ -812,7 +800,7 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
812} 800}
813 801
814static void 802static void
815dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep) 803dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
816{ 804{
817 /* 805 /*
818 * Update run-time statistics of the 'current'. 806 * Update run-time statistics of the 'current'.
@@ -820,15 +808,15 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
820 update_curr(cfs_rq); 808 update_curr(cfs_rq);
821 809
822 update_stats_dequeue(cfs_rq, se); 810 update_stats_dequeue(cfs_rq, se);
823 if (sleep) { 811 if (flags & DEQUEUE_SLEEP) {
824#ifdef CONFIG_SCHEDSTATS 812#ifdef CONFIG_SCHEDSTATS
825 if (entity_is_task(se)) { 813 if (entity_is_task(se)) {
826 struct task_struct *tsk = task_of(se); 814 struct task_struct *tsk = task_of(se);
827 815
828 if (tsk->state & TASK_INTERRUPTIBLE) 816 if (tsk->state & TASK_INTERRUPTIBLE)
829 se->sleep_start = rq_of(cfs_rq)->clock; 817 se->statistics.sleep_start = rq_of(cfs_rq)->clock;
830 if (tsk->state & TASK_UNINTERRUPTIBLE) 818 if (tsk->state & TASK_UNINTERRUPTIBLE)
831 se->block_start = rq_of(cfs_rq)->clock; 819 se->statistics.block_start = rq_of(cfs_rq)->clock;
832 } 820 }
833#endif 821#endif
834 } 822 }
@@ -845,7 +833,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
845 * update can refer to the ->curr item and we need to reflect this 833 * update can refer to the ->curr item and we need to reflect this
846 * movement in our normalized position. 834 * movement in our normalized position.
847 */ 835 */
848 if (!sleep) 836 if (!(flags & DEQUEUE_SLEEP))
849 se->vruntime -= cfs_rq->min_vruntime; 837 se->vruntime -= cfs_rq->min_vruntime;
850} 838}
851 839
@@ -912,7 +900,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
912 * when there are only lesser-weight tasks around): 900 * when there are only lesser-weight tasks around):
913 */ 901 */
914 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) { 902 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
915 se->slice_max = max(se->slice_max, 903 se->statistics.slice_max = max(se->statistics.slice_max,
916 se->sum_exec_runtime - se->prev_sum_exec_runtime); 904 se->sum_exec_runtime - se->prev_sum_exec_runtime);
917 } 905 }
918#endif 906#endif
@@ -1054,16 +1042,10 @@ static inline void hrtick_update(struct rq *rq)
1054 * then put the task into the rbtree: 1042 * then put the task into the rbtree:
1055 */ 1043 */
1056static void 1044static void
1057enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head) 1045enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
1058{ 1046{
1059 struct cfs_rq *cfs_rq; 1047 struct cfs_rq *cfs_rq;
1060 struct sched_entity *se = &p->se; 1048 struct sched_entity *se = &p->se;
1061 int flags = 0;
1062
1063 if (wakeup)
1064 flags |= ENQUEUE_WAKEUP;
1065 if (p->state == TASK_WAKING)
1066 flags |= ENQUEUE_MIGRATE;
1067 1049
1068 for_each_sched_entity(se) { 1050 for_each_sched_entity(se) {
1069 if (se->on_rq) 1051 if (se->on_rq)
@@ -1081,18 +1063,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
1081 * decreased. We remove the task from the rbtree and 1063 * decreased. We remove the task from the rbtree and
1082 * update the fair scheduling stats: 1064 * update the fair scheduling stats:
1083 */ 1065 */
1084static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep) 1066static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
1085{ 1067{
1086 struct cfs_rq *cfs_rq; 1068 struct cfs_rq *cfs_rq;
1087 struct sched_entity *se = &p->se; 1069 struct sched_entity *se = &p->se;
1088 1070
1089 for_each_sched_entity(se) { 1071 for_each_sched_entity(se) {
1090 cfs_rq = cfs_rq_of(se); 1072 cfs_rq = cfs_rq_of(se);
1091 dequeue_entity(cfs_rq, se, sleep); 1073 dequeue_entity(cfs_rq, se, flags);
1092 /* Don't dequeue parent if it has other entities besides us */ 1074 /* Don't dequeue parent if it has other entities besides us */
1093 if (cfs_rq->load.weight) 1075 if (cfs_rq->load.weight)
1094 break; 1076 break;
1095 sleep = 1; 1077 flags |= DEQUEUE_SLEEP;
1096 } 1078 }
1097 1079
1098 hrtick_update(rq); 1080 hrtick_update(rq);
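
dequeue_task_fair() now feeds the same flags word through the group hierarchy: the task's own entity is dequeued with the caller's flags, and every group entity emptied as a result is dequeued with DEQUEUE_SLEEP added, so its vruntime stays absolute just like a sleeping task's. A toy model of that walk, with made-up types standing in for the sched_entity/cfs_rq chain:

    #include <stdio.h>

    #define DEQUEUE_SLEEP 0x01              /* assumed value, illustration only */

    /* remaining_weight: load left on this level's queue after the dequeue. */
    struct toy_level { unsigned long remaining_weight; };

    static void toy_dequeue_path(struct toy_level *path, int depth, int flags)
    {
            for (int i = 0; i < depth; i++) {
                    printf("level %d dequeued with flags %#x\n", i, flags);
                    if (path[i].remaining_weight)
                            break;          /* queue still busy: keep the parent enqueued */
                    flags |= DEQUEUE_SLEEP; /* emptied parent is dequeued as a sleeper */
            }
    }

    int main(void)
    {
            /* the task empties two group levels; the root still holds 2048 of load */
            struct toy_level path[] = { { 0 }, { 0 }, { 2048 } };

            toy_dequeue_path(path, 3, 0);
            return 0;
    }
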
@@ -1240,7 +1222,6 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
1240 1222
1241static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync) 1223static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1242{ 1224{
1243 struct task_struct *curr = current;
1244 unsigned long this_load, load; 1225 unsigned long this_load, load;
1245 int idx, this_cpu, prev_cpu; 1226 int idx, this_cpu, prev_cpu;
1246 unsigned long tl_per_task; 1227 unsigned long tl_per_task;
@@ -1255,18 +1236,6 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1255 load = source_load(prev_cpu, idx); 1236 load = source_load(prev_cpu, idx);
1256 this_load = target_load(this_cpu, idx); 1237 this_load = target_load(this_cpu, idx);
1257 1238
1258 if (sync) {
1259 if (sched_feat(SYNC_LESS) &&
1260 (curr->se.avg_overlap > sysctl_sched_migration_cost ||
1261 p->se.avg_overlap > sysctl_sched_migration_cost))
1262 sync = 0;
1263 } else {
1264 if (sched_feat(SYNC_MORE) &&
1265 (curr->se.avg_overlap < sysctl_sched_migration_cost &&
1266 p->se.avg_overlap < sysctl_sched_migration_cost))
1267 sync = 1;
1268 }
1269
1270 /* 1239 /*
1271 * If sync wakeup then subtract the (maximum possible) 1240 * If sync wakeup then subtract the (maximum possible)
1272 * effect of the currently running task from the load 1241 * effect of the currently running task from the load
@@ -1306,7 +1275,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1306 if (sync && balanced) 1275 if (sync && balanced)
1307 return 1; 1276 return 1;
1308 1277
1309 schedstat_inc(p, se.nr_wakeups_affine_attempts); 1278 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
1310 tl_per_task = cpu_avg_load_per_task(this_cpu); 1279 tl_per_task = cpu_avg_load_per_task(this_cpu);
1311 1280
1312 if (balanced || 1281 if (balanced ||
@@ -1318,7 +1287,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
1318 * there is no bad imbalance. 1287 * there is no bad imbalance.
1319 */ 1288 */
1320 schedstat_inc(sd, ttwu_move_affine); 1289 schedstat_inc(sd, ttwu_move_affine);
1321 schedstat_inc(p, se.nr_wakeups_affine); 1290 schedstat_inc(p, se.statistics.nr_wakeups_affine);
1322 1291
1323 return 1; 1292 return 1;
1324 } 1293 }
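
These schedstat changes follow the series' move of the optional statistics counters out of struct sched_entity into an embedded struct sched_statistics, so each schedstat_inc()/schedstat_set() site gains a ".statistics" component. A rough sketch of the resulting layout; only counters visible in this diff are listed, and the exact types and any further fields are assumptions:

    /* Sketch only; the real definitions live in include/linux/sched.h and the
     * statistics block is compiled in when CONFIG_SCHEDSTATS is enabled. */
    struct sched_statistics_sketch {
            unsigned long long sleep_start;
            unsigned long long block_start;
            unsigned long long slice_max;
            unsigned long long exec_max;
            unsigned long long nr_wakeups_affine;
            unsigned long long nr_wakeups_affine_attempts;
            unsigned long long nr_failed_migrations_affine;
            unsigned long long nr_failed_migrations_running;
            unsigned long long nr_failed_migrations_hot;
            unsigned long long nr_forced_migrations;
    };

    struct sched_entity_sketch {
            /* hot scheduling state stays directly in the entity ... */
            unsigned long long vruntime;
            unsigned long long sum_exec_runtime;
            unsigned long long prev_sum_exec_runtime;
            /* ... while the debug-only counters are grouped in one block */
            struct sched_statistics_sketch statistics;
    };
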
@@ -1445,19 +1414,19 @@ select_idle_sibling(struct task_struct *p, struct sched_domain *sd, int target)
1445 * 1414 *
1446 * preempt must be disabled. 1415 * preempt must be disabled.
1447 */ 1416 */
1448static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) 1417static int
1418select_task_rq_fair(struct rq *rq, struct task_struct *p, int sd_flag, int wake_flags)
1449{ 1419{
1450 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL; 1420 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
1451 int cpu = smp_processor_id(); 1421 int cpu = smp_processor_id();
1452 int prev_cpu = task_cpu(p); 1422 int prev_cpu = task_cpu(p);
1453 int new_cpu = cpu; 1423 int new_cpu = cpu;
1454 int want_affine = 0; 1424 int want_affine = 0, cpu_idle = !current->pid;
1455 int want_sd = 1; 1425 int want_sd = 1;
1456 int sync = wake_flags & WF_SYNC; 1426 int sync = wake_flags & WF_SYNC;
1457 1427
1458 if (sd_flag & SD_BALANCE_WAKE) { 1428 if (sd_flag & SD_BALANCE_WAKE) {
1459 if (sched_feat(AFFINE_WAKEUPS) && 1429 if (cpumask_test_cpu(cpu, &p->cpus_allowed))
1460 cpumask_test_cpu(cpu, &p->cpus_allowed))
1461 want_affine = 1; 1430 want_affine = 1;
1462 new_cpu = prev_cpu; 1431 new_cpu = prev_cpu;
1463 } 1432 }
@@ -1509,13 +1478,15 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
1509 * If there's an idle sibling in this domain, make that 1478 * If there's an idle sibling in this domain, make that
1510 * the wake_affine target instead of the current cpu. 1479 * the wake_affine target instead of the current cpu.
1511 */ 1480 */
1512 if (tmp->flags & SD_SHARE_PKG_RESOURCES) 1481 if (!cpu_idle && tmp->flags & SD_SHARE_PKG_RESOURCES)
1513 target = select_idle_sibling(p, tmp, target); 1482 target = select_idle_sibling(p, tmp, target);
1514 1483
1515 if (target >= 0) { 1484 if (target >= 0) {
1516 if (tmp->flags & SD_WAKE_AFFINE) { 1485 if (tmp->flags & SD_WAKE_AFFINE) {
1517 affine_sd = tmp; 1486 affine_sd = tmp;
1518 want_affine = 0; 1487 want_affine = 0;
1488 if (target != cpu)
1489 cpu_idle = 1;
1519 } 1490 }
1520 cpu = target; 1491 cpu = target;
1521 } 1492 }
@@ -1531,6 +1502,7 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
1531 sd = tmp; 1502 sd = tmp;
1532 } 1503 }
1533 1504
1505#ifdef CONFIG_FAIR_GROUP_SCHED
1534 if (sched_feat(LB_SHARES_UPDATE)) { 1506 if (sched_feat(LB_SHARES_UPDATE)) {
1535 /* 1507 /*
1536 * Pick the largest domain to update shares over 1508 * Pick the largest domain to update shares over
@@ -1541,12 +1513,18 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
1541 cpumask_weight(sched_domain_span(sd)))) 1513 cpumask_weight(sched_domain_span(sd))))
1542 tmp = affine_sd; 1514 tmp = affine_sd;
1543 1515
1544 if (tmp) 1516 if (tmp) {
1517 raw_spin_unlock(&rq->lock);
1545 update_shares(tmp); 1518 update_shares(tmp);
1519 raw_spin_lock(&rq->lock);
1520 }
1546 } 1521 }
1522#endif
1547 1523
1548 if (affine_sd && wake_affine(affine_sd, p, sync)) 1524 if (affine_sd) {
1549 return cpu; 1525 if (cpu_idle || cpu == prev_cpu || wake_affine(affine_sd, p, sync))
1526 return cpu;
1527 }
1550 1528
1551 while (sd) { 1529 while (sd) {
1552 int load_idx = sd->forkexec_idx; 1530 int load_idx = sd->forkexec_idx;
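
The new cpu_idle handling keys off `!current->pid`: only the per-cpu idle (swapper) task runs with PID 0, so a wakeup issued from the idle loop, typically out of an interrupt, can skip the idle-sibling search and, together with the `cpu == prev_cpu` case, bypass wake_affine() entirely and just use the waking CPU. A tiny standalone illustration of that identification; this is not the kernel helper (later kernels expose the same test as is_idle_task()):

    #include <stdbool.h>

    struct toy_task { int pid; };

    /* PID 0 is reserved for each CPU's idle task, so checking the waker's PID
     * is a cheap "are we waking from the idle loop?" test. */
    static bool toy_waker_is_idle(const struct toy_task *waker)
    {
            return waker->pid == 0;
    }
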
@@ -1591,63 +1569,26 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
1591} 1569}
1592#endif /* CONFIG_SMP */ 1570#endif /* CONFIG_SMP */
1593 1571
1594/*
1595 * Adaptive granularity
1596 *
1597 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
1598 * with the limit of wakeup_gran -- when it never does a wakeup.
1599 *
1600 * So the smaller avg_wakeup is the faster we want this task to preempt,
1601 * but we don't want to treat the preemptee unfairly and therefore allow it
1602 * to run for at least the amount of time we'd like to run.
1603 *
1604 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
1605 *
1606 * NOTE: we use *nr_running to scale with load, this nicely matches the
1607 * degrading latency on load.
1608 */
1609static unsigned long
1610adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
1611{
1612 u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1613 u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
1614 u64 gran = 0;
1615
1616 if (this_run < expected_wakeup)
1617 gran = expected_wakeup - this_run;
1618
1619 return min_t(s64, gran, sysctl_sched_wakeup_granularity);
1620}
1621
1622static unsigned long 1572static unsigned long
1623wakeup_gran(struct sched_entity *curr, struct sched_entity *se) 1573wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1624{ 1574{
1625 unsigned long gran = sysctl_sched_wakeup_granularity; 1575 unsigned long gran = sysctl_sched_wakeup_granularity;
1626 1576
1627 if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
1628 gran = adaptive_gran(curr, se);
1629
1630 /* 1577 /*
1631 * Since its curr running now, convert the gran from real-time 1578 * Since its curr running now, convert the gran from real-time
1632 * to virtual-time in his units. 1579 * to virtual-time in his units.
1580 *
1581 * By using 'se' instead of 'curr' we penalize light tasks, so
1582 * they get preempted easier. That is, if 'se' < 'curr' then
1583 * the resulting gran will be larger, therefore penalizing the
1584 * lighter, if otoh 'se' > 'curr' then the resulting gran will
1585 * be smaller, again penalizing the lighter task.
1586 *
1587 * This is especially important for buddies when the leftmost
1588 * task is higher priority than the buddy.
1633 */ 1589 */
1634 if (sched_feat(ASYM_GRAN)) { 1590 if (unlikely(se->load.weight != NICE_0_LOAD))
1635 /* 1591 gran = calc_delta_fair(gran, se);
1636 * By using 'se' instead of 'curr' we penalize light tasks, so
1637 * they get preempted easier. That is, if 'se' < 'curr' then
1638 * the resulting gran will be larger, therefore penalizing the
1639 * lighter, if otoh 'se' > 'curr' then the resulting gran will
1640 * be smaller, again penalizing the lighter task.
1641 *
1642 * This is especially important for buddies when the leftmost
1643 * task is higher priority than the buddy.
1644 */
1645 if (unlikely(se->load.weight != NICE_0_LOAD))
1646 gran = calc_delta_fair(gran, se);
1647 } else {
1648 if (unlikely(curr->load.weight != NICE_0_LOAD))
1649 gran = calc_delta_fair(gran, curr);
1650 }
1651 1592
1652 return gran; 1593 return gran;
1653} 1594}
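
With the ADAPTIVE_GRAN and ASYM_GRAN branches gone, wakeup_gran() unconditionally scales sysctl_sched_wakeup_granularity into the wakee's virtual time: lighter-than-nice-0 tasks see a larger granularity (harder to preempt with), heavier tasks a smaller one. A standalone approximation of the arithmetic; the real calc_delta_fair() uses precomputed fixed-point inverse weights, plain division is used here only for illustration:

    #include <stdio.h>

    #define NICE_0_LOAD 1024UL              /* weight of a nice-0 task */

    /* Approximation of calc_delta_fair(): scale a real-time delta by
     * NICE_0_LOAD / weight. */
    static unsigned long toy_calc_delta_fair(unsigned long delta_ns, unsigned long weight)
    {
            return delta_ns * NICE_0_LOAD / weight;
    }

    int main(void)
    {
            unsigned long gran = 1000000;   /* 1 ms wakeup granularity, in ns */

            printf("nice  0 (1024): %lu ns\n", toy_calc_delta_fair(gran, 1024));
            printf("nice -5 (3121): %lu ns\n", toy_calc_delta_fair(gran, 3121));
            printf("nice +5 ( 335): %lu ns\n", toy_calc_delta_fair(gran, 335));
            return 0;
    }
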
@@ -1705,7 +1646,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
1705 struct task_struct *curr = rq->curr; 1646 struct task_struct *curr = rq->curr;
1706 struct sched_entity *se = &curr->se, *pse = &p->se; 1647 struct sched_entity *se = &curr->se, *pse = &p->se;
1707 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 1648 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1708 int sync = wake_flags & WF_SYNC;
1709 int scale = cfs_rq->nr_running >= sched_nr_latency; 1649 int scale = cfs_rq->nr_running >= sched_nr_latency;
1710 1650
1711 if (unlikely(rt_prio(p->prio))) 1651 if (unlikely(rt_prio(p->prio)))
@@ -1738,14 +1678,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
1738 if (unlikely(curr->policy == SCHED_IDLE)) 1678 if (unlikely(curr->policy == SCHED_IDLE))
1739 goto preempt; 1679 goto preempt;
1740 1680
1741 if (sched_feat(WAKEUP_SYNC) && sync)
1742 goto preempt;
1743
1744 if (sched_feat(WAKEUP_OVERLAP) &&
1745 se->avg_overlap < sysctl_sched_migration_cost &&
1746 pse->avg_overlap < sysctl_sched_migration_cost)
1747 goto preempt;
1748
1749 if (!sched_feat(WAKEUP_PREEMPT)) 1681 if (!sched_feat(WAKEUP_PREEMPT))
1750 return; 1682 return;
1751 1683
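
With the sync and overlap shortcuts above removed, wakeup preemption in check_preempt_wakeup() comes down to the vruntime comparison: the wakee preempts only when its vruntime lead over the running task exceeds the weight-scaled wakeup granularity. A simplified sketch of that test, following the -1/0/1 convention of the fair class' helper (wakeup_preempt_entity), with plain integers standing in for the kernel types:

    /* Returns 1 to preempt, 0 when the wakee is ahead but within the
     * granularity, -1 when it is not ahead at all. */
    static int toy_should_preempt(long long curr_vruntime, long long se_vruntime,
                                  long long gran_vtime)
    {
            long long vdiff = curr_vruntime - se_vruntime;

            if (vdiff <= 0)
                    return -1;
            if (vdiff > gran_vtime)
                    return 1;
            return 0;
    }
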
@@ -1844,13 +1776,13 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
1844 * 3) are cache-hot on their current CPU. 1776 * 3) are cache-hot on their current CPU.
1845 */ 1777 */
1846 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { 1778 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
1847 schedstat_inc(p, se.nr_failed_migrations_affine); 1779 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
1848 return 0; 1780 return 0;
1849 } 1781 }
1850 *all_pinned = 0; 1782 *all_pinned = 0;
1851 1783
1852 if (task_running(rq, p)) { 1784 if (task_running(rq, p)) {
1853 schedstat_inc(p, se.nr_failed_migrations_running); 1785 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
1854 return 0; 1786 return 0;
1855 } 1787 }
1856 1788
@@ -1866,14 +1798,14 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
1866#ifdef CONFIG_SCHEDSTATS 1798#ifdef CONFIG_SCHEDSTATS
1867 if (tsk_cache_hot) { 1799 if (tsk_cache_hot) {
1868 schedstat_inc(sd, lb_hot_gained[idle]); 1800 schedstat_inc(sd, lb_hot_gained[idle]);
1869 schedstat_inc(p, se.nr_forced_migrations); 1801 schedstat_inc(p, se.statistics.nr_forced_migrations);
1870 } 1802 }
1871#endif 1803#endif
1872 return 1; 1804 return 1;
1873 } 1805 }
1874 1806
1875 if (tsk_cache_hot) { 1807 if (tsk_cache_hot) {
1876 schedstat_inc(p, se.nr_failed_migrations_hot); 1808 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
1877 return 0; 1809 return 0;
1878 } 1810 }
1879 return 1; 1811 return 1;
@@ -3112,8 +3044,6 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
3112 3044
3113 /* move a task from busiest_rq to target_rq */ 3045 /* move a task from busiest_rq to target_rq */
3114 double_lock_balance(busiest_rq, target_rq); 3046 double_lock_balance(busiest_rq, target_rq);
3115 update_rq_clock(busiest_rq);
3116 update_rq_clock(target_rq);
3117 3047
3118 /* Search for an sd spanning us and the target CPU. */ 3048 /* Search for an sd spanning us and the target CPU. */
3119 for_each_domain(target_cpu, sd) { 3049 for_each_domain(target_cpu, sd) {
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index d5059fd761d9..83c66e8ad3ee 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,11 +1,4 @@
1/* 1/*
2 * Disregards a certain amount of sleep time (sched_latency_ns) and
3 * considers the task to be running during that period. This gives it
4 * a service deficit on wakeup, allowing it to run sooner.
5 */
6SCHED_FEAT(FAIR_SLEEPERS, 1)
7
8/*
9 * Only give sleepers 50% of their service deficit. This allows 2 * Only give sleepers 50% of their service deficit. This allows
10 * them to run sooner, but does not allow tons of sleepers to 3 * them to run sooner, but does not allow tons of sleepers to
11 * rip the spread apart. 4 * rip the spread apart.
@@ -13,13 +6,6 @@ SCHED_FEAT(FAIR_SLEEPERS, 1)
13SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1) 6SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
14 7
15/* 8/*
16 * By not normalizing the sleep time, heavy tasks get an effective
17 * longer period, and lighter task an effective shorter period they
18 * are considered running.
19 */
20SCHED_FEAT(NORMALIZED_SLEEPER, 0)
21
22/*
23 * Place new tasks ahead so that they do not starve already running 9 * Place new tasks ahead so that they do not starve already running
24 * tasks 10 * tasks
25 */ 11 */
@@ -31,37 +17,6 @@ SCHED_FEAT(START_DEBIT, 1)
31SCHED_FEAT(WAKEUP_PREEMPT, 1) 17SCHED_FEAT(WAKEUP_PREEMPT, 1)
32 18
33/* 19/*
34 * Compute wakeup_gran based on task behaviour, clipped to
35 * [0, sched_wakeup_gran_ns]
36 */
37SCHED_FEAT(ADAPTIVE_GRAN, 1)
38
39/*
40 * When converting the wakeup granularity to virtual time, do it such
41 * that heavier tasks preempting a lighter task have an edge.
42 */
43SCHED_FEAT(ASYM_GRAN, 1)
44
45/*
46 * Always wakeup-preempt SYNC wakeups, see SYNC_WAKEUPS.
47 */
48SCHED_FEAT(WAKEUP_SYNC, 0)
49
50/*
51 * Wakeup preempt based on task behaviour. Tasks that do not overlap
52 * don't get preempted.
53 */
54SCHED_FEAT(WAKEUP_OVERLAP, 0)
55
56/*
57 * Use the SYNC wakeup hint, pipes and the likes use this to indicate
58 * the remote end is likely to consume the data we just wrote, and
59 * therefore has cache benefit from being placed on the same cpu, see
60 * also AFFINE_WAKEUPS.
61 */
62SCHED_FEAT(SYNC_WAKEUPS, 1)
63
64/*
65 * Based on load and program behaviour, see if it makes sense to place 20 * Based on load and program behaviour, see if it makes sense to place
66 * a newly woken task on the same cpu as the task that woke it -- 21 * a newly woken task on the same cpu as the task that woke it --
67 * improve cache locality. Typically used with SYNC wakeups as 22 * improve cache locality. Typically used with SYNC wakeups as
@@ -70,16 +25,6 @@ SCHED_FEAT(SYNC_WAKEUPS, 1)
70SCHED_FEAT(AFFINE_WAKEUPS, 1) 25SCHED_FEAT(AFFINE_WAKEUPS, 1)
71 26
72/* 27/*
73 * Weaken SYNC hint based on overlap
74 */
75SCHED_FEAT(SYNC_LESS, 1)
76
77/*
78 * Add SYNC hint based on overlap
79 */
80SCHED_FEAT(SYNC_MORE, 0)
81
82/*
83 * Prefer to schedule the task we woke last (assuming it failed 28 * Prefer to schedule the task we woke last (assuming it failed
84 * wakeup-preemption), since its likely going to consume data we 29 * wakeup-preemption), since its likely going to consume data we
85 * touched, increases cache locality. 30 * touched, increases cache locality.
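
For context on how these entries are consumed: sched_features.h is included more than once from kernel/sched.c with different SCHED_FEAT() definitions, first to build an enum of bit positions and then to build the default feature mask, and with CONFIG_SCHED_DEBUG the mask is writable at runtime through /sys/kernel/debug/sched_features (e.g. "echo NO_WAKEUP_PREEMPT > /sys/kernel/debug/sched_features"). A condensed sketch of that pattern; annotations such as const_debug are trimmed, so this is not a drop-in copy of sched.c:

    #define SCHED_FEAT(name, enabled)  __SCHED_FEAT_##name,
    enum {
    #include "sched_features.h"
    };
    #undef SCHED_FEAT

    #define SCHED_FEAT(name, enabled)  (1UL << __SCHED_FEAT_##name) * enabled |
    unsigned int sysctl_sched_features =
    #include "sched_features.h"
            0;
    #undef SCHED_FEAT

    /* Runtime test used throughout the scheduler: */
    #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
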
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index a8a6d8a50947..bea2b8f12024 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -6,7 +6,8 @@
6 */ 6 */
7 7
8#ifdef CONFIG_SMP 8#ifdef CONFIG_SMP
9static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags) 9static int
10select_task_rq_idle(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
10{ 11{
11 return task_cpu(p); /* IDLE tasks as never migrated */ 12 return task_cpu(p); /* IDLE tasks as never migrated */
12} 13}
@@ -32,7 +33,7 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
32 * message if some code attempts to do it: 33 * message if some code attempts to do it:
33 */ 34 */
34static void 35static void
35dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep) 36dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
36{ 37{
37 raw_spin_unlock_irq(&rq->lock); 38 raw_spin_unlock_irq(&rq->lock);
38 printk(KERN_ERR "bad: scheduling from the idle thread!\n"); 39 printk(KERN_ERR "bad: scheduling from the idle thread!\n");
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index b5b920ae2ea7..8afb953e31c6 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -613,7 +613,7 @@ static void update_curr_rt(struct rq *rq)
613 if (unlikely((s64)delta_exec < 0)) 613 if (unlikely((s64)delta_exec < 0))
614 delta_exec = 0; 614 delta_exec = 0;
615 615
616 schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec)); 616 schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
617 617
618 curr->se.sum_exec_runtime += delta_exec; 618 curr->se.sum_exec_runtime += delta_exec;
619 account_group_exec_runtime(curr, delta_exec); 619 account_group_exec_runtime(curr, delta_exec);
@@ -888,20 +888,20 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
888 * Adding/removing a task to/from a priority array: 888 * Adding/removing a task to/from a priority array:
889 */ 889 */
890static void 890static void
891enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup, bool head) 891enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
892{ 892{
893 struct sched_rt_entity *rt_se = &p->rt; 893 struct sched_rt_entity *rt_se = &p->rt;
894 894
895 if (wakeup) 895 if (flags & ENQUEUE_WAKEUP)
896 rt_se->timeout = 0; 896 rt_se->timeout = 0;
897 897
898 enqueue_rt_entity(rt_se, head); 898 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
899 899
900 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1) 900 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
901 enqueue_pushable_task(rq, p); 901 enqueue_pushable_task(rq, p);
902} 902}
903 903
904static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep) 904static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
905{ 905{
906 struct sched_rt_entity *rt_se = &p->rt; 906 struct sched_rt_entity *rt_se = &p->rt;
907 907
@@ -948,10 +948,9 @@ static void yield_task_rt(struct rq *rq)
948#ifdef CONFIG_SMP 948#ifdef CONFIG_SMP
949static int find_lowest_rq(struct task_struct *task); 949static int find_lowest_rq(struct task_struct *task);
950 950
951static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) 951static int
952select_task_rq_rt(struct rq *rq, struct task_struct *p, int sd_flag, int flags)
952{ 953{
953 struct rq *rq = task_rq(p);
954
955 if (sd_flag != SD_BALANCE_WAKE) 954 if (sd_flag != SD_BALANCE_WAKE)
956 return smp_processor_id(); 955 return smp_processor_id();
957 956
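
enqueue_task_rt() now derives head insertion from ENQUEUE_HEAD instead of a separate bool parameter; within the RT priority array this simply decides whether the entity is linked at the front or the back of its per-priority FIFO. A userspace toy of that ordering effect, with an assumed flag value and an array standing in for the kernel's list:

    #include <stdio.h>

    #define ENQUEUE_HEAD 0x02               /* assumed bit value, illustration only */

    struct toy_prio_queue { int pid[8]; int n; };

    static void toy_enqueue_rt(struct toy_prio_queue *q, int pid, int flags)
    {
            if (flags & ENQUEUE_HEAD) {     /* front: runs before same-priority peers */
                    for (int i = q->n; i > 0; i--)
                            q->pid[i] = q->pid[i - 1];
                    q->pid[0] = pid;
            } else {                        /* default: tail of the FIFO */
                    q->pid[q->n] = pid;
            }
            q->n++;
    }

    int main(void)
    {
            struct toy_prio_queue q = { { 0 }, 0 };

            toy_enqueue_rt(&q, 101, 0);
            toy_enqueue_rt(&q, 102, 0);
            toy_enqueue_rt(&q, 103, ENQUEUE_HEAD);  /* jumps ahead: 103 101 102 */
            for (int i = 0; i < q.n; i++)
                    printf("%d ", q.pid[i]);
            printf("\n");
            return 0;
    }
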
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f992762d7f51..f25735a767af 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -262,6 +262,9 @@ void tick_nohz_stop_sched_tick(int inidle)
262 goto end; 262 goto end;
263 } 263 }
264 264
265 if (nohz_ratelimit(cpu))
266 goto end;
267
265 ts->idle_calls++; 268 ts->idle_calls++;
266 /* Read jiffies and the time when jiffies were updated last */ 269 /* Read jiffies and the time when jiffies were updated last */
267 do { 270 do {
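
The added nohz_ratelimit() call bails out of stopping the tick when the scheduler asks for it; its body is not part of this hunk. The idea is a cheap per-cpu "did we just try this?" test so a workload bouncing rapidly in and out of idle does not pay the full tick-stop/restart cost on every idle entry. A purely illustrative sketch of such a ratelimit; the structure, names and threshold are assumptions, not the kernel implementation:

    #include <stdbool.h>
    #include <stdint.h>

    #define TICK_NSEC (1000000000ULL / 250) /* assuming HZ=250 */

    struct toy_cpu { uint64_t last_nohz_try_ns; };

    /* Remember the last attempt and refuse to stop the tick again within
     * half a tick period. */
    static bool toy_nohz_ratelimit(struct toy_cpu *cpu, uint64_t now_ns)
    {
            uint64_t since = now_ns - cpu->last_nohz_try_ns;

            cpu->last_nohz_try_ns = now_ns;
            return since < TICK_NSEC / 2;   /* true => keep the periodic tick */
    }
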
diff --git a/kernel/user.c b/kernel/user.c
index 766467b3bcb7..8e1c8c0a496c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -16,7 +16,6 @@
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/user_namespace.h> 18#include <linux/user_namespace.h>
19#include "cred-internals.h"
20 19
21struct user_namespace init_user_ns = { 20struct user_namespace init_user_ns = {
22 .kref = { 21 .kref = {
@@ -137,9 +136,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
137 struct hlist_head *hashent = uidhashentry(ns, uid); 136 struct hlist_head *hashent = uidhashentry(ns, uid);
138 struct user_struct *up, *new; 137 struct user_struct *up, *new;
139 138
140 /* Make uid_hash_find() + uids_user_create() + uid_hash_insert() 139 /* Make uid_hash_find() + uid_hash_insert() atomic. */
141 * atomic.
142 */
143 spin_lock_irq(&uidhash_lock); 140 spin_lock_irq(&uidhash_lock);
144 up = uid_hash_find(uid, hashent); 141 up = uid_hash_find(uid, hashent);
145 spin_unlock_irq(&uidhash_lock); 142 spin_unlock_irq(&uidhash_lock);
@@ -161,11 +158,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
161 spin_lock_irq(&uidhash_lock); 158 spin_lock_irq(&uidhash_lock);
162 up = uid_hash_find(uid, hashent); 159 up = uid_hash_find(uid, hashent);
163 if (up) { 160 if (up) {
164 /* This case is not possible when CONFIG_USER_SCHED
165 * is defined, since we serialize alloc_uid() using
166 * uids_mutex. Hence no need to call
167 * sched_destroy_user() or remove_user_sysfs_dir().
168 */
169 key_put(new->uid_keyring); 161 key_put(new->uid_keyring);
170 key_put(new->session_keyring); 162 key_put(new->session_keyring);
171 kmem_cache_free(uid_cachep, new); 163 kmem_cache_free(uid_cachep, new);
@@ -178,8 +170,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
178 170
179 return up; 171 return up;
180 172
181 put_user_ns(new->user_ns);
182 kmem_cache_free(uid_cachep, new);
183out_unlock: 173out_unlock:
184 return NULL; 174 return NULL;
185} 175}