Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 444
1 file changed, 378 insertions(+), 66 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 26efa475bdc1..f04aa9664504 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,6 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
+#include <linux/perf_counter.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -68,17 +69,18 @@
 #include <linux/pagemap.h>
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
-#include <linux/bootmem.h>
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
 #include <linux/ftrace.h>
-#include <trace/sched.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
 
 #include "sched_cpupri.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched.h>
+
 /*
  * Convert user-nice values [ -20 ... 0 ... 19 ]
  * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
@@ -118,12 +120,6 @@
  */
 #define RUNTIME_INF	((u64)~0ULL)
 
-DEFINE_TRACE(sched_wait_task);
-DEFINE_TRACE(sched_wakeup);
-DEFINE_TRACE(sched_wakeup_new);
-DEFINE_TRACE(sched_switch);
-DEFINE_TRACE(sched_migrate_task);
-
 #ifdef CONFIG_SMP
 
 static void double_rq_lock(struct rq *rq1, struct rq *rq2);
@@ -584,6 +580,7 @@ struct rq {
 	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
+	u64 nr_migrations_in;
 
 	struct cfs_rq cfs;
 	struct rt_rq rt;
@@ -630,6 +627,10 @@ struct rq {
 	struct list_head migration_queue;
 #endif
 
+	/* calc_load related fields */
+	unsigned long calc_load_update;
+	long calc_load_active;
+
 #ifdef CONFIG_SCHED_HRTICK
 #ifdef CONFIG_SMP
 	int hrtick_csd_pending;
@@ -692,7 +693,7 @@ static inline int cpu_of(struct rq *rq)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
 
-static inline void update_rq_clock(struct rq *rq)
+inline void update_rq_clock(struct rq *rq)
 {
 	rq->clock = sched_clock_cpu(cpu_of(rq));
 }
@@ -1728,6 +1729,8 @@ static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
 }
 #endif
 
+static void calc_load_account_active(struct rq *this_rq);
+
 #include "sched_stats.h"
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -1958,7 +1961,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 	clock_offset = old_rq->clock - new_rq->clock;
 
-	trace_sched_migrate_task(p, task_cpu(p), new_cpu);
+	trace_sched_migrate_task(p, new_cpu);
 
 #ifdef CONFIG_SCHEDSTATS
 	if (p->se.wait_start)
@@ -1967,12 +1970,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.sleep_start -= clock_offset;
 	if (p->se.block_start)
 		p->se.block_start -= clock_offset;
+#endif
 	if (old_cpu != new_cpu) {
-		schedstat_inc(p, se.nr_migrations);
+		p->se.nr_migrations++;
+		new_rq->nr_migrations_in++;
+#ifdef CONFIG_SCHEDSTATS
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
-	}
 #endif
+		perf_counter_task_migration(p, new_cpu);
+	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
 			  new_cfsrq->min_vruntime;
 
@@ -2015,6 +2022,49 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 }
 
 /*
+ * wait_task_context_switch -	wait for a thread to complete at least one
+ *				context switch.
+ *
+ * @p must not be current.
+ */
+void wait_task_context_switch(struct task_struct *p)
+{
+	unsigned long nvcsw, nivcsw, flags;
+	int running;
+	struct rq *rq;
+
+	nvcsw	= p->nvcsw;
+	nivcsw	= p->nivcsw;
+	for (;;) {
+		/*
+		 * The runqueue is assigned before the actual context
+		 * switch. We need to take the runqueue lock.
+		 *
+		 * We could check initially without the lock but it is
+		 * very likely that we need to take the lock in every
+		 * iteration.
+		 */
+		rq = task_rq_lock(p, &flags);
+		running = task_running(rq, p);
+		task_rq_unlock(rq, &flags);
+
+		if (likely(!running))
+			break;
+		/*
+		 * The switch count is incremented before the actual
+		 * context switch. We thus wait for two switches to be
+		 * sure at least one completed.
+		 */
+		if ((p->nvcsw - nvcsw) > 1)
+			break;
+		if ((p->nivcsw - nivcsw) > 1)
+			break;
+
+		cpu_relax();
+	}
+}
+
+/*
  * wait_task_inactive - wait for a thread to unschedule.
  *
  * If @match_state is nonzero, it's the @p->state value just checked and
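
A note on the helper added above: the nvcsw/nivcsw counters it polls are bumped before the context switch itself, so a single increment only shows that a switch has started, while an advance of more than one guarantees at least one switch has fully completed. The same two per-task counters are visible from user space; the following standalone program (an illustration only, not part of this patch) reads them through getrusage():

/*
 * Illustration only: read the voluntary/involuntary context-switch
 * counters of the current process, the user-space view of the nvcsw
 * and nivcsw fields that wait_task_context_switch() polls.
 */
#include <stdio.h>
#include <sched.h>
#include <sys/resource.h>

int main(void)
{
	struct rusage before, after;

	getrusage(RUSAGE_SELF, &before);
	sched_yield();			/* likely triggers a voluntary switch */
	getrusage(RUSAGE_SELF, &after);

	printf("voluntary:   %ld -> %ld\n", before.ru_nvcsw, after.ru_nvcsw);
	printf("involuntary: %ld -> %ld\n", before.ru_nivcsw, after.ru_nivcsw);
	return 0;
}

Depending on load, sched_yield() may or may not produce an actual switch, which is exactly the kind of uncertainty the kernel helper's retry loop is there to absorb.
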
@@ -2324,6 +2374,27 @@ static int sched_balance_self(int cpu, int flag)
 
 #endif /* CONFIG_SMP */
 
+/**
+ * task_oncpu_function_call - call a function on the cpu on which a task runs
+ * @p:		the task to evaluate
+ * @func:	the function to be called
+ * @info:	the function call argument
+ *
+ * Calls the function @func when the task is currently running. This might
+ * be on the current CPU, which just calls the function directly
+ */
+void task_oncpu_function_call(struct task_struct *p,
+			      void (*func) (void *info), void *info)
+{
+	int cpu;
+
+	preempt_disable();
+	cpu = task_cpu(p);
+	if (task_curr(p))
+		smp_call_function_single(cpu, func, info, 1);
+	preempt_enable();
+}
+
 /***
  * try_to_wake_up - wake up a thread
  * @p: the to-be-woken-up thread
@@ -2458,6 +2529,17 @@ out:
 	return success;
 }
 
+/**
+ * wake_up_process - Wake up a specific process
+ * @p: The process to be woken up.
+ *
+ * Attempt to wake up the nominated process and move it to the set of runnable
+ * processes.  Returns 1 if the process was woken up, 0 if it was already
+ * running.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
+ */
 int wake_up_process(struct task_struct *p)
 {
 	return try_to_wake_up(p, TASK_ALL, 0);
@@ -2480,6 +2562,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start		= 0;
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
+	p->se.nr_migrations		= 0;
 	p->se.last_wakeup		= 0;
 	p->se.avg_overlap		= 0;
 	p->se.start_runtime		= 0;
@@ -2710,6 +2793,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
+	perf_counter_task_sched_in(current, cpu_of(rq));
 	finish_lock_switch(rq, prev);
 #ifdef CONFIG_SMP
 	if (post_schedule)
@@ -2766,7 +2850,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	 * combine the page table reload and the switch backend into
 	 * one hypercall.
 	 */
-	arch_enter_lazy_cpu_mode();
+	arch_start_context_switch(prev);
 
 	if (unlikely(!mm)) {
 		next->active_mm = oldmm;
@@ -2856,19 +2940,81 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
-unsigned long nr_active(void)
+/* Variables and functions for calc_load */
+static atomic_long_t calc_load_tasks;
+static unsigned long calc_load_update;
+unsigned long avenrun[3];
+EXPORT_SYMBOL(avenrun);
+
+/**
+ * get_avenrun - get the load average array
+ * @loads:	pointer to dest load array
+ * @offset:	offset to add
+ * @shift:	shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
 {
-	unsigned long i, running = 0, uninterruptible = 0;
+	loads[0] = (avenrun[0] + offset) << shift;
+	loads[1] = (avenrun[1] + offset) << shift;
+	loads[2] = (avenrun[2] + offset) << shift;
+}
 
-	for_each_online_cpu(i) {
-		running += cpu_rq(i)->nr_running;
-		uninterruptible += cpu_rq(i)->nr_uninterruptible;
-	}
+static unsigned long
+calc_load(unsigned long load, unsigned long exp, unsigned long active)
+{
+	load *= exp;
+	load += active * (FIXED_1 - exp);
+	return load >> FSHIFT;
+}
 
-	if (unlikely((long)uninterruptible < 0))
-		uninterruptible = 0;
+/*
+ * calc_load - update the avenrun load estimates 10 ticks after the
+ * CPUs have updated calc_load_tasks.
+ */
+void calc_global_load(void)
+{
+	unsigned long upd = calc_load_update + 10;
+	long active;
 
-	return running + uninterruptible;
+	if (time_before(jiffies, upd))
+		return;
+
+	active = atomic_long_read(&calc_load_tasks);
+	active = active > 0 ? active * FIXED_1 : 0;
+
+	avenrun[0] = calc_load(avenrun[0], EXP_1, active);
+	avenrun[1] = calc_load(avenrun[1], EXP_5, active);
+	avenrun[2] = calc_load(avenrun[2], EXP_15, active);
+
+	calc_load_update += LOAD_FREQ;
+}
+
+/*
+ * Either called from update_cpu_load() or from a cpu going idle
+ */
+static void calc_load_account_active(struct rq *this_rq)
+{
+	long nr_active, delta;
+
+	nr_active = this_rq->nr_running;
+	nr_active += (long) this_rq->nr_uninterruptible;
+
+	if (nr_active != this_rq->calc_load_active) {
+		delta = nr_active - this_rq->calc_load_active;
+		this_rq->calc_load_active = nr_active;
+		atomic_long_add(delta, &calc_load_tasks);
+	}
+}
+
+/*
+ * Externally visible per-cpu scheduler statistics:
+ * cpu_nr_migrations(cpu) - number of migrations into that cpu
+ */
+u64 cpu_nr_migrations(int cpu)
+{
+	return cpu_rq(cpu)->nr_migrations_in;
 }
 
 /*
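
The calc_load()/calc_global_load() pair above is the classic fixed-point, exponentially damped load average: every LOAD_FREQ interval the global avenrun[] values decay toward the sampled number of active (running plus uninterruptible) tasks. A minimal user-space sketch of the same arithmetic follows; the FSHIFT/FIXED_1/EXP_* constants mirror the definitions in include/linux/sched.h of this kernel series and are quoted here only for illustration:

/* Standalone model of the fixed-point averaging used by calc_load(). */
#include <stdio.h>

#define FSHIFT	11			/* bits of fractional precision */
#define FIXED_1	(1 << FSHIFT)		/* 1.0 in fixed point */
#define EXP_1	1884			/* 1/exp(5s/1min) in fixed point */
#define EXP_5	2014			/* 1/exp(5s/5min) */
#define EXP_15	2037			/* 1/exp(5s/15min) */

static unsigned long calc_load(unsigned long load, unsigned long exp,
			       unsigned long active)
{
	load *= exp;
	load += active * (FIXED_1 - exp);
	return load >> FSHIFT;
}

int main(void)
{
	unsigned long avenrun[3] = { 0, 0, 0 };
	unsigned long active = 2 * FIXED_1;	/* pretend 2 tasks are active */
	int i;

	/* one iteration per LOAD_FREQ (roughly 5 s) interval */
	for (i = 0; i < 24; i++) {		/* about 2 minutes of samples */
		avenrun[0] = calc_load(avenrun[0], EXP_1, active);
		avenrun[1] = calc_load(avenrun[1], EXP_5, active);
		avenrun[2] = calc_load(avenrun[2], EXP_15, active);
	}

	/* decode as integer part plus two decimals, loadavg style */
	for (i = 0; i < 3; i++)
		printf("%lu.%02lu\n", avenrun[i] >> FSHIFT,
		       ((avenrun[i] & (FIXED_1 - 1)) * 100) >> FSHIFT);
	return 0;
}

get_avenrun()'s offset and shift parameters simply let callers such as the /proc/loadavg code apply rounding and scaling to these estimates without any extra locking.
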
@@ -2899,6 +3045,11 @@ static void update_cpu_load(struct rq *this_rq)
 			new_load += scale-1;
 		this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
 	}
+
+	if (time_after_eq(jiffies, this_rq->calc_load_update)) {
+		this_rq->calc_load_update += LOAD_FREQ;
+		calc_load_account_active(this_rq);
+	}
 }
 
 #ifdef CONFIG_SMP
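
calc_load_account_active() is wired into update_cpu_load() so that each CPU folds only the change in its own active-task count into the shared calc_load_tasks atomic, and does so at most once per LOAD_FREQ. A minimal standalone model of that delta bookkeeping (the names here are invented for the sketch, not kernel code):

/* Single-file model of the per-runqueue delta accounting shown above. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long calc_load_tasks;		/* global sum of active tasks */

struct rq_model {
	long nr_running;
	long nr_uninterruptible;
	long calc_load_active;			/* last value folded in */
};

static void calc_load_account_active(struct rq_model *rq)
{
	long nr_active = rq->nr_running + rq->nr_uninterruptible;

	if (nr_active != rq->calc_load_active) {
		/* publish only the change, so the global never double counts */
		atomic_fetch_add(&calc_load_tasks,
				 nr_active - rq->calc_load_active);
		rq->calc_load_active = nr_active;
	}
}

int main(void)
{
	struct rq_model cpu0 = { 3, 1, 0 }, cpu1 = { 2, 0, 0 };

	calc_load_account_active(&cpu0);	/* +4 */
	calc_load_account_active(&cpu1);	/* +2 */
	cpu0.nr_running = 1;			/* two tasks went to sleep */
	calc_load_account_active(&cpu0);	/* -2 */

	printf("calc_load_tasks = %ld\n", atomic_load(&calc_load_tasks));
	return 0;
}

Publishing deltas rather than absolute counts keeps the global sum correct no matter how the per-CPU values move between samples, and the tick path touches the shared cache line only rarely.
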
@@ -4240,10 +4391,126 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 static struct {
 	atomic_t load_balancer;
 	cpumask_var_t cpu_mask;
+	cpumask_var_t ilb_grp_nohz_mask;
 } nohz ____cacheline_aligned = {
 	.load_balancer = ATOMIC_INIT(-1),
 };
 
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+/**
+ * lowest_flag_domain - Return lowest sched_domain containing flag.
+ * @cpu:	The cpu whose lowest level of sched domain is to
+ *		be returned.
+ * @flag:	The flag to check for the lowest sched_domain
+ *		for the given cpu.
+ *
+ * Returns the lowest sched_domain of a cpu which contains the given flag.
+ */
+static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
+{
+	struct sched_domain *sd;
+
+	for_each_domain(cpu, sd)
+		if (sd && (sd->flags & flag))
+			break;
+
+	return sd;
+}
+
+/**
+ * for_each_flag_domain - Iterates over sched_domains containing the flag.
+ * @cpu:	The cpu whose domains we're iterating over.
+ * @sd:		variable holding the value of the power_savings_sd
+ *		for cpu.
+ * @flag:	The flag to filter the sched_domains to be iterated.
+ *
+ * Iterates over all the scheduler domains for a given cpu that has the 'flag'
+ * set, starting from the lowest sched_domain to the highest.
+ */
+#define for_each_flag_domain(cpu, sd, flag) \
+	for (sd = lowest_flag_domain(cpu, flag); \
+		(sd && (sd->flags & flag)); sd = sd->parent)
+
+/**
+ * is_semi_idle_group - Checks if the given sched_group is semi-idle.
+ * @ilb_group:	group to be checked for semi-idleness
+ *
+ * Returns:	1 if the group is semi-idle. 0 otherwise.
+ *
+ * We define a sched_group to be semi idle if it has at least one idle-CPU
+ * and at least one non-idle CPU. This helper function checks if the given
+ * sched_group is semi-idle or not.
+ */
+static inline int is_semi_idle_group(struct sched_group *ilb_group)
+{
+	cpumask_and(nohz.ilb_grp_nohz_mask, nohz.cpu_mask,
+					sched_group_cpus(ilb_group));
+
+	/*
+	 * A sched_group is semi-idle when it has at least one busy cpu
+	 * and at least one idle cpu.
+	 */
+	if (cpumask_empty(nohz.ilb_grp_nohz_mask))
+		return 0;
+
+	if (cpumask_equal(nohz.ilb_grp_nohz_mask, sched_group_cpus(ilb_group)))
+		return 0;
+
+	return 1;
+}
+/**
+ * find_new_ilb - Finds the optimum idle load balancer for nomination.
+ * @cpu:	The cpu which is nominating a new idle_load_balancer.
+ *
+ * Returns:	Returns the id of the idle load balancer if it exists,
+ *		Else, returns >= nr_cpu_ids.
+ *
+ * This algorithm picks the idle load balancer such that it belongs to a
+ * semi-idle powersavings sched_domain. The idea is to try and avoid
+ * completely idle packages/cores just for the purpose of idle load balancing
+ * when there are other idle CPUs which are better suited for that job.
+ */
+static int find_new_ilb(int cpu)
+{
+	struct sched_domain *sd;
+	struct sched_group *ilb_group;
+
+	/*
+	 * Have idle load balancer selection from semi-idle packages only
+	 * when power-aware load balancing is enabled
+	 */
+	if (!(sched_smt_power_savings || sched_mc_power_savings))
+		goto out_done;
+
+	/*
+	 * Optimize for the case when we have no idle CPUs or only one
+	 * idle CPU. Don't walk the sched_domain hierarchy in such cases
+	 */
+	if (cpumask_weight(nohz.cpu_mask) < 2)
+		goto out_done;
+
+	for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
+		ilb_group = sd->groups;
+
+		do {
+			if (is_semi_idle_group(ilb_group))
+				return cpumask_first(nohz.ilb_grp_nohz_mask);
+
+			ilb_group = ilb_group->next;
+
+		} while (ilb_group != sd->groups);
+	}
+
+out_done:
+	return cpumask_first(nohz.cpu_mask);
+}
+#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
+static inline int find_new_ilb(int call_cpu)
+{
+	return cpumask_first(nohz.cpu_mask);
+}
+#endif
+
 /*
  * This routine will try to nominate the ilb (idle load balancing)
  * owner among the cpus whose ticks are stopped. ilb owner will do the idle
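
find_new_ilb() above prefers an idle CPU that lives in a package which is only partly idle, so that completely idle packages can stay in deep sleep states. The core test, is_semi_idle_group(), intersects the group's CPUs with the nohz (tick-stopped) mask and accepts the group only if that intersection is neither empty nor the whole group. A self-contained model using plain bitmasks in place of cpumasks (illustrative only, not kernel code):

/* Bitmask model of the is_semi_idle_group() test. */
#include <stdint.h>
#include <stdio.h>

static int is_semi_idle_group(uint64_t group_cpus, uint64_t nohz_cpus)
{
	uint64_t idle_in_group = group_cpus & nohz_cpus;

	if (idle_in_group == 0)			/* no idle CPU in the group */
		return 0;
	if (idle_in_group == group_cpus)	/* every CPU in the group is idle */
		return 0;
	return 1;				/* mixed: a good home for the ilb */
}

int main(void)
{
	uint64_t package0 = 0x0f;	/* CPUs 0-3 */
	uint64_t package1 = 0xf0;	/* CPUs 4-7 */
	uint64_t nohz     = 0xf4;	/* CPUs 2 and 4-7 have stopped their tick */

	printf("package0 semi-idle: %d\n", is_semi_idle_group(package0, nohz)); /* 1 */
	printf("package1 semi-idle: %d\n", is_semi_idle_group(package1, nohz)); /* 0 */
	return 0;
}

In the sketch, package0 has one idle CPU out of four and is therefore a good place to run the idle load balancer, while package1 is entirely idle and is left undisturbed.
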
@@ -4298,8 +4565,24 @@ int select_nohz_load_balancer(int stop_tick)
 		/* make me the ilb owner */
 		if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
 			return 1;
-	} else if (atomic_read(&nohz.load_balancer) == cpu)
+	} else if (atomic_read(&nohz.load_balancer) == cpu) {
+		int new_ilb;
+
+		if (!(sched_smt_power_savings ||
+					sched_mc_power_savings))
+			return 1;
+		/*
+		 * Check to see if there is a more power-efficient
+		 * ilb.
+		 */
+		new_ilb = find_new_ilb(cpu);
+		if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
+			atomic_set(&nohz.load_balancer, -1);
+			resched_cpu(new_ilb);
+			return 0;
+		}
 		return 1;
+	}
 	} else {
 		if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
 			return 0;
@@ -4468,15 +4751,7 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
 	}
 
 	if (atomic_read(&nohz.load_balancer) == -1) {
-		/*
-		 * simple selection for now: Nominate the
-		 * first cpu in the nohz list to be the next
-		 * ilb owner.
-		 *
-		 * TBD: Traverse the sched domains and nominate
-		 * the nearest cpu in the nohz.cpu_mask.
-		 */
-		int ilb = cpumask_first(nohz.cpu_mask);
+		int ilb = find_new_ilb(cpu);
 
 		if (ilb < nr_cpu_ids)
 			resched_cpu(ilb);
@@ -4840,6 +5115,8 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	spin_unlock(&rq->lock);
 
+	perf_counter_task_tick(curr, cpu);
+
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
 	trigger_load_balance(rq, cpu);
@@ -5007,13 +5284,15 @@ pick_next_task(struct rq *rq)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched __schedule(void)
+asmlinkage void __sched schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
+need_resched:
+	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_qsctr_inc(cpu);
@@ -5053,6 +5332,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
+		perf_counter_task_sched_out(prev, next, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
@@ -5070,15 +5350,9 @@ need_resched_nonpreemptible:
 
 	if (unlikely(reacquire_kernel_lock(current) < 0))
 		goto need_resched_nonpreemptible;
-}
 
-asmlinkage void __sched schedule(void)
-{
-need_resched:
-	preempt_disable();
-	__schedule();
 	preempt_enable_no_resched();
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
+	if (need_resched())
 		goto need_resched;
 }
 EXPORT_SYMBOL(schedule);
@@ -5221,7 +5495,7 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 			int nr_exclusive, int sync, void *key)
 {
 	wait_queue_t *curr, *next;
@@ -5241,6 +5515,9 @@ void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  * @mode: which threads
  * @nr_exclusive: how many wake-one or wake-many threads to wake up
  * @key: is directly passed to the wakeup function
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
  */
 void __wake_up(wait_queue_head_t *q, unsigned int mode,
 			int nr_exclusive, void *key)
@@ -5279,6 +5556,9 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
  * with each other. This can prevent needless bouncing between CPUs.
  *
  * On UP it can prevent extra preemption.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
  */
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
 			int nr_exclusive, void *key)
@@ -5315,6 +5595,9 @@ EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
  * awakened in the same order in which they were queued.
  *
  * See also complete_all(), wait_for_completion() and related routines.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
  */
 void complete(struct completion *x)
 {
@@ -5332,6 +5615,9 @@ EXPORT_SYMBOL(complete);
  * @x:  holds the state of this particular completion
  *
  * This will wake up all threads waiting on this particular completion event.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
  */
 void complete_all(struct completion *x)
 {
@@ -6490,8 +6776,9 @@ void sched_show_task(struct task_struct *p)
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	free = stack_not_used(p);
 #endif
-	printk(KERN_CONT "%5lu %5d %6d\n", free,
-		task_pid_nr(p), task_pid_nr(p->real_parent));
+	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
+		task_pid_nr(p), task_pid_nr(p->real_parent),
+		(unsigned long)task_thread_info(p)->flags);
 
 	show_stack(p, NULL);
 }
@@ -6970,6 +7257,14 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 
 	}
 }
+
+/*
+ * remove the tasks which were accounted by rq from calc_load_tasks.
+ */
+static void calc_global_load_remove(struct rq *rq)
+{
+	atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
+}
 #endif /* CONFIG_HOTPLUG_CPU */
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
@@ -7204,6 +7499,8 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		/* Update our root-domain */
 		rq = cpu_rq(cpu);
 		spin_lock_irqsave(&rq->lock, flags);
+		rq->calc_load_update = calc_load_update;
+		rq->calc_load_active = 0;
 		if (rq->rd) {
 			BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 
@@ -7243,7 +7540,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		cpuset_unlock();
 		migrate_nr_uninterruptible(rq);
 		BUG_ON(rq->nr_running != 0);
-
+		calc_global_load_remove(rq);
 		/*
 		 * No need to migrate the tasks: it was best-effort if
 		 * they didn't take sched_hotcpu_mutex. Just wake up
@@ -7279,8 +7576,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-/* Register at highest priority so that task migration (migrate_all_tasks)
- * happens before everything else.
+/*
+ * Register at high priority so that task migration (migrate_all_tasks)
+ * happens before everything else.  This has to be lower priority than
+ * the notifier in the perf_counter subsystem, though.
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
 	.notifier_call = migration_call,
@@ -7525,24 +7824,21 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 
 static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
 {
+	gfp_t gfp = GFP_KERNEL;
+
 	memset(rd, 0, sizeof(*rd));
 
-	if (bootmem) {
-		alloc_bootmem_cpumask_var(&def_root_domain.span);
-		alloc_bootmem_cpumask_var(&def_root_domain.online);
-		alloc_bootmem_cpumask_var(&def_root_domain.rto_mask);
-		cpupri_init(&rd->cpupri, true);
-		return 0;
-	}
+	if (bootmem)
+		gfp = GFP_NOWAIT;
 
-	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+	if (!alloc_cpumask_var(&rd->span, gfp))
 		goto out;
-	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+	if (!alloc_cpumask_var(&rd->online, gfp))
 		goto free_span;
-	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+	if (!alloc_cpumask_var(&rd->rto_mask, gfp))
 		goto free_online;
 
-	if (cpupri_init(&rd->cpupri, false) != 0)
+	if (cpupri_init(&rd->cpupri, bootmem) != 0)
 		goto free_rto_mask;
 	return 0;
 
@@ -7753,8 +8049,9 @@ int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 
 /*
  * The cpus mask in sched_group and sched_domain hangs off the end.
- * FIXME: use cpumask_var_t or dynamic percpu alloc to avoid wasting space
- * for nr_cpu_ids < CONFIG_NR_CPUS.
+ *
+ * ( See the comments in include/linux/sched.h:struct sched_group
+ *   and struct sched_domain. )
  */
 struct static_sched_group {
 	struct sched_group sg;
@@ -7875,7 +8172,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j).sd;
-			if (j != cpumask_first(sched_group_cpus(sd->groups))) {
+			if (j != group_first_cpu(sd->groups)) {
 				/*
 				 * Only add "power" once for each
 				 * physical package.
@@ -7953,7 +8250,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 
 	WARN_ON(!sd || !sd->groups);
 
-	if (cpu != cpumask_first(sched_group_cpus(sd->groups)))
+	if (cpu != group_first_cpu(sd->groups))
 		return;
 
 	child = sd->child;
@@ -8865,7 +9162,7 @@ void __init sched_init(void)
 	 * we use alloc_bootmem().
 	 */
 	if (alloc_size) {
-		ptr = (unsigned long)alloc_bootmem(alloc_size);
+		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		init_task_group.se = (struct sched_entity **)ptr;
@@ -8938,6 +9235,8 @@ void __init sched_init(void)
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
 		rq->nr_running = 0;
+		rq->calc_load_active = 0;
+		rq->calc_load_update = jiffies + LOAD_FREQ;
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -8958,7 +9257,7 @@ void __init sched_init(void)
 	 * 1024) and two child groups A0 and A1 (of weight 1024 each),
 	 * then A0's share of the cpu resource is:
 	 *
-	 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
+	 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
 	 *
 	 * We achieve this by letting init_task_group's tasks sit
 	 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
@@ -9045,20 +9344,26 @@ void __init sched_init(void)
 	 * when this runqueue becomes "idle".
 	 */
 	init_idle(current, smp_processor_id());
+
+	calc_load_update = jiffies + LOAD_FREQ;
+
 	/*
 	 * During early bootup we pretend to be a normal task:
 	 */
 	current->sched_class = &fair_sched_class;
 
 	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-	alloc_bootmem_cpumask_var(&nohz_cpu_mask);
+	alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-	alloc_bootmem_cpumask_var(&nohz.cpu_mask);
+	alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-	alloc_bootmem_cpumask_var(&cpu_isolated_map);
+	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
+	perf_counter_init();
+
 	scheduler_running = 1;
 }
 
@@ -9800,6 +10105,13 @@ static int sched_rt_global_constraints(void)
 	if (sysctl_sched_rt_period <= 0)
 		return -EINVAL;
 
+	/*
+	 * There's always some RT tasks in the root group
+	 * -- migration, kstopmachine etc..
+	 */
+	if (sysctl_sched_rt_runtime == 0)
+		return -EBUSY;
+
 	spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
 	for_each_possible_cpu(i) {
 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;