Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	204
1 file changed, 129 insertions(+), 75 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index faf4d463bbff..3c11ae0a948d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -39,7 +39,7 @@
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
 #include <linux/debug_locks.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 #ifdef CONFIG_SMP
 static int root_task_group_empty(void)
 {
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
 }
 #endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
 #else /* !CONFIG_USER_SCHED */
@@ -676,20 +677,15 @@ inline void update_rq_clock(struct rq *rq)
 
 /**
  * runqueue_is_locked
+ * @cpu: the processor in question.
  *
  * Returns true if the current cpu runqueue is locked.
  * This interface allows printk to be called with the runqueue lock
  * held and know whether or not it is OK to wake up the klogd.
  */
-int runqueue_is_locked(void)
+int runqueue_is_locked(int cpu)
 {
-	int cpu = get_cpu();
-	struct rq *rq = cpu_rq(cpu);
-	int ret;
-
-	ret = spin_is_locked(&rq->lock);
-	put_cpu();
-	return ret;
+	return spin_is_locked(&cpu_rq(cpu)->lock);
 }
 
 /*
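For context, a minimal sketch of how the reworked runqueue_is_locked(cpu) might be used on the printk side; maybe_wake_klogd() and klogd_wakeup() are hypothetical names, not part of this diff:

/* Sketch only: skip the wakeup while this CPU's runqueue lock is held,
 * so the wakeup path cannot recurse into the scheduler under rq->lock.
 */
static void maybe_wake_klogd(void)
{
	if (!runqueue_is_locked(smp_processor_id()))
		klogd_wakeup();		/* hypothetical wakeup helper */
}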
@@ -786,7 +782,7 @@ static int sched_feat_open(struct inode *inode, struct file *filp)
 	return single_open(filp, sched_feat_show, NULL);
 }
 
-static struct file_operations sched_feat_fops = {
+static const struct file_operations sched_feat_fops = {
 	.open		= sched_feat_open,
 	.write		= sched_feat_write,
 	.read		= seq_read,
@@ -1569,11 +1565,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-struct update_shares_data {
-	unsigned long rq_weight[NR_CPUS];
-};
-
-static DEFINE_PER_CPU(struct update_shares_data, update_shares_data);
+static __read_mostly unsigned long *update_shares_data;
 
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 
@@ -1583,12 +1575,12 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares);
 static void update_group_shares_cpu(struct task_group *tg, int cpu,
 				    unsigned long sd_shares,
 				    unsigned long sd_rq_weight,
-				    struct update_shares_data *usd)
+				    unsigned long *usd_rq_weight)
 {
 	unsigned long shares, rq_weight;
 	int boost = 0;
 
-	rq_weight = usd->rq_weight[cpu];
+	rq_weight = usd_rq_weight[cpu];
 	if (!rq_weight) {
 		boost = 1;
 		rq_weight = NICE_0_LOAD;
@@ -1623,7 +1615,7 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
 static int tg_shares_up(struct task_group *tg, void *data)
 {
 	unsigned long weight, rq_weight = 0, shares = 0;
-	struct update_shares_data *usd;
+	unsigned long *usd_rq_weight;
 	struct sched_domain *sd = data;
 	unsigned long flags;
 	int i;
@@ -1632,11 +1624,11 @@ static int tg_shares_up(struct task_group *tg, void *data)
 		return 0;
 
 	local_irq_save(flags);
-	usd = &__get_cpu_var(update_shares_data);
+	usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
 
 	for_each_cpu(i, sched_domain_span(sd)) {
 		weight = tg->cfs_rq[i]->load.weight;
-		usd->rq_weight[i] = weight;
+		usd_rq_weight[i] = weight;
 
 		/*
 		 * If there are currently no tasks on the cpu pretend there
@@ -1657,7 +1649,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	shares = tg->shares;
 
 	for_each_cpu(i, sched_domain_span(sd))
-		update_group_shares_cpu(tg, i, shares, rq_weight, usd);
+		update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
 
 	local_irq_restore(flags);
 
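A sketch of the per-cpu usage pattern these hunks switch to: update_shares_data is now a dynamically sized per-cpu array of nr_cpu_ids weights rather than a DEFINE_PER_CPU struct containing an NR_CPUS array. The allocation itself appears later in this diff, in sched_init(); example_walk_weights() below is a hypothetical illustration, assuming the allocation succeeded:

static void example_walk_weights(struct sched_domain *sd)
{
	/* Per-cpu base pointer for the CPU we are running on; callers
	 * disable interrupts around this, as tg_shares_up() does. */
	unsigned long *usd_rq_weight =
		per_cpu_ptr(update_shares_data, smp_processor_id());
	int i;

	/* Indexed by cpu id, exactly as tg_shares_up() does above. */
	for_each_cpu(i, sched_domain_span(sd))
		usd_rq_weight[i] = 0;
}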
@@ -2001,6 +1993,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 		p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @k to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+		WARN_ON(1);
+		return;
+	}
+
+	spin_lock_irqsave(&rq->lock, flags);
+	set_task_cpu(p, cpu);
+	p->cpus_allowed = cpumask_of_cpu(cpu);
+	p->rt.nr_cpus_allowed = 1;
+	p->flags |= PF_THREAD_BOUND;
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+EXPORT_SYMBOL(kthread_bind);
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
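The usual calling sequence around the kthread_bind() helper added here is create, bind, then wake; a sketch, with my_thread_fn() and start_pinned_thread() as hypothetical placeholders:

#include <linux/kthread.h>

static int my_thread_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static struct task_struct *start_pinned_thread(unsigned int cpu)
{
	struct task_struct *p;

	p = kthread_create(my_thread_fn, NULL, "pinned/%u", cpu);
	if (!IS_ERR(p)) {
		kthread_bind(p, cpu);	/* task is still stopped here */
		wake_up_process(p);
	}
	return p;
}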
@@ -2013,7 +2037,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) &&
+	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 			(&p->se == cfs_rq_of(&p->se)->next ||
 			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
@@ -2059,7 +2083,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (task_hot(p, old_rq->clock, NULL))
 			schedstat_inc(p, se.nr_forced2_migrations);
 #endif
-		perf_swcounter_event(PERF_COUNT_SW_CPU_MIGRATIONS,
+		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS,
 				     1, 1, NULL, 0);
 	}
 	p->se.vruntime -= old_cfsrq->min_vruntime -
@@ -2317,7 +2341,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 {
 	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
-	struct rq *rq;
+	struct rq *rq, *orig_rq;
 
 	if (!sched_feat(SYNC_WAKEUPS))
 		wake_flags &= ~WF_SYNC;
@@ -2325,7 +2349,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	this_cpu = get_cpu();
 
 	smp_wmb();
-	rq = task_rq_lock(p, &flags);
+	rq = orig_rq = task_rq_lock(p, &flags);
 	update_rq_clock(rq);
 	if (!(p->state & state))
 		goto out;
@@ -2356,6 +2380,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state,
 	set_task_cpu(p, cpu);
 
 	rq = task_rq_lock(p, &flags);
+
+	if (rq != orig_rq)
+		update_rq_clock(rq);
+
 	WARN_ON(p->state != TASK_WAKING);
 	cpu = task_cpu(p);
 
@@ -2521,22 +2549,17 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	__sched_fork(p);
 
 	/*
-	 * Make sure we do not leak PI boosting priority to the child.
-	 */
-	p->prio = current->normal_prio;
-
-	/*
 	 * Revert to default priority/policy on fork if requested.
 	 */
 	if (unlikely(p->sched_reset_on_fork)) {
-		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR)
+		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
 			p->policy = SCHED_NORMAL;
-
-		if (p->normal_prio < DEFAULT_PRIO)
-			p->prio = DEFAULT_PRIO;
+			p->normal_prio = p->static_prio;
+		}
 
 		if (PRIO_TO_NICE(p->static_prio) < 0) {
 			p->static_prio = NICE_TO_PRIO(0);
+			p->normal_prio = p->static_prio;
 			set_load_weight(p);
 		}
 
@@ -2547,6 +2570,11 @@ void sched_fork(struct task_struct *p, int clone_flags)
 		p->sched_reset_on_fork = 0;
 	}
 
+	/*
+	 * Make sure we do not leak PI boosting priority to the child.
+	 */
+	p->prio = current->normal_prio;
+
 	if (!rt_prio(p->prio))
 		p->sched_class = &fair_sched_class;
 
@@ -2587,8 +2615,6 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	BUG_ON(p->state != TASK_RUNNING);
 	update_rq_clock(rq);
 
-	p->prio = effective_prio(p);
-
 	if (!p->sched_class->task_new || !current->se.on_rq) {
 		activate_task(rq, p, 0);
 	} else {
@@ -2724,7 +2750,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-	perf_counter_task_sched_in(current, cpu_of(rq));
+	perf_event_task_sched_in(current, cpu_of(rq));
 	finish_lock_switch(rq, prev);
 
 	fire_sched_in_preempt_notifiers(current);
@@ -2910,6 +2936,19 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
+unsigned long nr_iowait_cpu(void)
+{
+	struct rq *this = this_rq();
+	return atomic_read(&this->nr_iowait);
+}
+
+unsigned long this_cpu_load(void)
+{
+	struct rq *this = this_rq();
+	return this->cpu_load[0];
+}
+
+
 /* Variables and functions for calc_load */
 static atomic_long_t calc_load_tasks;
 static unsigned long calc_load_update;
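The two helpers added above give per-cpu views (the iowait task count and the most recent cpu_load sample) to code outside the scheduler, such as idle or frequency governors. A hedged sketch of a consumer; cpu_looks_io_bound() is a hypothetical helper, not part of this patch:

static int cpu_looks_io_bound(void)
{
	/* Tasks currently blocked in io_schedule() on this CPU? */
	if (nr_iowait_cpu() > 0)
		return 1;

	/* Otherwise fall back to the most recent load sample. */
	return this_cpu_load() > 0;
}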
@@ -3651,6 +3690,7 @@ static void update_group_power(struct sched_domain *sd, int cpu)
 
 /**
  * update_sg_lb_stats - Update sched_group's statistics for load balancing.
+ * @sd: The sched_domain whose statistics are to be updated.
  * @group: sched_group whose statistics are to be updated.
  * @this_cpu: Cpu for which load balance is currently performed.
  * @idle: Idle status of this_cpu
@@ -5085,17 +5125,16 @@ void account_idle_time(cputime_t cputime)
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-	cputime_t one_jiffy = jiffies_to_cputime(1);
-	cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
 	struct rq *rq = this_rq();
 
 	if (user_tick)
-		account_user_time(p, one_jiffy, one_jiffy_scaled);
+		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
 	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-		account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
 				    one_jiffy_scaled);
 	else
-		account_idle_time(one_jiffy);
+		account_idle_time(cputime_one_jiffy);
 }
 
 /*
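The cputime_one_jiffy symbol used above replaces the per-tick jiffies_to_cputime(1) conversion with a value computed once; on architectures where the conversion is nontrivial it is a precomputed variable, elsewhere it may simply expand back to jiffies_to_cputime(1). A minimal sketch of the precomputation idea, with setup_cputime_one_jiffy() as a hypothetical init hook:

cputime_t cputime_one_jiffy;

static void __init setup_cputime_one_jiffy(void)
{
	/* Convert once at boot instead of on every timer tick. */
	cputime_one_jiffy = jiffies_to_cputime(1);
}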
@@ -5199,7 +5238,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	spin_unlock(&rq->lock);
 
-	perf_counter_task_tick(curr, cpu);
+	perf_event_task_tick(curr, cpu);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
@@ -5415,7 +5454,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-		perf_counter_task_sched_out(prev, next, cpu);
+		perf_event_task_sched_out(prev, next, cpu);
 
 		rq->nr_switches++;
 		rq->curr = next;
@@ -6714,9 +6753,6 @@ EXPORT_SYMBOL(yield);
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
- *
- * But don't do that if it is a deliberate, throttling IO wait (this task
- * has set its backing_dev_info: the queue against which it should throttle)
  */
 void __sched io_schedule(void)
 {
@@ -6825,23 +6861,8 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
 	if (retval)
 		goto out_unlock;
 
-	/*
-	 * Time slice is 0 for SCHED_FIFO tasks and for SCHED_OTHER
-	 * tasks that are on an otherwise idle runqueue:
-	 */
-	time_slice = 0;
-	if (p->policy == SCHED_RR) {
-		time_slice = DEF_TIMESLICE;
-	} else if (p->policy != SCHED_FIFO) {
-		struct sched_entity *se = &p->se;
-		unsigned long flags;
-		struct rq *rq;
+	time_slice = p->sched_class->get_rr_interval(p);
 
-		rq = task_rq_lock(p, &flags);
-		if (rq->cfs.load.weight)
-			time_slice = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
-		task_rq_unlock(rq, &flags);
-	}
 	read_unlock(&tasklist_lock);
 	jiffies_to_timespec(time_slice, &t);
 	retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
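The open-coded timeslice logic removed here moves behind a per-class get_rr_interval() hook. A sketch of what the RT-class hook looks like, mirroring the removed behaviour (a fixed slice for SCHED_RR, 0 for SCHED_FIFO); the real implementations live in the per-class files (sched_rt.c, sched_fair.c), so this is only an illustration:

static unsigned int get_rr_interval_rt(struct task_struct *task)
{
	/* Time slice is 0 for SCHED_FIFO tasks, DEF_TIMESLICE for SCHED_RR. */
	if (task->policy == SCHED_RR)
		return DEF_TIMESLICE;

	return 0;
}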
@@ -7692,7 +7713,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 /*
  * Register at high priority so that task migration (migrate_all_tasks)
  * happens before everything else. This has to be lower priority than
  * the notifier in the perf_event subsystem, though.
  */
 static struct notifier_block __cpuinitdata migration_notifier = {
 	.notifier_call = migration_call,
@@ -9171,6 +9192,7 @@ void __init sched_init_smp(void)
 	cpumask_var_t non_isolated_cpus;
 
 	alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
+	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 
 #if defined(CONFIG_NUMA)
 	sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
@@ -9202,7 +9224,6 @@ void __init sched_init_smp(void)
 	sched_init_granularity();
 	free_cpumask_var(non_isolated_cpus);
 
-	alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
 	init_sched_rt_class();
 }
 #else
@@ -9415,6 +9436,10 @@ void __init sched_init(void)
 #endif /* CONFIG_USER_SCHED */
 #endif /* CONFIG_GROUP_SCHED */
 
+#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
+	update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
+					    __alignof__(unsigned long));
+#endif
 	for_each_possible_cpu(i) {
 		struct rq *rq;
 
@@ -9540,16 +9565,16 @@ void __init sched_init(void)
 	current->sched_class = &fair_sched_class;
 
 	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-	alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-	alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
 	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+	zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
-	perf_counter_init();
+	perf_event_init();
 
 	scheduler_running = 1;
 }
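The alloc_cpumask_var() to zalloc_cpumask_var() switches above matter mainly with CONFIG_CPUMASK_OFFSTACK, where the mask is heap-allocated and would otherwise start out uninitialized; zalloc hands back an already-cleared mask. Roughly, and only as an illustration (example_zalloc_cpumask() is a hypothetical stand-in):

static bool example_zalloc_cpumask(cpumask_var_t *maskp)
{
	/* Behaves like zalloc_cpumask_var(maskp, GFP_NOWAIT). */
	if (!alloc_cpumask_var(maskp, GFP_NOWAIT))
		return false;

	cpumask_clear(*maskp);
	return true;
}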
@@ -10321,7 +10346,7 @@ static int sched_rt_global_constraints(void)
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 int sched_rt_handler(struct ctl_table *table, int write,
-		struct file *filp, void __user *buffer, size_t *lenp,
+		void __user *buffer, size_t *lenp,
 		loff_t *ppos)
 {
 	int ret;
@@ -10332,7 +10357,7 @@ int sched_rt_handler(struct ctl_table *table, int write,
 	old_period = sysctl_sched_rt_period;
 	old_runtime = sysctl_sched_rt_runtime;
 
-	ret = proc_dointvec(table, write, filp, buffer, lenp, ppos);
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
 
 	if (!ret && write) {
 		ret = sched_rt_global_constraints();
@@ -10386,8 +10411,7 @@ cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
 }
 
 static int
-cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		      struct task_struct *tsk)
+cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
 	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
@@ -10397,15 +10421,45 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	if (tsk->sched_class != &fair_sched_class)
 		return -EINVAL;
 #endif
+	return 0;
+}
 
+static int
+cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		      struct task_struct *tsk, bool threadgroup)
+{
+	int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
+	if (retval)
+		return retval;
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			retval = cpu_cgroup_can_attach_task(cgrp, c);
+			if (retval) {
+				rcu_read_unlock();
+				return retval;
+			}
+		}
+		rcu_read_unlock();
+	}
 	return 0;
 }
 
 static void
 cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-		  struct cgroup *old_cont, struct task_struct *tsk)
+		  struct cgroup *old_cont, struct task_struct *tsk,
+		  bool threadgroup)
 {
 	sched_move_task(tsk);
+	if (threadgroup) {
+		struct task_struct *c;
+		rcu_read_lock();
+		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
+			sched_move_task(c);
+		}
+		rcu_read_unlock();
+	}
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
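The new threadgroup parameter threaded through can_attach/attach above anticipates moving an entire thread group in one cgroup operation. A sketch of the caller contract this implies on the cgroup core side, assuming the subsystem callbacks take the same arguments as the functions above; attach_to_cpu_cgroup() is a hypothetical wrapper, not part of this diff:

static int attach_to_cpu_cgroup(struct cgroup_subsys *ss, struct cgroup *cgrp,
				struct cgroup *old_cgrp,
				struct task_struct *tsk, bool threadgroup)
{
	/* threadgroup == true means "tsk plus all of its threads". */
	int ret = ss->can_attach(ss, cgrp, tsk, threadgroup);

	if (ret)
		return ret;

	ss->attach(ss, cgrp, old_cgrp, tsk, threadgroup);
	return 0;
}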