Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 342
1 file changed, 275 insertions(+), 67 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 18d38e4ec7ba..58d66ea7d200 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -324,7 +324,7 @@ struct cfs_rq {
 	 * 'curr' points to currently running entity on this cfs_rq.
 	 * It is set to NULL otherwise (i.e when none are currently running).
 	 */
-	struct sched_entity *curr, *next, *last;
+	struct sched_entity *curr, *next, *last, *skip;
 
 	unsigned int nr_spread_over;
 
@@ -606,9 +606,6 @@ static inline struct task_group *task_group(struct task_struct *p)
 	struct task_group *tg;
 	struct cgroup_subsys_state *css;
 
-	if (p->flags & PF_EXITING)
-		return &root_task_group;
-
 	css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
 			lockdep_is_held(&task_rq(p)->lock));
 	tg = container_of(css, struct task_group, css);
@@ -664,10 +661,9 @@ static void update_rq_clock(struct rq *rq)
 #endif
 
 /**
- * runqueue_is_locked
+ * runqueue_is_locked - Returns true if the current cpu runqueue is locked
  * @cpu: the processor in question.
  *
- * Returns true if the current cpu runqueue is locked.
  * This interface allows printk to be called with the runqueue lock
  * held and know whether or not it is OK to wake up the klogd.
  */
@@ -1686,6 +1682,39 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 	__release(rq2->lock);
 }
 
+#else /* CONFIG_SMP */
+
+/*
+ * double_rq_lock - safely lock two runqueues
+ *
+ * Note this does not disable interrupts like task_rq_lock,
+ * you need to do so manually before calling.
+ */
+static void double_rq_lock(struct rq *rq1, struct rq *rq2)
+	__acquires(rq1->lock)
+	__acquires(rq2->lock)
+{
+	BUG_ON(!irqs_disabled());
+	BUG_ON(rq1 != rq2);
+	raw_spin_lock(&rq1->lock);
+	__acquire(rq2->lock);	/* Fake it out ;) */
+}
+
+/*
+ * double_rq_unlock - safely unlock two runqueues
+ *
+ * Note this does not restore interrupts like task_rq_unlock,
+ * you need to do so manually after calling.
+ */
+static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
+	__releases(rq1->lock)
+	__releases(rq2->lock)
+{
+	BUG_ON(rq1 != rq2);
+	raw_spin_unlock(&rq1->lock);
+	__release(rq2->lock);
+}
+
 #endif
 
 static void calc_load_account_idle(struct rq *this_rq);
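On !CONFIG_SMP builds both runqueue pointers refer to the same rq, so these stubs take a single lock and only annotate the second one for sparse. Callers are still expected to disable interrupts themselves, as the new yield_to() further down does. A minimal sketch of the intended calling pattern (the function name here is illustrative, not part of the patch):

static void example_cross_rq_update(struct rq *rq1, struct rq *rq2)
{
	unsigned long flags;

	local_irq_save(flags);		/* double_rq_lock() does not do this */
	double_rq_lock(rq1, rq2);	/* on UP this degenerates to one raw_spin_lock */

	/* ... operate on both runqueues ... */

	double_rq_unlock(rq1, rq2);
	local_irq_restore(flags);	/* and double_rq_unlock() does not restore */
}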
@@ -1880,7 +1909,7 @@ void account_system_vtime(struct task_struct *curr)
 	 */
 	if (hardirq_count())
 		__this_cpu_add(cpu_hardirq_time, delta);
-	else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
+	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
 		__this_cpu_add(cpu_softirq_time, delta);
 
 	irq_time_write_end();
@@ -1920,8 +1949,40 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 		sched_rt_avg_update(rq, irq_delta);
 }
 
+static int irqtime_account_hi_update(void)
+{
+	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	unsigned long flags;
+	u64 latest_ns;
+	int ret = 0;
+
+	local_irq_save(flags);
+	latest_ns = this_cpu_read(cpu_hardirq_time);
+	if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
+		ret = 1;
+	local_irq_restore(flags);
+	return ret;
+}
+
+static int irqtime_account_si_update(void)
+{
+	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+	unsigned long flags;
+	u64 latest_ns;
+	int ret = 0;
+
+	local_irq_save(flags);
+	latest_ns = this_cpu_read(cpu_softirq_time);
+	if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
+		ret = 1;
+	local_irq_restore(flags);
+	return ret;
+}
+
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 
+#define sched_clock_irqtime	(0)
+
 static void update_rq_clock_task(struct rq *rq, s64 delta)
 {
 	rq->clock_task += delta;
@@ -2025,14 +2086,14 @@ inline int task_curr(const struct task_struct *p)
 
 static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 				       const struct sched_class *prev_class,
-				       int oldprio, int running)
+				       int oldprio)
 {
 	if (prev_class != p->sched_class) {
 		if (prev_class->switched_from)
-			prev_class->switched_from(rq, p, running);
-		p->sched_class->switched_to(rq, p, running);
-	} else
-		p->sched_class->prio_changed(rq, p, oldprio, running);
+			prev_class->switched_from(rq, p);
+		p->sched_class->switched_to(rq, p);
+	} else if (oldprio != p->prio)
+		p->sched_class->prio_changed(rq, p, oldprio);
 }
 
 static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
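With this change switched_from()/switched_to() lose the `running` argument and prio_changed() is only invoked when the priority actually changed; the callers (rt_mutex_setprio() and __sched_setscheduler(), updated below) restore the task as current or re-enqueue it before calling check_class_changed(). A hypothetical class hook under the new signature, shown only as a sketch, can recover the old flag from the runqueue:

/* Sketch only: not a hook added by this patch. */
static void example_switched_to(struct rq *rq, struct task_struct *p)
{
	int running = (rq->curr == p);	/* replaces the old 'running' parameter */

	if (!running && p->se.on_rq)
		check_preempt_curr(rq, p, 0);
}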
@@ -2224,7 +2285,10 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 	 * yield - it could be a while.
 	 */
 	if (unlikely(on_rq)) {
-		schedule_timeout_uninterruptible(1);
+		ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
+
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_hrtimeout(&to, HRTIMER_MODE_REL);
 		continue;
 	}
 
@@ -2265,27 +2329,6 @@ void kick_process(struct task_struct *p)
 EXPORT_SYMBOL_GPL(kick_process);
 #endif /* CONFIG_SMP */
 
-/**
- * task_oncpu_function_call - call a function on the cpu on which a task runs
- * @p: the task to evaluate
- * @func: the function to be called
- * @info: the function call argument
- *
- * Calls the function @func when the task is currently running. This might
- * be on the current CPU, which just calls the function directly
- */
-void task_oncpu_function_call(struct task_struct *p,
-			      void (*func) (void *info), void *info)
-{
-	int cpu;
-
-	preempt_disable();
-	cpu = task_cpu(p);
-	if (task_curr(p))
-		smp_call_function_single(cpu, func, info, 1);
-	preempt_enable();
-}
-
 #ifdef CONFIG_SMP
 /*
  * ->cpus_allowed is protected by either TASK_WAKING or rq->lock held.
@@ -2566,6 +2609,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.sum_exec_runtime		= 0;
 	p->se.prev_sum_exec_runtime	= 0;
 	p->se.nr_migrations		= 0;
+	p->se.vruntime			= 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
@@ -2776,9 +2820,12 @@ static inline void
 prepare_task_switch(struct rq *rq, struct task_struct *prev,
 		    struct task_struct *next)
 {
+	sched_info_switch(prev, next);
+	perf_event_task_sched_out(prev, next);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_lock_switch(rq, next);
 	prepare_arch_switch(next);
+	trace_sched_switch(prev, next);
 }
 
 /**
@@ -2911,7 +2958,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	struct mm_struct *mm, *oldmm;
 
 	prepare_task_switch(rq, prev, next);
-	trace_sched_switch(prev, next);
+
 	mm = next->mm;
 	oldmm = prev->active_mm;
 	/*
@@ -3568,6 +3615,32 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
 }
 
 /*
+ * Account system cpu time to a process and desired cpustat field
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in kernel space since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
+ * @target_cputime64: pointer to cpustat field that has to be updated
+ */
+static inline
+void __account_system_time(struct task_struct *p, cputime_t cputime,
+			cputime_t cputime_scaled, cputime64_t *target_cputime64)
+{
+	cputime64_t tmp = cputime_to_cputime64(cputime);
+
+	/* Add system time to process. */
+	p->stime = cputime_add(p->stime, cputime);
+	p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
+	account_group_system_time(p, cputime);
+
+	/* Add system time to cpustat. */
+	*target_cputime64 = cputime64_add(*target_cputime64, tmp);
+	cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
+
+	/* Account for system time used */
+	acct_update_integrals(p);
+}
+
+/*
  * Account system cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
@@ -3578,36 +3651,26 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 			 cputime_t cputime, cputime_t cputime_scaled)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-	cputime64_t tmp;
+	cputime64_t *target_cputime64;
 
 	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
 		account_guest_time(p, cputime, cputime_scaled);
 		return;
 	}
 
-	/* Add system time to process. */
-	p->stime = cputime_add(p->stime, cputime);
-	p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
-	account_group_system_time(p, cputime);
-
-	/* Add system time to cpustat. */
-	tmp = cputime_to_cputime64(cputime);
 	if (hardirq_count() - hardirq_offset)
-		cpustat->irq = cputime64_add(cpustat->irq, tmp);
+		target_cputime64 = &cpustat->irq;
 	else if (in_serving_softirq())
-		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+		target_cputime64 = &cpustat->softirq;
 	else
-		cpustat->system = cputime64_add(cpustat->system, tmp);
+		target_cputime64 = &cpustat->system;
 
-	cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
-
-	/* Account for system time used */
-	acct_update_integrals(p);
+	__account_system_time(p, cputime, cputime_scaled, target_cputime64);
 }
 
 /*
  * Account for involuntary wait time.
- * @steal: the cpu time spent in involuntary wait
+ * @cputime: the cpu time spent in involuntary wait
  */
 void account_steal_time(cputime_t cputime)
 {
@@ -3635,6 +3698,73 @@ void account_idle_time(cputime_t cputime)
 
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * Account a tick to a process and cpustat
+ * @p: the process that the cpu time gets accounted to
+ * @user_tick: is the tick from userspace
+ * @rq: the pointer to rq
+ *
+ * Tick demultiplexing follows the order
+ * - pending hardirq update
+ * - pending softirq update
+ * - user_time
+ * - idle_time
+ * - system time
+ *   - check for guest_time
+ *   - else account as system_time
+ *
+ * Check for hardirq is done both for system and user time as there is
+ * no timer going off while we are on hardirq and hence we may never get an
+ * opportunity to update it solely in system time.
+ * p->stime and friends are only updated on system time and not on irq
+ * softirq as those do not count in task exec_runtime any more.
+ */
+static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+						struct rq *rq)
+{
+	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+	cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
+	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+
+	if (irqtime_account_hi_update()) {
+		cpustat->irq = cputime64_add(cpustat->irq, tmp);
+	} else if (irqtime_account_si_update()) {
+		cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+	} else if (this_cpu_ksoftirqd() == p) {
+		/*
+		 * ksoftirqd time do not get accounted in cpu_softirq_time.
+		 * So, we have to handle it separately here.
+		 * Also, p->stime needs to be updated for ksoftirqd.
+		 */
+		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
+					&cpustat->softirq);
+	} else if (user_tick) {
+		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+	} else if (p == rq->idle) {
+		account_idle_time(cputime_one_jiffy);
+	} else if (p->flags & PF_VCPU) { /* System time or guest time */
+		account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
+	} else {
+		__account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
+					&cpustat->system);
+	}
+}
+
+static void irqtime_account_idle_ticks(int ticks)
+{
+	int i;
+	struct rq *rq = this_rq();
+
+	for (i = 0; i < ticks; i++)
+		irqtime_account_process_tick(current, 0, rq);
+}
+#else /* CONFIG_IRQ_TIME_ACCOUNTING */
+static void irqtime_account_idle_ticks(int ticks) {}
+static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+						struct rq *rq) {}
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
 /*
  * Account a single tick of cpu time.
  * @p: the process that the cpu time gets accounted to
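Because account_system_vtime() above now excludes ksoftirqd from cpu_softirq_time, a tick that lands while ksoftirqd runs is not caught by irqtime_account_si_update(); the explicit this_cpu_ksoftirqd() branch keeps that time visible. A hypothetical walk-through of one such tick (numbers chosen for illustration, not taken from the patch):

/*
 * Illustration, HZ=100 so one tick is 10ms of cputime:
 *
 *   irqtime_account_hi_update()  -> 0  (no un-accounted hardirq time)
 *   irqtime_account_si_update()  -> 0  (ksoftirqd's softirq time was never
 *                                       added to cpu_softirq_time)
 *   this_cpu_ksoftirqd() == p    -> 1  (the tick hit ksoftirqd itself)
 *
 * __account_system_time() then charges the 10ms both to ksoftirqd's
 * p->stime and to cpustat->softirq, so the softirq column of /proc/stat
 * still reflects the work.
 */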
@@ -3645,6 +3775,11 @@ void account_process_tick(struct task_struct *p, int user_tick)
 	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
 	struct rq *rq = this_rq();
 
+	if (sched_clock_irqtime) {
+		irqtime_account_process_tick(p, user_tick, rq);
+		return;
+	}
+
 	if (user_tick)
 		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
 	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
@@ -3670,6 +3805,12 @@ void account_steal_ticks(unsigned long ticks)
  */
 void account_idle_ticks(unsigned long ticks)
 {
+
+	if (sched_clock_irqtime) {
+		irqtime_account_idle_ticks(ticks);
+		return;
+	}
+
 	account_idle_time(jiffies_to_cputime(ticks));
 }
 
@@ -3989,9 +4130,6 @@ need_resched_nonpreemptible:
 	rq->skip_clock_update = 0;
 
 	if (likely(prev != next)) {
-		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next);
-
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
@@ -4213,6 +4351,7 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
 {
 	__wake_up_common(q, mode, 1, 0, key);
 }
+EXPORT_SYMBOL_GPL(__wake_up_locked_key);
 
 /**
  * __wake_up_sync_key - wake up threads blocked on a waitqueue.
@@ -4570,11 +4709,10 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (on_rq)
 		enqueue_task(rq, p, oldprio < prio ? ENQUEUE_HEAD : 0);
 
-		check_class_changed(rq, p, prev_class, oldprio, running);
-	}
+	check_class_changed(rq, p, prev_class, oldprio);
 	task_rq_unlock(rq, &flags);
 }
 
@@ -4822,12 +4960,15 @@ recheck:
 		    param->sched_priority > rlim_rtprio)
 			return -EPERM;
 		}
+
 		/*
-		 * Like positive nice levels, dont allow tasks to
-		 * move out of SCHED_IDLE either:
+		 * Treat SCHED_IDLE as nice 20. Only allow a switch to
+		 * SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
 		 */
-		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
-			return -EPERM;
+		if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) {
+			if (!can_nice(p, TASK_NICE(p)))
+				return -EPERM;
+		}
 
 		/* can't change other user's priorities */
 		if (!check_same_owner(p))
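Previously an unprivileged SCHED_IDLE task could never change policy; now it may leave SCHED_IDLE as long as RLIMIT_NICE covers its current nice value (the can_nice() check). A hypothetical userspace sketch of the newly permitted transition:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 0 };

	if (sched_setscheduler(0, SCHED_IDLE, &sp))
		perror("SCHED_IDLE");

	/* ... low-priority background work ... */

	/* Used to fail with EPERM for unprivileged tasks; now succeeds
	 * when RLIMIT_NICE permits the task's nice level. */
	if (sched_setscheduler(0, SCHED_OTHER, &sp))
		perror("SCHED_OTHER");

	return 0;
}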
@@ -4902,11 +5043,10 @@ recheck:
 
 	if (running)
 		p->sched_class->set_curr_task(rq);
-	if (on_rq) {
+	if (on_rq)
 		activate_task(rq, p, 0);
 
-		check_class_changed(rq, p, prev_class, oldprio, running);
-	}
+	check_class_changed(rq, p, prev_class, oldprio);
 	__task_rq_unlock(rq);
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
@@ -5323,6 +5463,65 @@ void __sched yield(void)
 }
 EXPORT_SYMBOL(yield);
 
+/**
+ * yield_to - yield the current processor to another thread in
+ * your thread group, or accelerate that thread toward the
+ * processor it's on.
+ *
+ * It's the caller's job to ensure that the target task struct
+ * can't go away on us before we can do any checks.
+ *
+ * Returns true if we indeed boosted the target task.
+ */
+bool __sched yield_to(struct task_struct *p, bool preempt)
+{
+	struct task_struct *curr = current;
+	struct rq *rq, *p_rq;
+	unsigned long flags;
+	bool yielded = 0;
+
+	local_irq_save(flags);
+	rq = this_rq();
+
+again:
+	p_rq = task_rq(p);
+	double_rq_lock(rq, p_rq);
+	while (task_rq(p) != p_rq) {
+		double_rq_unlock(rq, p_rq);
+		goto again;
+	}
+
+	if (!curr->sched_class->yield_to_task)
+		goto out;
+
+	if (curr->sched_class != p->sched_class)
+		goto out;
+
+	if (task_running(p_rq, p) || p->state)
+		goto out;
+
+	yielded = curr->sched_class->yield_to_task(rq, p, preempt);
+	if (yielded) {
+		schedstat_inc(rq, yld_count);
+		/*
+		 * Make p's CPU reschedule; pick_next_entity takes care of
+		 * fairness.
+		 */
+		if (preempt && rq != p_rq)
+			resched_task(p_rq->curr);
+	}
+
+out:
+	double_rq_unlock(rq, p_rq);
+	local_irq_restore(flags);
+
+	if (yielded)
+		schedule();
+
+	return yielded;
+}
+EXPORT_SYMBOL_GPL(yield_to);
+
 /*
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
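yield_to() only boosts a runnable, not-currently-running task in the same scheduling class; the fair-class ->yield_to_task() implementation lives in sched_fair.c and is not part of this hunk. A minimal sketch of a caller, with an illustrative wrapper name (the likely consumer is a directed yield, e.g. a spinning vCPU boosting the vCPU holding a lock):

/* Sketch only: the caller must keep @target alive across the call. */
static bool example_directed_yield(struct task_struct *target)
{
	bool boosted;

	get_task_struct(target);
	boosted = yield_to(target, true);	/* true: kick target's CPU if remote */
	put_task_struct(target);

	return boosted;
}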
@@ -5571,7 +5770,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 * The idle tasks have their own, simple scheduling class:
 	 */
 	idle->sched_class = &idle_sched_class;
-	ftrace_graph_init_task(idle);
+	ftrace_graph_init_idle_task(idle, cpu);
 }
 
 /*
@@ -7796,6 +7995,10 @@ static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
 	INIT_LIST_HEAD(&cfs_rq->tasks);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	cfs_rq->rq = rq;
+	/* allow initial update_cfs_load() to truncate */
+#ifdef CONFIG_SMP
+	cfs_rq->load_stamp = 1;
+#endif
 #endif
 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 }
@@ -8109,6 +8312,8 @@ EXPORT_SYMBOL(__might_sleep);
 #ifdef CONFIG_MAGIC_SYSRQ
 static void normalize_task(struct rq *rq, struct task_struct *p)
 {
+	const struct sched_class *prev_class = p->sched_class;
+	int old_prio = p->prio;
 	int on_rq;
 
 	on_rq = p->se.on_rq;
@@ -8119,6 +8324,8 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 		activate_task(rq, p, 0);
 		resched_task(rq->curr);
 	}
+
+	check_class_changed(rq, p, prev_class, old_prio);
 }
 
 void normalize_rt_tasks(void)
@@ -8510,7 +8717,7 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 		/* Propagate contribution to hierarchy */
 		raw_spin_lock_irqsave(&rq->lock, flags);
 		for_each_sched_entity(se)
-			update_cfs_shares(group_cfs_rq(se), 0);
+			update_cfs_shares(group_cfs_rq(se));
 		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 
@@ -8884,7 +9091,8 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 }
 
 static void
-cpu_cgroup_exit(struct cgroup_subsys *ss, struct task_struct *task)
+cpu_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
+		struct cgroup *old_cgrp, struct task_struct *task)
 {
 	/*
 	 * cgroup_exit() is called in the copy_process() failure path.