Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	67
1 file changed, 37 insertions(+), 30 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 0236958addcb..d601fb0406ca 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -600,7 +600,6 @@ struct rq {
 	/* BKL stats */
 	unsigned int bkl_count;
 #endif
-	struct lock_class_key rq_lock_key;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -834,7 +833,7 @@ static inline u64 global_rt_period(void)
 
 static inline u64 global_rt_runtime(void)
 {
-	if (sysctl_sched_rt_period < 0)
+	if (sysctl_sched_rt_runtime < 0)
 		return RUNTIME_INF;
 
 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
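For context: the old code tested the wrong sysctl. sysctl_sched_rt_period holds the bandwidth period and is not negative in practice, while sysctl_sched_rt_runtime uses -1 as the "unlimited" sentinel, so RUNTIME_INF was unreachable before this fix. Below is a minimal userspace sketch of the sentinel convention; RUNTIME_INF's definition mirrors kernel/sched.c of this era, the rest is purely illustrative.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL
#define RUNTIME_INF   ((uint64_t)~0ULL)

static int sysctl_sched_rt_runtime = 950000;   /* 950 ms of RT time per period */

static uint64_t global_rt_runtime(void)
{
        if (sysctl_sched_rt_runtime < 0)       /* -1 disables RT throttling */
                return RUNTIME_INF;

        return (uint64_t)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

int main(void)
{
        printf("budget: %llu ns\n", (unsigned long long)global_rt_runtime());
        sysctl_sched_rt_runtime = -1;
        printf("budget: %llu (RUNTIME_INF)\n",
               (unsigned long long)global_rt_runtime());
        return 0;
}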
@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	} else {
 		if (rq1 < rq2) {
 			spin_lock(&rq1->lock);
-			spin_lock(&rq2->lock);
+			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
 			spin_lock(&rq2->lock);
-			spin_lock(&rq1->lock);
+			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);
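The address comparison gives all CPUs one global lock order, which rules out ABBA deadlock when two runqueues are locked at once. With the per-runqueue lock classes removed (see the struct rq hunk above), both locks now belong to the same lockdep class, so the second acquisition must be spin_lock_nested() to tell lockdep this is the ordered double-lock, not recursion. Here is a userspace pthread analogue of the ordering rule; the nesting annotation itself is lockdep-only and has no pthread equivalent.

#include <pthread.h>

struct rq { pthread_mutex_t lock; };

static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
        if (rq1 == rq2) {
                pthread_mutex_lock(&rq1->lock);  /* same queue: take it once */
        } else if (rq1 < rq2) {                  /* address order = global order */
                pthread_mutex_lock(&rq1->lock);
                pthread_mutex_lock(&rq2->lock);  /* kernel: spin_lock_nested(...) */
        } else {
                pthread_mutex_lock(&rq2->lock);
                pthread_mutex_lock(&rq1->lock);  /* kernel: spin_lock_nested(...) */
        }
}

static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
{
        pthread_mutex_unlock(&rq1->lock);
        if (rq1 != rq2)
                pthread_mutex_unlock(&rq2->lock);
}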
@@ -2805,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
-			spin_lock(&this_rq->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock(&busiest->lock);
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
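double_lock_balance() may have to drop this_rq->lock and re-take both locks in address order; when it re-takes this_rq->lock it does so at SINGLE_DEPTH_NESTING, so lockdep then tracks that lock at subclass 1. The new double_unlock_balance() both releases busiest->lock and uses lock_set_subclass() to move the still-held this_rq->lock back to subclass 0, keeping the annotation correct for the next balancing pass; the two call sites in the hunks below are converted to this pairing. A pthread sketch of the drop-and-retake pattern follows (the caller is assumed to hold this_rq->lock on entry; the subclass bookkeeping is lockdep-specific and appears only as a comment).

#include <pthread.h>

struct rq { pthread_mutex_t lock; };

/* Returns 1 if this_rq->lock was dropped and re-taken, i.e. any state
 * observed under it before the call may now be stale. */
static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
        int ret = 0;

        if (pthread_mutex_trylock(&busiest->lock) != 0) {
                if (busiest < this_rq) {
                        /* wrong order: back off, re-acquire in address order */
                        pthread_mutex_unlock(&this_rq->lock);
                        pthread_mutex_lock(&busiest->lock);
                        pthread_mutex_lock(&this_rq->lock);
                        ret = 1;
                } else {
                        pthread_mutex_lock(&busiest->lock);
                }
        }
        return ret;
}

static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
{
        pthread_mutex_unlock(&busiest->lock);
        /* kernel: lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); */
        (void)this_rq;
}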
@@ -3637,7 +3643,7 @@ redo:
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);
@@ -3752,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -5004,19 +5010,21 @@ recheck:
 			return -EPERM;
 	}
 
+	if (user) {
 #ifdef CONFIG_RT_GROUP_SCHED
 	/*
 	 * Do not allow realtime tasks into groups that have no runtime
 	 * assigned.
 	 */
-	if (user
-	    && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
-		return -EPERM;
+	if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
+		return -EPERM;
 #endif
 
 	retval = security_task_setscheduler(p, policy, param);
 	if (retval)
 		return retval;
+	}
+
 	/*
 	 * make sure no PI-waiters arrive (or leave) while we are
 	 * changing the priority of the task:
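This hunk makes both permission-style checks conditional on user, the flag distinguishing userspace-originated requests from in-kernel policy changes: previously only the RT-group bandwidth test honoured it, while security_task_setscheduler() ran even for kernel-internal callers. A compact control-flow sketch of the resulting ordering is below; the helper names are stand-ins for this illustration, not the kernel's.

#include <errno.h>

struct task;  /* opaque for the sketch */

/* SCHED_FIFO and SCHED_RR are 1 and 2 on Linux */
static int rt_policy(int policy)            { return policy == 1 || policy == 2; }
static int group_rt_runtime(struct task *p) { (void)p; return 0; } /* assumed helper */
static int security_check(struct task *p)   { (void)p; return 0; } /* assumed hook  */

static int setscheduler(struct task *p, int policy, int user)
{
        if (user) {
                /* no RT tasks in groups with a zero RT budget */
                if (rt_policy(policy) && group_rt_runtime(p) == 0)
                        return -EPERM;

                /* LSM hook: now also skipped for kernel-internal callers */
                int retval = security_check(p);
                if (retval)
                        return retval;
        }

        /* ... proceed to actually change the policy ... */
        return 0;
}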
@@ -7671,34 +7679,34 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 }
 
 #ifdef CONFIG_SCHED_MC
-static ssize_t sched_mc_power_savings_show(struct sys_device *dev,
-					   struct sysdev_attribute *attr, char *page)
+static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
+					   char *page)
 {
 	return sprintf(page, "%u\n", sched_mc_power_savings);
 }
-static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
-					    struct sysdev_attribute *attr,
+static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
 					    const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 0);
 }
-static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
-		   sched_mc_power_savings_store);
+static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
+			 sched_mc_power_savings_show,
+			 sched_mc_power_savings_store);
 #endif
 
 #ifdef CONFIG_SCHED_SMT
-static ssize_t sched_smt_power_savings_show(struct sys_device *dev,
-					    struct sysdev_attribute *attr, char *page)
+static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
+					    char *page)
 {
 	return sprintf(page, "%u\n", sched_smt_power_savings);
 }
-static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
-					     struct sysdev_attribute *attr,
+static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
 					     const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 1);
 }
-static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
+			 sched_smt_power_savings_show,
 		   sched_smt_power_savings_store);
 #endif
 
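The attribute handlers change from per-device signatures (struct sys_device plus struct sysdev_attribute) to per-class ones, and SYSDEV_ATTR becomes SYSDEV_CLASS_ATTR: these power-savings knobs describe the whole CPU sysdev class, not any individual CPU device. A sketch of the per-class pattern as this kernel generation used it; the sysdev interface was removed from later kernels, example_knob and its handlers are made up for illustration, and the details of this long-gone API should be treated as approximate.

#include <linux/kernel.h>
#include <linux/sysdev.h>

static unsigned int example_knob;

static ssize_t example_knob_show(struct sysdev_class *class, char *page)
{
        /* one value for the whole class, not per device */
        return sprintf(page, "%u\n", example_knob);
}

static ssize_t example_knob_store(struct sysdev_class *class,
                                  const char *buf, size_t count)
{
        example_knob = simple_strtoul(buf, NULL, 10);
        return count;
}

static SYSDEV_CLASS_ATTR(example_knob, 0644, example_knob_show,
                         example_knob_store);

/* registration, e.g. against the cpu class (assumed call site):
 *      err = sysdev_class_create_file(&cpu_sysdev_class,
 *                                     &attr_example_knob);
 */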
@@ -7998,7 +8006,6 @@ void __init sched_init(void)
 
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
-		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
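Taken together with the struct rq hunk at the top, this closes the lockdep story: without rq_lock_key and the lockdep_set_class() call, every rq->lock falls into the single static class that spin_lock_init() assigns, which is precisely why the double-lock paths above need spin_lock_nested() and lock_set_subclass(). An illustrative kernel-style fragment, not a complete translation unit:

#include <linux/spinlock.h>

/* Both locks share one lockdep class, as all rq->lock instances do
 * once rq_lock_key is gone. */
static void lock_two(struct rq *rq1, struct rq *rq2)
{
        spin_lock(&rq1->lock);
        /* spin_lock(&rq2->lock) here would make lockdep report possible
         * recursive locking, since rq2->lock shares rq1->lock's class */
        spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);

        /* ... both runqueues safely held ... */

        spin_unlock(&rq2->lock);
        spin_unlock(&rq1->lock);
}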