author		Ingo Molnar <mingo@elte.hu>	2008-09-22 07:08:57 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-22 07:08:57 -0400
commit		0b88641f1bafdbd087d5e63987a30cc0eadd63b9 (patch)
tree		81dcf756db373444140bb2623584710c628e3048 /kernel/sched.c
parent		fbdbf709938d155c719c76b9894d28342632c797 (diff)
parent		72d31053f62c4bc464c2783974926969614a8649 (diff)
Merge commit 'v2.6.27-rc7' into x86/debug
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	241
1 file changed, 199 insertions, 42 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 6acf749d3336..98890807375b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -600,7 +600,6 @@ struct rq {
 	/* BKL stats */
 	unsigned int bkl_count;
 #endif
-	struct lock_class_key rq_lock_key;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -809,9 +808,9 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
 
 /*
  * ratelimit for updating the group shares.
- * default: 0.5ms
+ * default: 0.25ms
  */
-const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
+unsigned int sysctl_sched_shares_ratelimit = 250000;
 
 /*
  * period over which we measure -rt task cpu usage in us.
@@ -834,7 +833,7 @@ static inline u64 global_rt_period(void)
 
 static inline u64 global_rt_runtime(void)
 {
-	if (sysctl_sched_rt_period < 0)
+	if (sysctl_sched_rt_runtime < 0)
 		return RUNTIME_INF;
 
 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
@@ -1867,16 +1866,24 @@ migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
 /*
  * wait_task_inactive - wait for a thread to unschedule.
  *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change. If it changes, i.e. @p might have woken up,
+ * then return zero. When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count). If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
  * The caller must ensure that the task *will* unschedule sometime soon,
  * else this function might spin for a *long* time. This function can't
  * be called with interrupts off, or it may introduce deadlock with
  * smp_call_function() if an IPI is sent by the same process we are
  * waiting to become inactive.
  */
-void wait_task_inactive(struct task_struct *p)
+unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 {
 	unsigned long flags;
 	int running, on_rq;
+	unsigned long ncsw;
 	struct rq *rq;
 
 	for (;;) {
@@ -1899,8 +1906,11 @@ void wait_task_inactive(struct task_struct *p)
 		 * return false if the runqueue has changed and p
 		 * is actually now running somewhere else!
 		 */
-		while (task_running(rq, p))
+		while (task_running(rq, p)) {
+			if (match_state && unlikely(p->state != match_state))
+				return 0;
 			cpu_relax();
+		}
 
 		/*
 		 * Ok, time to look more closely! We need the rq
@@ -1910,9 +1920,21 @@ void wait_task_inactive(struct task_struct *p)
 		rq = task_rq_lock(p, &flags);
 		running = task_running(rq, p);
 		on_rq = p->se.on_rq;
+		ncsw = 0;
+		if (!match_state || p->state == match_state) {
+			ncsw = p->nivcsw + p->nvcsw;
+			if (unlikely(!ncsw))
+				ncsw = 1;
+		}
 		task_rq_unlock(rq, &flags);
 
 		/*
+		 * If it changed from the expected state, bail out now.
+		 */
+		if (unlikely(!ncsw))
+			break;
+
+		/*
 		 * Was it really running after all now that we
 		 * checked with the proper locks actually held?
 		 *
@@ -1944,6 +1966,8 @@ void wait_task_inactive(struct task_struct *p)
 		 */
 		break;
 	}
+
+	return ncsw;
 }
 
 /***
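The reworked wait_task_inactive() returns the task's context-switch count instead of void, which enables the two-call idiom described in the new comment block above. A hedged caller sketch in kernel-context C (the helper name and the choice of TASK_UNINTERRUPTIBLE as match_state are hypothetical; only wait_task_inactive() and its match_state argument come from this diff):

#include <linux/sched.h>

/*
 * Hypothetical helper: returns non-zero if @p stayed off the CPU between
 * the two calls, using the switch-count cookie wait_task_inactive() returns.
 */
static int task_stayed_inactive(struct task_struct *p)
{
	unsigned long ncsw;

	/* 0 means @p left TASK_UNINTERRUPTIBLE, i.e. it may have run. */
	ncsw = wait_task_inactive(p, TASK_UNINTERRUPTIBLE);
	if (!ncsw)
		return 0;

	/* ... inspect @p here ... */

	/* Same non-zero count again => @p never scheduled in between. */
	return wait_task_inactive(p, TASK_UNINTERRUPTIBLE) == ncsw;
}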
@@ -2734,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	} else {
 		if (rq1 < rq2) {
 			spin_lock(&rq1->lock);
-			spin_lock(&rq2->lock);
+			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
 			spin_lock(&rq2->lock);
-			spin_lock(&rq1->lock);
+			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);
@@ -2780,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
-			spin_lock(&this_rq->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock(&busiest->lock);
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
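These two hunks pair with the removal of rq_lock_key from struct rq and of the lockdep_set_class() call in sched_init() elsewhere in this diff: every rq->lock now shares one lockdep class, so spin_lock_nested(..., SINGLE_DEPTH_NESTING) and lock_set_subclass() annotate the second runqueue lock to keep lockdep from flagging the intentional double acquisition. The real deadlock avoidance is still the fixed address ordering. A user-space analogue of that ordering rule (illustration only; nothing below is kernel API):

#include <pthread.h>

/* Take two same-class locks in a fixed address order so that two threads
 * locking the same pair with swapped arguments cannot deadlock. */
static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);		/* same lock: take it once */
	} else if (a < b) {
		pthread_mutex_lock(a);		/* lower address first */
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}

static void double_unlock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}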
@@ -3612,7 +3643,7 @@ redo:
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);
@@ -3727,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -4046,6 +4077,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
 		cpustat->nice = cputime64_add(cpustat->nice, tmp);
 	else
 		cpustat->user = cputime64_add(cpustat->user, tmp);
+	/* Account for user time used */
+	acct_update_integrals(p);
 }
 
 /*
@@ -4146,6 +4179,65 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
 }
 
 /*
+ * Use precise platform statistics if available:
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+cputime_t task_utime(struct task_struct *p)
+{
+	return p->utime;
+}
+
+cputime_t task_stime(struct task_struct *p)
+{
+	return p->stime;
+}
+#else
+cputime_t task_utime(struct task_struct *p)
+{
+	clock_t utime = cputime_to_clock_t(p->utime),
+		total = utime + cputime_to_clock_t(p->stime);
+	u64 temp;
+
+	/*
+	 * Use CFS's precise accounting:
+	 */
+	temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
+
+	if (total) {
+		temp *= utime;
+		do_div(temp, total);
+	}
+	utime = (clock_t)temp;
+
+	p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
+	return p->prev_utime;
+}
+
+cputime_t task_stime(struct task_struct *p)
+{
+	clock_t stime;
+
+	/*
+	 * Use CFS's precise accounting. (we subtract utime from
+	 * the total, to make sure the total observed by userspace
+	 * grows monotonically - apps rely on that):
+	 */
+	stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
+			cputime_to_clock_t(task_utime(p));
+
+	if (stime >= 0)
+		p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
+
+	return p->prev_stime;
+}
+#endif
+
+inline cputime_t task_gtime(struct task_struct *p)
+{
+	return p->gtime;
+}
+
+/*
  * This function gets called by the timer code, with HZ frequency.
  * We call it with interrupts disabled.
 *
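In the !CONFIG_VIRT_CPU_ACCOUNTING branch, task_utime() splits the precise CFS runtime (p->se.sum_exec_runtime) between user and system time in the ratio of the tick-sampled utime/stime, and task_stime() takes the remainder so the userspace-visible totals stay monotonic. A stand-alone sketch of the same arithmetic, with made-up tick counts:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t utime_ticks = 600;	/* tick-sampled user time (made up) */
	uint64_t stime_ticks = 400;	/* tick-sampled system time (made up) */
	uint64_t total = utime_ticks + stime_ticks;
	uint64_t runtime = 900;		/* precise CFS runtime, in ticks (made up) */

	/* utime = runtime * utime / (utime + stime), as in task_utime() */
	uint64_t utime = total ? runtime * utime_ticks / total : 0;
	/* stime = runtime - utime, as in task_stime(): the sum stays exact */
	uint64_t stime = runtime - utime;

	/* prints "scaled utime=540 stime=360" */
	printf("scaled utime=%llu stime=%llu\n",
	       (unsigned long long)utime, (unsigned long long)stime);
	return 0;
}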
@@ -4636,6 +4728,52 @@ int __sched wait_for_completion_killable(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion_killable);
 
+/**
+ *	try_wait_for_completion - try to decrement a completion without blocking
+ *	@x:	completion structure
+ *
+ *	Returns: 0 if a decrement cannot be done without blocking
+ *		 1 if a decrement succeeded.
+ *
+ *	If a completion is being used as a counting completion,
+ *	attempt to decrement the counter without blocking. This
+ *	enables us to avoid waiting if the resource the completion
+ *	is protecting is not available.
+ */
+bool try_wait_for_completion(struct completion *x)
+{
+	int ret = 1;
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done)
+		ret = 0;
+	else
+		x->done--;
+	spin_unlock_irq(&x->wait.lock);
+	return ret;
+}
+EXPORT_SYMBOL(try_wait_for_completion);
+
+/**
+ *	completion_done - Test to see if a completion has any waiters
+ *	@x:	completion structure
+ *
+ *	Returns: 0 if there are waiters (wait_for_completion() in progress)
+ *		 1 if there are no waiters.
+ *
+ */
+bool completion_done(struct completion *x)
+{
+	int ret = 1;
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done)
+		ret = 0;
+	spin_unlock_irq(&x->wait.lock);
+	return ret;
+}
+EXPORT_SYMBOL(completion_done);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
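try_wait_for_completion() gives callers a non-blocking way to consume a completion used as a counter. A hypothetical caller sketch (the function and parameter names are made up; try_wait_for_completion() is the API added above, wait_for_completion() is the existing blocking call):

#include <linux/completion.h>

/* Consume one completed "slot" without sleeping when possible,
 * otherwise fall back to the normal blocking wait. */
static void get_slot(struct completion *slots)
{
	if (try_wait_for_completion(slots))
		return;				/* a completed slot was consumed */

	wait_for_completion(slots);		/* none ready: sleep until complete() */
}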
@@ -4977,19 +5115,21 @@ recheck:
 			return -EPERM;
 	}
 
+	if (user) {
 #ifdef CONFIG_RT_GROUP_SCHED
 	/*
 	 * Do not allow realtime tasks into groups that have no runtime
 	 * assigned.
 	 */
-	if (user
-	    && rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
-		return -EPERM;
+	if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0)
+		return -EPERM;
 #endif
 
 	retval = security_task_setscheduler(p, policy, param);
 	if (retval)
 		return retval;
+	}
+
 	/*
 	 * make sure no PI-waiters arrive (or leave) while we are
 	 * changing the priority of the task:
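With the if (user) wrapper, the RT-group runtime check and the security_task_setscheduler() hook now apply only to user-originated requests; kernel-internal policy changes skip them. From user space the visible behaviour is that asking for a realtime policy fails with EPERM when the caller lacks privilege or, with CONFIG_RT_GROUP_SCHED, when its task group has no RT runtime assigned. A small user-space probe (the priority value is arbitrary):

#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 1 };

	/* pid 0 == the calling thread */
	if (sched_setscheduler(0, SCHED_FIFO, &sp) == -1)
		/* EPERM: no privilege, or the task group's rt_runtime is 0 */
		printf("SCHED_FIFO refused: %s\n", strerror(errno));
	else
		printf("now running SCHED_FIFO\n");
	return 0;
}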
@@ -5705,6 +5845,8 @@ static inline void sched_init_granularity(void)
 		sysctl_sched_latency = limit;
 
 	sysctl_sched_wakeup_granularity *= factor;
+
+	sysctl_sched_shares_ratelimit *= factor;
 }
 
 #ifdef CONFIG_SMP
@@ -6387,7 +6529,7 @@ static struct notifier_block __cpuinitdata migration_notifier = {
 	.priority = 10
 };
 
-void __init migration_init(void)
+static int __init migration_init(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();
 	int err;
@@ -6397,7 +6539,10 @@ void __init migration_init(void)
 	BUG_ON(err == NOTIFY_BAD);
 	migration_call(&migration_notifier, CPU_ONLINE, cpu);
 	register_cpu_notifier(&migration_notifier);
+
+	return err;
 }
+early_initcall(migration_init);
 #endif
 
 #ifdef CONFIG_SMP
@@ -7551,24 +7696,27 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  * and partition_sched_domains() will fallback to the single partition
  * 'fallback_doms', it also forces the domains to be rebuilt.
  *
+ * If doms_new==NULL it will be replaced with cpu_online_map.
+ * ndoms_new==0 is a special case for destroying existing domains.
+ * It will not create the default domain.
+ *
  * Call with hotplug lock held
  */
 void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
			     struct sched_domain_attr *dattr_new)
 {
-	int i, j;
+	int i, j, n;
 
 	mutex_lock(&sched_domains_mutex);
 
 	/* always unregister in case we don't destroy any domains */
 	unregister_sched_domain_sysctl();
 
-	if (doms_new == NULL)
-		ndoms_new = 0;
+	n = doms_new ? ndoms_new : 0;
 
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
-		for (j = 0; j < ndoms_new; j++) {
+		for (j = 0; j < n; j++) {
 			if (cpus_equal(doms_cur[i], doms_new[j])
 				&& dattrs_equal(dattr_cur, i, dattr_new, j))
 				goto match1;
@@ -7581,7 +7729,6 @@ match1:
 
 	if (doms_new == NULL) {
 		ndoms_cur = 0;
-		ndoms_new = 1;
 		doms_new = &fallback_doms;
 		cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map);
 		dattr_new = NULL;
@@ -7618,8 +7765,13 @@ match2:
 int arch_reinit_sched_domains(void)
 {
 	get_online_cpus();
+
+	/* Destroy domains first to force the rebuild */
+	partition_sched_domains(0, NULL, NULL);
+
 	rebuild_sched_domains();
 	put_online_cpus();
+
 	return 0;
 }
 
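Together with the new comment on partition_sched_domains(), this establishes the convention that ndoms_new == 0 destroys the current domains without creating the default one, while a NULL doms_new with a non-zero count falls back to cpu_online_map minus the isolated CPUs (the hotplug notifier below uses partition_sched_domains(1, NULL, NULL) for exactly that). A hypothetical destroy-then-rebuild sketch under this convention (the function name is made up; callers must hold the hotplug lock, as the comment requires):

/* Hypothetical sketch, mirroring arch_reinit_sched_domains() above. */
static void rebuild_default_domains(void)
{
	get_online_cpus();
	partition_sched_domains(0, NULL, NULL);	/* tear down all current domains */
	partition_sched_domains(1, NULL, NULL);	/* rebuild the single default one */
	put_online_cpus();
}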
@@ -7641,34 +7793,34 @@ static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
 }
 
 #ifdef CONFIG_SCHED_MC
-static ssize_t sched_mc_power_savings_show(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *page)
+static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
+					   char *page)
 {
 	return sprintf(page, "%u\n", sched_mc_power_savings);
 }
-static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
-					    struct sysdev_attribute *attr,
+static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
 					    const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 0);
 }
-static SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
-		   sched_mc_power_savings_store);
+static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
+			 sched_mc_power_savings_show,
+			 sched_mc_power_savings_store);
 #endif
 
 #ifdef CONFIG_SCHED_SMT
-static ssize_t sched_smt_power_savings_show(struct sys_device *dev,
-				struct sysdev_attribute *attr, char *page)
+static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
+					    char *page)
 {
 	return sprintf(page, "%u\n", sched_smt_power_savings);
 }
-static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
-					    struct sysdev_attribute *attr,
+static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
 					    const char *buf, size_t count)
 {
 	return sched_power_savings_store(buf, count, 1);
 }
-static SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
+			 sched_smt_power_savings_show,
 		   sched_smt_power_savings_store);
 #endif
 
@@ -7703,7 +7855,7 @@ static int update_sched_domains(struct notifier_block *nfb,
 	case CPU_ONLINE_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		partition_sched_domains(0, NULL, NULL);
+		partition_sched_domains(1, NULL, NULL);
 		return NOTIFY_OK;
 
 	default:
@@ -7968,7 +8120,6 @@ void __init sched_init(void)
 
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
-		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
@@ -8425,8 +8576,8 @@ struct task_group *sched_create_group(struct task_group *parent)
 	WARN_ON(!parent); /* root should already exist */
 
 	tg->parent = parent;
-	list_add_rcu(&tg->siblings, &parent->children);
 	INIT_LIST_HEAD(&tg->children);
+	list_add_rcu(&tg->siblings, &parent->children);
 	spin_unlock_irqrestore(&task_group_lock, flags);
 
 	return tg;
@@ -8758,6 +8909,9 @@ static int sched_rt_global_constraints(void)
 	u64 rt_runtime, rt_period;
 	int ret = 0;
 
+	if (sysctl_sched_rt_period <= 0)
+		return -EINVAL;
+
 	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
 	rt_runtime = tg->rt_bandwidth.rt_runtime;
 
@@ -8774,6 +8928,9 @@ static int sched_rt_global_constraints(void)
 	unsigned long flags;
 	int i;
 
+	if (sysctl_sched_rt_period <= 0)
+		return -EINVAL;
+
 	spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
 	for_each_possible_cpu(i) {
 		struct rt_rq *rt_rq = &cpu_rq(i)->rt;