| author | Ingo Molnar <mingo@elte.hu> | 2008-09-05 12:56:57 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-09-05 12:56:57 -0400 |
| commit | 616ad8c44281c0c6711a72b560e01ec335ff27e0 (patch) | |
| tree | 0a20453ffedb09db6fb41a0c2208ccc2c7751d3a /kernel/sched.c | |
| parent | 99809963c99e1ed868d9ebeb4a5e7ee1cbe0309f (diff) | |
| parent | b380b0d4f7dffcc235c0facefa537d4655619101 (diff) | |
Merge branch 'linus' into x86/defconfig
Diffstat (limited to 'kernel/sched.c')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched.c | 77 |

1 file changed, 65 insertions(+), 12 deletions(-)
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 04160d277e7a..9a1ddb84e26d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -600,7 +600,6 @@ struct rq {
 	/* BKL stats */
 	unsigned int bkl_count;
 #endif
-	struct lock_class_key rq_lock_key;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
@@ -809,9 +808,9 @@ const_debug unsigned int sysctl_sched_nr_migrate = 32;
 
 /*
  * ratelimit for updating the group shares.
- * default: 0.5ms
+ * default: 0.25ms
  */
-const_debug unsigned int sysctl_sched_shares_ratelimit = 500000;
+unsigned int sysctl_sched_shares_ratelimit = 250000;
 
 /*
  * period over which we measure -rt task cpu usage in us.
@@ -834,7 +833,7 @@ static inline u64 global_rt_period(void)
 
 static inline u64 global_rt_runtime(void)
 {
-	if (sysctl_sched_rt_period < 0)
+	if (sysctl_sched_rt_runtime < 0)
 		return RUNTIME_INF;
 
 	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
@@ -2759,10 +2758,10 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
 	} else {
 		if (rq1 < rq2) {
 			spin_lock(&rq1->lock);
-			spin_lock(&rq2->lock);
+			spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
 		} else {
 			spin_lock(&rq2->lock);
-			spin_lock(&rq1->lock);
+			spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
 		}
 	}
 	update_rq_clock(rq1);
@@ -2805,14 +2804,21 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
 		if (busiest < this_rq) {
 			spin_unlock(&this_rq->lock);
 			spin_lock(&busiest->lock);
-			spin_lock(&this_rq->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
 			ret = 1;
 		} else
-			spin_lock(&busiest->lock);
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
 	}
 	return ret;
 }
 
+static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 /*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
@@ -3637,7 +3643,7 @@ redo:
 		ld_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, CPU_NEWLY_IDLE,
 					&all_pinned);
-		spin_unlock(&busiest->lock);
+		double_unlock_balance(this_rq, busiest);
 
 		if (unlikely(all_pinned)) {
 			cpu_clear(cpu_of(busiest), *cpus);
@@ -3752,7 +3758,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		else
 			schedstat_inc(sd, alb_failed);
 	}
-	spin_unlock(&target_rq->lock);
+	double_unlock_balance(busiest_rq, target_rq);
 }
 
 #ifdef CONFIG_NO_HZ
@@ -4663,6 +4669,52 @@ int __sched wait_for_completion_killable(struct completion *x)
 }
 EXPORT_SYMBOL(wait_for_completion_killable);
 
+/**
+ * try_wait_for_completion - try to decrement a completion without blocking
+ * @x: completion structure
+ *
+ * Returns: 0 if a decrement cannot be done without blocking
+ *	    1 if a decrement succeeded.
+ *
+ * If a completion is being used as a counting completion,
+ * attempt to decrement the counter without blocking. This
+ * enables us to avoid waiting if the resource the completion
+ * is protecting is not available.
+ */
+bool try_wait_for_completion(struct completion *x)
+{
+	int ret = 1;
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done)
+		ret = 0;
+	else
+		x->done--;
+	spin_unlock_irq(&x->wait.lock);
+	return ret;
+}
+EXPORT_SYMBOL(try_wait_for_completion);
+
+/**
+ * completion_done - Test to see if a completion has any waiters
+ * @x: completion structure
+ *
+ * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ *	    1 if there are no waiters.
+ */
+bool completion_done(struct completion *x)
+{
+	int ret = 1;
+
+	spin_lock_irq(&x->wait.lock);
+	if (!x->done)
+		ret = 0;
+	spin_unlock_irq(&x->wait.lock);
+	return ret;
+}
+EXPORT_SYMBOL(completion_done);
+
 static long __sched
 sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
@@ -5734,6 +5786,8 @@ static inline void sched_init_granularity(void)
 		sysctl_sched_latency = limit;
 
 	sysctl_sched_wakeup_granularity *= factor;
+
+	sysctl_sched_shares_ratelimit *= factor;
 }
 
 #ifdef CONFIG_SMP
@@ -8000,7 +8054,6 @@ void __init sched_init(void)
 
 		rq = cpu_rq(i);
 		spin_lock_init(&rq->lock);
-		lockdep_set_class(&rq->lock, &rq->rq_lock_key);
 		rq->nr_running = 0;
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
@@ -8457,8 +8510,8 @@ struct task_group *sched_create_group(struct task_group *parent)
 	WARN_ON(!parent); /* root should already exist */
 
 	tg->parent = parent;
-	list_add_rcu(&tg->siblings, &parent->children);
 	INIT_LIST_HEAD(&tg->children);
+	list_add_rcu(&tg->siblings, &parent->children);
 	spin_unlock_irqrestore(&task_group_lock, flags);
 
 	return tg;
```
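Note on the locking hunks above: with the per-runqueue `lock_class_key` removed, every `rq->lock` shares a single lockdep class, so the second lock of any runqueue pair is taken with `spin_lock_nested(..., SINGLE_DEPTH_NESTING)` to tell lockdep the nesting is intentional; the real deadlock avoidance comes from always locking the lower-addressed runqueue first. The sketch below is not from this commit; it illustrates the same idiom for a hypothetical `struct item` whose spinlocks all share one class, and it assumes the two pointers are distinct.

```c
#include <linux/spinlock.h>

/* Hypothetical objects whose spinlocks all belong to one lockdep class. */
struct item {
	spinlock_t lock;
	/* ... */
};

/*
 * Lock two distinct items in address order so concurrent callers cannot
 * deadlock; SINGLE_DEPTH_NESTING marks the second acquisition of the
 * shared class as intentional nesting for lockdep.
 */
static void double_item_lock(struct item *a, struct item *b)
{
	if (a < b) {
		spin_lock(&a->lock);
		spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&b->lock);
		spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
	}
}

static void double_item_unlock(struct item *a, struct item *b)
{
	spin_unlock(&a->lock);
	spin_unlock(&b->lock);
}
```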
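The new `try_wait_for_completion()` and `completion_done()` helpers let a caller treat a completion as a counting primitive without sleeping. A minimal usage sketch follows; the producer/consumer scenario and all function names other than the completion API itself are hypothetical, not part of this commit.

```c
#include <linux/completion.h>

/* One unit of a resource, signalled by a producer. */
static DECLARE_COMPLETION(resource_ready);

static void producer(void)
{
	complete(&resource_ready);	/* make one unit available */
}

static void consumer(void)
{
	/*
	 * Take a unit without sleeping if one is already available,
	 * otherwise fall back to the blocking wait.
	 */
	if (!try_wait_for_completion(&resource_ready))
		wait_for_completion(&resource_ready);
	/* ... use the resource ... */
}

static void teardown(void)
{
	/*
	 * No count is pending, so a waiter may still be blocked:
	 * signal the completion before tearing things down.
	 */
	if (!completion_done(&resource_ready))
		complete(&resource_ready);
}
```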
