author		Ingo Molnar <mingo@elte.hu>	2008-12-12 07:48:57 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-12 07:48:57 -0500
commit		45ab6b0c76d0e4cce5bd608ccf97b0f6b20f18df (patch)
tree		4d51c73533c386aee16fde1e74b5e3bc22eedc53 /kernel/sched.c
parent		81444a799550214f549caf579cf65a0ca55e70b7 (diff)
parent		d65bd5ecb2bd166cea4952a59b7e16cc3ad6ef6c (diff)
Merge branch 'sched/core' into cpus4096
Conflicts:
	include/linux/ftrace.h
	kernel/sched.c
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	99
1 file changed, 61 insertions(+), 38 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4ed9f588faa6..e00c92d22655 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -267,6 +267,10 @@ struct task_group {
 	struct cgroup_subsys_state css;
 #endif
 
+#ifdef CONFIG_USER_SCHED
+	uid_t uid;
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
@@ -292,6 +296,12 @@ struct task_group {
 
 #ifdef CONFIG_USER_SCHED
 
+/* Helper function to pass uid information to create_sched_user() */
+void set_tg_uid(struct user_struct *user)
+{
+	user->tg->uid = user->uid;
+}
+
 /*
  * Root task group.
  *	Every UID task group (including init_task_group aka UID-0) will
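(Not part of the diff above: set_tg_uid() simply records the owning uid on the per-user task group so scheduler debug output can attribute a group to its user. A minimal hedged sketch of a call site, assuming user->tg has already been set up elsewhere; the function name below is hypothetical and does not appear in this commit.)

/* Hypothetical call site -- the real caller lives outside kernel/sched.c. */
static void record_group_owner(struct user_struct *up)
{
	/* up->tg must already point at the user's task group. */
	set_tg_uid(up);		/* copies up->uid into up->tg->uid */
}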
@@ -1587,6 +1597,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
 
 #endif
 
+/*
+ * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
+ */
+static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(this_rq->lock)
+	__acquires(busiest->lock)
+	__acquires(this_rq->lock)
+{
+	int ret = 0;
+
+	if (unlikely(!irqs_disabled())) {
+		/* printk() doesn't work good under rq->lock */
+		spin_unlock(&this_rq->lock);
+		BUG_ON(1);
+	}
+	if (unlikely(!spin_trylock(&busiest->lock))) {
+		if (busiest < this_rq) {
+			spin_unlock(&this_rq->lock);
+			spin_lock(&busiest->lock);
+			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
+			ret = 1;
+		} else
+			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
+	}
+	return ret;
+}
+
+static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
+	__releases(busiest->lock)
+{
+	spin_unlock(&busiest->lock);
+	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
+}
+
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
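(Commentary, not part of the diff: double_lock_balance() is only moved up here so it can be shared; its core idea is that when the trylock on busiest fails, both runqueue locks are reacquired in a fixed order, lowest address first, so two CPUs balancing against each other cannot deadlock, and the return value of 1 warns the caller that this_rq->lock was dropped and any cached state must be revalidated. A userspace analogue of that ordering rule, using pthread mutexes and illustrative names, is sketched below.)

#include <pthread.h>

struct runq {
	pthread_mutex_t lock;
	/* ... per-cpu runqueue state ... */
};

/*
 * Acquire busiest->lock while already holding this_rq->lock.
 * If the opportunistic trylock fails, fall back to taking the locks
 * in address order.  Returns 1 if this_rq->lock was dropped and
 * retaken, so the caller knows to revalidate anything it read earlier.
 */
static int lock_both(struct runq *this_rq, struct runq *busiest)
{
	if (pthread_mutex_trylock(&busiest->lock) == 0)
		return 0;			/* got it without dropping anything */

	if (busiest < this_rq) {
		pthread_mutex_unlock(&this_rq->lock);
		pthread_mutex_lock(&busiest->lock);
		pthread_mutex_lock(&this_rq->lock);
		return 1;			/* caller must recheck its state */
	}
	pthread_mutex_lock(&busiest->lock);	/* already in the right order */
	return 0;
}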
@@ -2784,40 +2827,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
 }
 
 /*
- * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
- */
-static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(this_rq->lock)
-	__acquires(busiest->lock)
-	__acquires(this_rq->lock)
-{
-	int ret = 0;
-
-	if (unlikely(!irqs_disabled())) {
-		/* printk() doesn't work good under rq->lock */
-		spin_unlock(&this_rq->lock);
-		BUG_ON(1);
-	}
-	if (unlikely(!spin_trylock(&busiest->lock))) {
-		if (busiest < this_rq) {
-			spin_unlock(&this_rq->lock);
-			spin_lock(&busiest->lock);
-			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
-			ret = 1;
-		} else
-			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
-	}
-	return ret;
-}
-
-static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
-	__releases(busiest->lock)
-{
-	spin_unlock(&busiest->lock);
-	lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
-}
-
-/*
  * If dest_cpu is allowed for this process, migrate the task to it.
  * This is accomplished by forcing the cpu_allowed mask to only
  * allow dest_cpu, which will force the cpu onto dest_cpu. Then
@@ -3676,7 +3685,7 @@ out_balanced:
 static void idle_balance(int this_cpu, struct rq *this_rq)
 {
 	struct sched_domain *sd;
-	int pulled_task = -1;
+	int pulled_task = 0;
 	unsigned long next_balance = jiffies + HZ;
 	cpumask_var_t tmpmask;
 
@@ -6577,7 +6586,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 			req = list_entry(rq->migration_queue.next,
 					 struct migration_req, list);
 			list_del_init(&req->list);
+			spin_unlock_irq(&rq->lock);
 			complete(&req->done);
+			spin_lock_irq(&rq->lock);
 		}
 		spin_unlock_irq(&rq->lock);
 		break;
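(Commentary, not part of the diff: rq->lock is now dropped around complete() so the waiter can be woken, and possibly run and take rq->lock itself, without the runqueue lock held across the wakeup; the request was already unlinked with list_del_init(), so nothing on the queue is touched while the lock is released. A hedged userspace sketch of this "unlock around the wakeup" pattern, with illustrative types and names, follows.)

#include <pthread.h>

struct request {
	pthread_mutex_t	done_lock;
	pthread_cond_t	done;
	int		finished;
};

/*
 * Complete one request that was already unlinked from the queue while
 * queue_lock was held.  The queue lock is released around the wakeup
 * and retaken before the caller goes back to scanning the queue.
 */
static void finish_request(pthread_mutex_t *queue_lock, struct request *req)
{
	pthread_mutex_unlock(queue_lock);	/* don't signal with it held */

	pthread_mutex_lock(&req->done_lock);
	req->finished = 1;
	pthread_cond_signal(&req->done);
	pthread_mutex_unlock(&req->done_lock);

	pthread_mutex_lock(queue_lock);		/* retake before touching the queue */
}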
@@ -6781,6 +6792,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 				SD_BALANCE_EXEC |
 				SD_SHARE_CPUPOWER |
 				SD_SHARE_PKG_RESOURCES);
+		if (nr_node_ids == 1)
+			pflags &= ~SD_SERIALIZE;
 	}
 	if (~cflags & pflags)
 		return 0;
@@ -7716,8 +7729,14 @@ static struct sched_domain_attr *dattr_cur;
  */
 static cpumask_var_t fallback_doms;
 
-void __attribute__((weak)) arch_update_cpu_topology(void)
+/*
+ * arch_update_cpu_topology lets virtualized architectures update the
+ * cpu core maps. It is supposed to return 1 if the topology changed
+ * or 0 if it stayed the same.
+ */
+int __attribute__((weak)) arch_update_cpu_topology(void)
 {
+	return 0;
 }
 
 /*
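(Commentary, not part of the diff: arch_update_cpu_topology() is now a weak function returning int, so the generic kernel defaults to "nothing changed" while an architecture whose core maps can change at runtime, e.g. under a hypervisor, may override it and return 1. A hedged sketch of what such an override could look like; topology_maps_changed() is a hypothetical helper, not a real kernel API.)

/* Hypothetical architecture-side override -- illustration only. */
int arch_update_cpu_topology(void)
{
	/*
	 * Re-read the core/package maps from firmware or the hypervisor
	 * here, then report whether anything changed so that
	 * partition_sched_domains() knows to rebuild every sched domain.
	 */
	return topology_maps_changed() ? 1 : 0;
}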
@@ -7811,17 +7830,21 @@ void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
 			     struct sched_domain_attr *dattr_new)
 {
 	int i, j, n;
+	int new_topology;
 
 	mutex_lock(&sched_domains_mutex);
 
 	/* always unregister in case we don't destroy any domains */
 	unregister_sched_domain_sysctl();
 
+	/* Let architecture update cpu core mappings. */
+	new_topology = arch_update_cpu_topology();
+
 	n = doms_new ? ndoms_new : 0;
 
 	/* Destroy deleted domains */
 	for (i = 0; i < ndoms_cur; i++) {
-		for (j = 0; j < n; j++) {
+		for (j = 0; j < n && !new_topology; j++) {
 			if (cpumask_equal(&doms_cur[i], &doms_new[j])
 			    && dattrs_equal(dattr_cur, i, dattr_new, j))
 				goto match1;
@@ -7841,7 +7864,7 @@ match1:
 
 	/* Build new domains */
 	for (i = 0; i < ndoms_new; i++) {
-		for (j = 0; j < ndoms_cur; j++) {
+		for (j = 0; j < ndoms_cur && !new_topology; j++) {
 			if (cpumask_equal(&doms_new[i], &doms_cur[j])
 			    && dattrs_equal(dattr_new, i, dattr_cur, j))
 				goto match2;
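(Commentary, not part of the diff: because the inner loops now test "&& !new_topology", a nonzero return from arch_update_cpu_topology() makes every match attempt fail, so all old domains are destroyed and all new ones are rebuilt against the updated core maps. A toy, self-contained model of that short-circuit, with illustrative names, is sketched below.)

#include <stdio.h>
#include <string.h>

/*
 * Toy model of the match1/match2 logic above: when force_rebuild is
 * nonzero the matching loop never runs, so every existing entry is
 * treated as stale and gets rebuilt.
 */
static int find_match(const char **cur, int ncur, const char *want,
		      int force_rebuild)
{
	int j;

	for (j = 0; j < ncur && !force_rebuild; j++)
		if (strcmp(cur[j], want) == 0)
			return j;	/* reuse the existing domain */
	return -1;			/* no match: destroy and rebuild */
}

int main(void)
{
	const char *cur[] = { "dom0", "dom1" };

	printf("%d\n", find_match(cur, 2, "dom1", 0));	/* prints 1: reused */
	printf("%d\n", find_match(cur, 2, "dom1", 1));	/* prints -1: rebuilt */
	return 0;
}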