author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2008-02-13 09:45:40 -0500
committer | Ingo Molnar <mingo@elte.hu> | 2008-02-13 09:45:40 -0500
commit | bccbe08a60973c873e6af6fdb9ec11ffb1a6e4de (patch)
tree | c0bc3eb67de4cb85f0de7d5b9c699ed5cc9386ff /kernel/sched.c
parent | 052f1dc7eb02300b05170ae341ccd03b76207778 (diff)
sched: rt-group: clean up the ifdeffery
Clean up some of the excessive ifdeffery introduced in the last patch.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
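The cleanup pattern the diff below applies is worth spelling out: each group-scheduling class (CFS, RT) gets its own alloc/free and register/unregister helpers, defined under its CONFIG option, with empty inline stubs in the #else branch. The generic free_sched_group()/sched_create_group() paths then call the helpers unconditionally and carry no #ifdefs at all. The following standalone C sketch illustrates the shape of that pattern; the struct, allocation size and single CONFIG macro are simplified placeholders, not the kernel's.

/*
 * Minimal standalone sketch (not the kernel code itself) of the pattern the
 * patch applies: a per-class helper pair compiled in only when its CONFIG
 * option is set, with empty inline stubs under #else, so the generic
 * create/destroy paths need no #ifdefs.
 */
#include <stdio.h>
#include <stdlib.h>

#define CONFIG_FAIR_GROUP_SCHED		/* comment out to build the stubbed variant */

struct task_group {
	void *cfs_rq;			/* stands in for the per-cpu arrays */
};

#ifdef CONFIG_FAIR_GROUP_SCHED
static void free_fair_sched_group(struct task_group *tg)
{
	free(tg->cfs_rq);
}

static int alloc_fair_sched_group(struct task_group *tg)
{
	tg->cfs_rq = calloc(1, 64);	/* placeholder allocation */
	return tg->cfs_rq != NULL;	/* 1 on success, 0 on failure */
}
#else
static inline void free_fair_sched_group(struct task_group *tg) { }
static inline int alloc_fair_sched_group(struct task_group *tg) { return 1; }
#endif

/* Generic paths: identical whatever the configuration, no #ifdefs. */
static void free_sched_group(struct task_group *tg)
{
	free_fair_sched_group(tg);
	free(tg);
}

static struct task_group *sched_create_group(void)
{
	struct task_group *tg = calloc(1, sizeof(*tg));

	if (!tg)
		return NULL;
	if (!alloc_fair_sched_group(tg)) {
		free_sched_group(tg);
		return NULL;
	}
	return tg;
}

int main(void)
{
	struct task_group *tg = sched_create_group();

	if (!tg) {
		fprintf(stderr, "group creation failed\n");
		return 1;
	}
	printf("group created\n");
	free_sched_group(tg);
	return 0;
}

As in the patch, the alloc helpers return 1 on success and 0 on failure, and the stubbed !CONFIG variants simply return 1, so the unconfigured case always "succeeds" and the caller needs no special-casing.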
Diffstat (limited to 'kernel/sched.c')
-rw-r--r-- | kernel/sched.c | 210
1 file changed, 139 insertions, 71 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 5edc549edae8..d2f4398c5e6f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7559,57 +7559,29 @@ static int load_balance_monitor(void *unused)
7559 | } | 7559 | } |
7560 | #endif /* CONFIG_SMP */ | 7560 | #endif /* CONFIG_SMP */ |
7561 | 7561 | ||
7562 | static void free_sched_group(struct task_group *tg) | 7562 | #ifdef CONFIG_FAIR_GROUP_SCHED |
7563 | static void free_fair_sched_group(struct task_group *tg) | ||
7563 | { | 7564 | { |
7564 | int i; | 7565 | int i; |
7565 | 7566 | ||
7566 | for_each_possible_cpu(i) { | 7567 | for_each_possible_cpu(i) { |
7567 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
7568 | if (tg->cfs_rq) | 7568 | if (tg->cfs_rq) |
7569 | kfree(tg->cfs_rq[i]); | 7569 | kfree(tg->cfs_rq[i]); |
7570 | if (tg->se) | 7570 | if (tg->se) |
7571 | kfree(tg->se[i]); | 7571 | kfree(tg->se[i]); |
7572 | #endif | ||
7573 | #ifdef CONFIG_RT_GROUP_SCHED | ||
7574 | if (tg->rt_rq) | ||
7575 | kfree(tg->rt_rq[i]); | ||
7576 | if (tg->rt_se) | ||
7577 | kfree(tg->rt_se[i]); | ||
7578 | #endif | ||
7579 | } | 7572 | } |
7580 | 7573 | ||
7581 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
7582 | kfree(tg->cfs_rq); | 7574 | kfree(tg->cfs_rq); |
7583 | kfree(tg->se); | 7575 | kfree(tg->se); |
7584 | #endif | ||
7585 | #ifdef CONFIG_RT_GROUP_SCHED | ||
7586 | kfree(tg->rt_rq); | ||
7587 | kfree(tg->rt_se); | ||
7588 | #endif | ||
7589 | kfree(tg); | ||
7590 | } | 7576 | } |
7591 | 7577 | ||
7592 | /* allocate runqueue etc for a new task group */ | 7578 | static int alloc_fair_sched_group(struct task_group *tg) |
7593 | struct task_group *sched_create_group(void) | ||
7594 | { | 7579 | { |
7595 | struct task_group *tg; | ||
7596 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
7597 | struct cfs_rq *cfs_rq; | 7580 | struct cfs_rq *cfs_rq; |
7598 | struct sched_entity *se; | 7581 | struct sched_entity *se; |
7599 | #endif | ||
7600 | #ifdef CONFIG_RT_GROUP_SCHED | ||
7601 | struct rt_rq *rt_rq; | ||
7602 | struct sched_rt_entity *rt_se; | ||
7603 | #endif | ||
7604 | struct rq *rq; | 7582 | struct rq *rq; |
7605 | unsigned long flags; | ||
7606 | int i; | 7583 | int i; |
7607 | 7584 | ||
7608 | tg = kzalloc(sizeof(*tg), GFP_KERNEL); | ||
7609 | if (!tg) | ||
7610 | return ERR_PTR(-ENOMEM); | ||
7611 | |||
7612 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
7613 | tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL); | 7585 | tg->cfs_rq = kzalloc(sizeof(cfs_rq) * NR_CPUS, GFP_KERNEL); |
7614 | if (!tg->cfs_rq) | 7586 | if (!tg->cfs_rq) |
7615 | goto err; | 7587 | goto err; |
@@ -7618,23 +7590,10 @@ struct task_group *sched_create_group(void)
7618 | goto err; | 7590 | goto err; |
7619 | 7591 | ||
7620 | tg->shares = NICE_0_LOAD; | 7592 | tg->shares = NICE_0_LOAD; |
7621 | #endif | ||
7622 | |||
7623 | #ifdef CONFIG_RT_GROUP_SCHED | ||
7624 | tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL); | ||
7625 | if (!tg->rt_rq) | ||
7626 | goto err; | ||
7627 | tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL); | ||
7628 | if (!tg->rt_se) | ||
7629 | goto err; | ||
7630 | |||
7631 | tg->rt_runtime = 0; | ||
7632 | #endif | ||
7633 | 7593 | ||
7634 | for_each_possible_cpu(i) { | 7594 | for_each_possible_cpu(i) { |
7635 | rq = cpu_rq(i); | 7595 | rq = cpu_rq(i); |
7636 | 7596 | ||
7637 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
7638 | cfs_rq = kmalloc_node(sizeof(struct cfs_rq), | 7597 | cfs_rq = kmalloc_node(sizeof(struct cfs_rq), |
7639 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 7598 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); |
7640 | if (!cfs_rq) | 7599 | if (!cfs_rq) |
@@ -7646,9 +7605,78 @@ struct task_group *sched_create_group(void)
7646 | goto err; | 7605 | goto err; |
7647 | 7606 | ||
7648 | init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0); | 7607 | init_tg_cfs_entry(rq, tg, cfs_rq, se, i, 0); |
7608 | } | ||
7609 | |||
7610 | return 1; | ||
7611 | |||
7612 | err: | ||
7613 | return 0; | ||
7614 | } | ||
7615 | |||
7616 | static inline void register_fair_sched_group(struct task_group *tg, int cpu) | ||
7617 | { | ||
7618 | list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list, | ||
7619 | &cpu_rq(cpu)->leaf_cfs_rq_list); | ||
7620 | } | ||
7621 | |||
7622 | static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) | ||
7623 | { | ||
7624 | list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list); | ||
7625 | } | ||
7626 | #else | ||
7627 | static inline void free_fair_sched_group(struct task_group *tg) | ||
7628 | { | ||
7629 | } | ||
7630 | |||
7631 | static inline int alloc_fair_sched_group(struct task_group *tg) | ||
7632 | { | ||
7633 | return 1; | ||
7634 | } | ||
7635 | |||
7636 | static inline void register_fair_sched_group(struct task_group *tg, int cpu) | ||
7637 | { | ||
7638 | } | ||
7639 | |||
7640 | static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) | ||
7641 | { | ||
7642 | } | ||
7649 | #endif | 7643 | #endif |
7650 | 7644 | ||
7651 | #ifdef CONFIG_RT_GROUP_SCHED | 7645 | #ifdef CONFIG_RT_GROUP_SCHED |
7646 | static void free_rt_sched_group(struct task_group *tg) | ||
7647 | { | ||
7648 | int i; | ||
7649 | |||
7650 | for_each_possible_cpu(i) { | ||
7651 | if (tg->rt_rq) | ||
7652 | kfree(tg->rt_rq[i]); | ||
7653 | if (tg->rt_se) | ||
7654 | kfree(tg->rt_se[i]); | ||
7655 | } | ||
7656 | |||
7657 | kfree(tg->rt_rq); | ||
7658 | kfree(tg->rt_se); | ||
7659 | } | ||
7660 | |||
7661 | static int alloc_rt_sched_group(struct task_group *tg) | ||
7662 | { | ||
7663 | struct rt_rq *rt_rq; | ||
7664 | struct sched_rt_entity *rt_se; | ||
7665 | struct rq *rq; | ||
7666 | int i; | ||
7667 | |||
7668 | tg->rt_rq = kzalloc(sizeof(rt_rq) * NR_CPUS, GFP_KERNEL); | ||
7669 | if (!tg->rt_rq) | ||
7670 | goto err; | ||
7671 | tg->rt_se = kzalloc(sizeof(rt_se) * NR_CPUS, GFP_KERNEL); | ||
7672 | if (!tg->rt_se) | ||
7673 | goto err; | ||
7674 | |||
7675 | tg->rt_runtime = 0; | ||
7676 | |||
7677 | for_each_possible_cpu(i) { | ||
7678 | rq = cpu_rq(i); | ||
7679 | |||
7652 | rt_rq = kmalloc_node(sizeof(struct rt_rq), | 7680 | rt_rq = kmalloc_node(sizeof(struct rt_rq), |
7653 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 7681 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); |
7654 | if (!rt_rq) | 7682 | if (!rt_rq) |
@@ -7660,20 +7688,71 @@ struct task_group *sched_create_group(void)
7660 | goto err; | 7688 | goto err; |
7661 | 7689 | ||
7662 | init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0); | 7690 | init_tg_rt_entry(rq, tg, rt_rq, rt_se, i, 0); |
7663 | #endif | ||
7664 | } | 7691 | } |
7665 | 7692 | ||
7693 | return 1; | ||
7694 | |||
7695 | err: | ||
7696 | return 0; | ||
7697 | } | ||
7698 | |||
7699 | static inline void register_rt_sched_group(struct task_group *tg, int cpu) | ||
7700 | { | ||
7701 | list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list, | ||
7702 | &cpu_rq(cpu)->leaf_rt_rq_list); | ||
7703 | } | ||
7704 | |||
7705 | static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) | ||
7706 | { | ||
7707 | list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list); | ||
7708 | } | ||
7709 | #else | ||
7710 | static inline void free_rt_sched_group(struct task_group *tg) | ||
7711 | { | ||
7712 | } | ||
7713 | |||
7714 | static inline int alloc_rt_sched_group(struct task_group *tg) | ||
7715 | { | ||
7716 | return 1; | ||
7717 | } | ||
7718 | |||
7719 | static inline void register_rt_sched_group(struct task_group *tg, int cpu) | ||
7720 | { | ||
7721 | } | ||
7722 | |||
7723 | static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) | ||
7724 | { | ||
7725 | } | ||
7726 | #endif | ||
7727 | |||
7728 | static void free_sched_group(struct task_group *tg) | ||
7729 | { | ||
7730 | free_fair_sched_group(tg); | ||
7731 | free_rt_sched_group(tg); | ||
7732 | kfree(tg); | ||
7733 | } | ||
7734 | |||
7735 | /* allocate runqueue etc for a new task group */ | ||
7736 | struct task_group *sched_create_group(void) | ||
7737 | { | ||
7738 | struct task_group *tg; | ||
7739 | unsigned long flags; | ||
7740 | int i; | ||
7741 | |||
7742 | tg = kzalloc(sizeof(*tg), GFP_KERNEL); | ||
7743 | if (!tg) | ||
7744 | return ERR_PTR(-ENOMEM); | ||
7745 | |||
7746 | if (!alloc_fair_sched_group(tg)) | ||
7747 | goto err; | ||
7748 | |||
7749 | if (!alloc_rt_sched_group(tg)) | ||
7750 | goto err; | ||
7751 | |||
7666 | spin_lock_irqsave(&task_group_lock, flags); | 7752 | spin_lock_irqsave(&task_group_lock, flags); |
7667 | for_each_possible_cpu(i) { | 7753 | for_each_possible_cpu(i) { |
7668 | rq = cpu_rq(i); | 7754 | register_fair_sched_group(tg, i); |
7669 | #ifdef CONFIG_FAIR_GROUP_SCHED | 7755 | register_rt_sched_group(tg, i); |
7670 | cfs_rq = tg->cfs_rq[i]; | ||
7671 | list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); | ||
7672 | #endif | ||
7673 | #ifdef CONFIG_RT_GROUP_SCHED | ||
7674 | rt_rq = tg->rt_rq[i]; | ||
7675 | list_add_rcu(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list); | ||
7676 | #endif | ||
7677 | } | 7756 | } |
7678 | list_add_rcu(&tg->list, &task_groups); | 7757 | list_add_rcu(&tg->list, &task_groups); |
7679 | spin_unlock_irqrestore(&task_group_lock, flags); | 7758 | spin_unlock_irqrestore(&task_group_lock, flags); |
@@ -7700,12 +7779,8 @@ void sched_destroy_group(struct task_group *tg)
7700 | 7779 | ||
7701 | spin_lock_irqsave(&task_group_lock, flags); | 7780 | spin_lock_irqsave(&task_group_lock, flags); |
7702 | for_each_possible_cpu(i) { | 7781 | for_each_possible_cpu(i) { |
7703 | #ifdef CONFIG_FAIR_GROUP_SCHED | 7782 | unregister_fair_sched_group(tg, i); |
7704 | list_del_rcu(&tg->cfs_rq[i]->leaf_cfs_rq_list); | 7783 | unregister_rt_sched_group(tg, i); |
7705 | #endif | ||
7706 | #ifdef CONFIG_RT_GROUP_SCHED | ||
7707 | list_del_rcu(&tg->rt_rq[i]->leaf_rt_rq_list); | ||
7708 | #endif | ||
7709 | } | 7784 | } |
7710 | list_del_rcu(&tg->list); | 7785 | list_del_rcu(&tg->list); |
7711 | spin_unlock_irqrestore(&task_group_lock, flags); | 7786 | spin_unlock_irqrestore(&task_group_lock, flags); |
@@ -7780,8 +7855,6 @@ static DEFINE_MUTEX(shares_mutex);
7780 | int sched_group_set_shares(struct task_group *tg, unsigned long shares) | 7855 | int sched_group_set_shares(struct task_group *tg, unsigned long shares) |
7781 | { | 7856 | { |
7782 | int i; | 7857 | int i; |
7783 | struct cfs_rq *cfs_rq; | ||
7784 | struct rq *rq; | ||
7785 | unsigned long flags; | 7858 | unsigned long flags; |
7786 | 7859 | ||
7787 | mutex_lock(&shares_mutex); | 7860 | mutex_lock(&shares_mutex); |
@@ -7797,10 +7870,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
7797 | * by taking it off the rq->leaf_cfs_rq_list on each cpu. | 7870 | * by taking it off the rq->leaf_cfs_rq_list on each cpu. |
7798 | */ | 7871 | */ |
7799 | spin_lock_irqsave(&task_group_lock, flags); | 7872 | spin_lock_irqsave(&task_group_lock, flags); |
7800 | for_each_possible_cpu(i) { | 7873 | for_each_possible_cpu(i) |
7801 | cfs_rq = tg->cfs_rq[i]; | 7874 | unregister_fair_sched_group(tg, i); |
7802 | list_del_rcu(&cfs_rq->leaf_cfs_rq_list); | ||
7803 | } | ||
7804 | spin_unlock_irqrestore(&task_group_lock, flags); | 7875 | spin_unlock_irqrestore(&task_group_lock, flags); |
7805 | 7876 | ||
7806 | /* wait for any ongoing reference to this group to finish */ | 7877 | /* wait for any ongoing reference to this group to finish */ |
@@ -7822,11 +7893,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
7822 | * each cpu's rq->leaf_cfs_rq_list. | 7893 | * each cpu's rq->leaf_cfs_rq_list. |
7823 | */ | 7894 | */ |
7824 | spin_lock_irqsave(&task_group_lock, flags); | 7895 | spin_lock_irqsave(&task_group_lock, flags); |
7825 | for_each_possible_cpu(i) { | 7896 | for_each_possible_cpu(i) |
7826 | rq = cpu_rq(i); | 7897 | register_fair_sched_group(tg, i); |
7827 | cfs_rq = tg->cfs_rq[i]; | ||
7828 | list_add_rcu(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list); | ||
7829 | } | ||
7830 | spin_unlock_irqrestore(&task_group_lock, flags); | 7898 | spin_unlock_irqrestore(&task_group_lock, flags); |
7831 | done: | 7899 | done: |
7832 | mutex_unlock(&shares_mutex); | 7900 | mutex_unlock(&shares_mutex); |