author    Dhaval Giani <dhaval@linux.vnet.ibm.com>  2008-05-30 08:23:45 -0400
committer Ingo Molnar <mingo@elte.hu>               2008-06-06 09:19:38 -0400
commit    6d6bc0ad867c46896d0994bb039e7550ecb9b51d (patch)
tree      09d9d2cb0e8a6c344feb5ee441e8d470387303fa /kernel/sched.c
parent    e21f5b153b9b4a6775d7d41964e372e13a9178ab (diff)
sched: add comments for ifdefs in sched.c
make sched.c easier to read.

Signed-off-by: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c   76
1 file changed, 38 insertions(+), 38 deletions(-)
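The change is mechanical: every bare #else/#endif closing a scheduler config block gains a comment naming the controlling symbol, so a reader no longer has to scroll back through nested conditionals to see which block is being terminated. As a rough illustration of the convention (a minimal sketch, not taken from sched.c; CONFIG_FOO and CONFIG_BAR are placeholder options, not real kernel config symbols):

/* Placeholder example only: CONFIG_FOO and CONFIG_BAR are not real kernel options. */
#ifdef CONFIG_FOO
static int foo_init(void)
{
	return 0;
}
# ifdef CONFIG_BAR
static int bar_init(void)
{
	return 0;
}
# endif /* CONFIG_BAR */
#else  /* !CONFIG_FOO */
/* Stub keeps callers identical when CONFIG_FOO is disabled. */
static inline int foo_init(void)
{
	return 0;
}
#endif /* CONFIG_FOO */

Annotated this way, the final #endif reads at a glance as closing CONFIG_FOO, which is what the hunks below do for CONFIG_FAIR_GROUP_SCHED, CONFIG_RT_GROUP_SCHED, CONFIG_NUMA and the other scheduler options.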
diff --git a/kernel/sched.c b/kernel/sched.c
index 84a360670b9d..ef4e25604bbe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -292,15 +292,15 @@ struct task_group root_task_group;
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
 /* Default task group's cfs_rq on each cpu */
 static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
 static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
-#endif
-#else
+#endif /* CONFIG_RT_GROUP_SCHED */
+#else /* !CONFIG_FAIR_GROUP_SCHED */
 #define root_task_group init_task_group
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 /* task_group_lock serializes add/remove of task groups and also changes to
  * a task group's cpu shares.
@@ -310,9 +310,9 @@ static DEFINE_SPINLOCK(task_group_lock);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
-#else
+#else /* !CONFIG_USER_SCHED */
 # define INIT_TASK_GROUP_LOAD NICE_0_LOAD
-#endif
+#endif /* CONFIG_USER_SCHED */
 
 /*
  * A weight of 0, 1 or ULONG_MAX can cause arithmetics problems.
@@ -1316,15 +1316,15 @@ void wake_up_idle_cpu(int cpu)
 	if (!tsk_is_polling(rq->idle))
 		smp_send_reschedule(cpu);
 }
-#endif
+#endif /* CONFIG_NO_HZ */
 
-#else
+#else /* !CONFIG_SMP */
 static void __resched_task(struct task_struct *p, int tif_bit)
 {
 	assert_spin_locked(&task_rq(p)->lock);
 	set_tsk_thread_flag(p, tif_bit);
 }
-#endif
+#endif /* CONFIG_SMP */
 
 #if BITS_PER_LONG == 32
 # define WMULT_CONST (~0UL)
@@ -2129,7 +2129,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 			}
 		}
 	}
-#endif
+#endif /* CONFIG_SCHEDSTATS */
 
 out_activate:
 #endif /* CONFIG_SMP */
@@ -2329,7 +2329,7 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
 		notifier->ops->sched_out(notifier, next);
 }
 
-#else
+#else /* !CONFIG_PREEMPT_NOTIFIERS */
 
 static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
@@ -2341,7 +2341,7 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
 {
 }
 
-#endif
+#endif /* CONFIG_PREEMPT_NOTIFIERS */
 
 /**
  * prepare_task_switch - prepare to switch tasks
@@ -6300,9 +6300,9 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 	}
 	kfree(groupmask);
 }
-#else
+#else /* !CONFIG_SCHED_DEBUG */
 # define sched_domain_debug(sd, cpu) do { } while (0)
-#endif
+#endif /* CONFIG_SCHED_DEBUG */
 
 static int sd_degenerate(struct sched_domain *sd)
 {
@@ -6598,7 +6598,7 @@ static void sched_domain_node_span(int node, cpumask_t *span)
 		cpus_or(*span, *span, *nodemask);
 	}
 }
-#endif
+#endif /* CONFIG_NUMA */
 
 int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
 
@@ -6617,7 +6617,7 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
 		*sg = &per_cpu(sched_group_cpus, cpu);
 	return cpu;
 }
-#endif
+#endif /* CONFIG_SCHED_SMT */
 
 /*
  * multi-core sched-domains:
@@ -6625,7 +6625,7 @@ cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
 #ifdef CONFIG_SCHED_MC
 static DEFINE_PER_CPU(struct sched_domain, core_domains);
 static DEFINE_PER_CPU(struct sched_group, sched_group_core);
-#endif
+#endif /* CONFIG_SCHED_MC */
 
 #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
 static int
@@ -6727,7 +6727,7 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 		sg = sg->next;
 	} while (sg != group_head);
 }
-#endif
+#endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_NUMA
 /* Free memory allocated for various sched_group structures */
@@ -6764,11 +6764,11 @@ next_sg:
 		sched_group_nodes_bycpu[cpu] = NULL;
 	}
 }
-#else
+#else /* !CONFIG_NUMA */
 static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
 {
 }
-#endif
+#endif /* CONFIG_NUMA */
 
 /*
  * Initialize sched groups cpu_power.
@@ -7459,7 +7459,7 @@ int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 #endif
 	return err;
 }
-#endif
+#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
 /*
  * Force a reinitialization of the sched domains hierarchy. The domains
@@ -7677,8 +7677,8 @@ void __init sched_init(void)
 
 		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
-#endif
-#endif
+#endif /* CONFIG_USER_SCHED */
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
 		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
@@ -7692,8 +7692,8 @@ void __init sched_init(void)
 
 		root_task_group.rt_rq = (struct rt_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
-#endif
-#endif
+#endif /* CONFIG_USER_SCHED */
+#endif /* CONFIG_RT_GROUP_SCHED */
 	}
 
 #ifdef CONFIG_SMP
@@ -7709,8 +7709,8 @@ void __init sched_init(void)
 #ifdef CONFIG_USER_SCHED
 	init_rt_bandwidth(&root_task_group.rt_bandwidth,
 			global_rt_period(), RUNTIME_INF);
-#endif
-#endif
+#endif /* CONFIG_USER_SCHED */
+#endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_GROUP_SCHED
 	list_add(&init_task_group.list, &task_groups);
@@ -7720,8 +7720,8 @@ void __init sched_init(void)
 	INIT_LIST_HEAD(&root_task_group.children);
 	init_task_group.parent = &root_task_group;
 	list_add(&init_task_group.siblings, &root_task_group.children);
-#endif
-#endif
+#endif /* CONFIG_USER_SCHED */
+#endif /* CONFIG_GROUP_SCHED */
 
 	for_each_possible_cpu(i) {
 		struct rq *rq;
@@ -8040,7 +8040,7 @@ static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
 {
 	list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
 }
-#else
+#else /* !CONFG_FAIR_GROUP_SCHED */
 static inline void free_fair_sched_group(struct task_group *tg)
 {
 }
@@ -8058,7 +8058,7 @@ static inline void register_fair_sched_group(struct task_group *tg, int cpu)
 static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
 {
 }
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static void free_rt_sched_group(struct task_group *tg)
@@ -8129,7 +8129,7 @@ static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
 {
 	list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
 }
-#else
+#else /* !CONFIG_RT_GROUP_SCHED */
 static inline void free_rt_sched_group(struct task_group *tg)
 {
 }
@@ -8147,7 +8147,7 @@ static inline void register_rt_sched_group(struct task_group *tg, int cpu)
 static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
 {
 }
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_GROUP_SCHED
 static void free_sched_group(struct task_group *tg)
@@ -8258,7 +8258,7 @@ void sched_move_task(struct task_struct *tsk)
 
 	task_rq_unlock(rq, &flags);
 }
-#endif
+#endif /* CONFIG_GROUP_SCHED */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 static void set_se_shares(struct sched_entity *se, unsigned long shares)
@@ -8508,7 +8508,7 @@ static int sched_rt_global_constraints(void)
 
 	return ret;
 }
-#else
+#else /* !CONFIG_RT_GROUP_SCHED */
 static int sched_rt_global_constraints(void)
 {
 	unsigned long flags;
@@ -8526,7 +8526,7 @@ static int sched_rt_global_constraints(void)
 
 	return 0;
 }
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 
 int sched_rt_handler(struct ctl_table *table, int write,
 		struct file *filp, void __user *buffer, size_t *lenp,
@@ -8634,7 +8634,7 @@ static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
 
 	return (u64) tg->shares;
 }
-#endif
+#endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
@@ -8658,7 +8658,7 @@ static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
 {
 	return sched_group_rt_period(cgroup_tg(cgrp));
 }
-#endif
+#endif /* CONFIG_RT_GROUP_SCHED */
 
 static struct cftype cpu_files[] = {
 #ifdef CONFIG_FAIR_GROUP_SCHED