author     Ingo Molnar <mingo@elte.hu>    2007-07-09 12:51:57 -0400
committer  Ingo Molnar <mingo@elte.hu>    2007-07-09 12:51:57 -0400
commit     d15bcfdbe1818478891d714343f037cfe60875f0 (patch)
tree       62362e16a1b189161d0c163085898043e6844a5e /kernel/sched.c
parent     7dcca30a32aadb0520417521b0c44f42d09fe05c (diff)
sched: rename idle_type/SCHED_IDLE
enum idle_type (used by the load-balancer) clashes with the SCHED_IDLE name
that we want to introduce. 'CPU_IDLE' instead of 'SCHED_IDLE' is more
descriptive as well.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--    kernel/sched.c    60
1 file changed, 30 insertions(+), 30 deletions(-)
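Note: the rename of the enum definition itself lives in include/linux/sched.h, which is outside this diffstat view (limited to kernel/sched.c). Going by the identifiers used in the hunks below — the show_schedstat() loop runs from CPU_IDLE up to CPU_MAX_IDLE_TYPES — the renamed type presumably looks roughly like the following sketch; the relative order of CPU_NOT_IDLE and CPU_NEWLY_IDLE is an assumption, not taken from this diff:

    /* sketch of the renamed load-balancer idle states; formerly enum idle_type */
    enum cpu_idle_type {
        CPU_IDLE,            /* was SCHED_IDLE: CPU is idle at balance time   */
        CPU_NOT_IDLE,        /* was NOT_IDLE: CPU is busy                     */
        CPU_NEWLY_IDLE,      /* was NEWLY_IDLE: CPU is just about to go idle  */
        CPU_MAX_IDLE_TYPES   /* was MAX_IDLE_TYPES: sentinel for stats arrays */
    };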
diff --git a/kernel/sched.c b/kernel/sched.c
index 50e1a3122699..ac054d9a0719 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -496,12 +496,12 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		/* domain-specific stats */
 		preempt_disable();
 		for_each_domain(cpu, sd) {
-			enum idle_type itype;
+			enum cpu_idle_type itype;
 			char mask_str[NR_CPUS];

 			cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
 			seq_printf(seq, "domain%d %s", dcnt++, mask_str);
-			for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
+			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
 					itype++) {
 				seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
 						"%lu",
@@ -2208,7 +2208,7 @@ static void pull_task(struct rq *src_rq, struct prio_array *src_array,
  */
 static
 int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
-		     struct sched_domain *sd, enum idle_type idle,
+		     struct sched_domain *sd, enum cpu_idle_type idle,
 		     int *all_pinned)
 {
 	/*
@@ -2254,7 +2254,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
  */
 static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      unsigned long max_nr_move, unsigned long max_load_move,
-		      struct sched_domain *sd, enum idle_type idle,
+		      struct sched_domain *sd, enum cpu_idle_type idle,
 		      int *all_pinned)
 {
 	int idx, pulled = 0, pinned = 0, this_best_prio, best_prio,
@@ -2372,7 +2372,7 @@ out:
  */
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
-		   unsigned long *imbalance, enum idle_type idle, int *sd_idle,
+		   unsigned long *imbalance, enum cpu_idle_type idle, int *sd_idle,
 		   cpumask_t *cpus, int *balance)
 {
 	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
@@ -2391,9 +2391,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	max_load = this_load = total_load = total_pwr = 0;
 	busiest_load_per_task = busiest_nr_running = 0;
 	this_load_per_task = this_nr_running = 0;
-	if (idle == NOT_IDLE)
+	if (idle == CPU_NOT_IDLE)
 		load_idx = sd->busy_idx;
-	else if (idle == NEWLY_IDLE)
+	else if (idle == CPU_NEWLY_IDLE)
 		load_idx = sd->newidle_idx;
 	else
 		load_idx = sd->idle_idx;
@@ -2477,7 +2477,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 * Busy processors will not participate in power savings
 		 * balance.
 		 */
-		if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+		if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
 			goto group_next;

 		/*
@@ -2639,7 +2639,7 @@ small_imbalance:

 out_balanced:
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-	if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+	if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
 		goto ret;

 	if (this == group_leader && group_leader != group_min) {
@@ -2656,7 +2656,7 @@ ret:
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 static struct rq *
-find_busiest_queue(struct sched_group *group, enum idle_type idle,
+find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 		   unsigned long imbalance, cpumask_t *cpus)
 {
 	struct rq *busiest = NULL, *rq;
@@ -2698,7 +2698,7 @@ static inline unsigned long minus_1_or_zero(unsigned long n)
  * tasks if there is an imbalance.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
-			struct sched_domain *sd, enum idle_type idle,
+			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *balance)
 {
 	int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
@@ -2712,9 +2712,9 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	 * When power savings policy is enabled for the parent domain, idle
 	 * sibling can pick up load irrespective of busy siblings. In this case,
 	 * let the state of idle sibling percolate up as IDLE, instead of
-	 * portraying it as NOT_IDLE.
+	 * portraying it as CPU_NOT_IDLE.
 	 */
-	if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
+	if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;

@@ -2848,7 +2848,7 @@ out_one_pinned:
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  *
- * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
+ * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
  * this_rq is locked.
  */
 static int
@@ -2865,31 +2865,31 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	 * When power savings policy is enabled for the parent domain, idle
 	 * sibling can pick up load irrespective of busy siblings. In this case,
 	 * let the state of idle sibling percolate up as IDLE, instead of
-	 * portraying it as NOT_IDLE.
+	 * portraying it as CPU_NOT_IDLE.
 	 */
 	if (sd->flags & SD_SHARE_CPUPOWER &&
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;

-	schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
+	schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
 redo:
-	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
+	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
 				   &sd_idle, &cpus, NULL);
 	if (!group) {
-		schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
+		schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
 		goto out_balanced;
 	}

-	busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance,
+	busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance,
 				     &cpus);
 	if (!busiest) {
-		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
+		schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
 		goto out_balanced;
 	}

 	BUG_ON(busiest == this_rq);

-	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
+	schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);

 	nr_moved = 0;
 	if (busiest->nr_running > 1) {
@@ -2897,7 +2897,7 @@ redo:
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 				      minus_1_or_zero(busiest->nr_running),
-				      imbalance, sd, NEWLY_IDLE, NULL);
+				      imbalance, sd, CPU_NEWLY_IDLE, NULL);
 		spin_unlock(&busiest->lock);

 		if (!nr_moved) {
@@ -2908,7 +2908,7 @@ redo:
 	}

 	if (!nr_moved) {
-		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
+		schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
 		if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
 		    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 			return -1;
@@ -2918,7 +2918,7 @@ redo:
 	return nr_moved;

 out_balanced:
-	schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
+	schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
 	if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		return -1;
@@ -3003,7 +3003,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		schedstat_inc(sd, alb_cnt);

 		if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
-			       RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE,
+			       RTPRIO_TO_LOAD_WEIGHT(100), sd, CPU_IDLE,
 			       NULL))
 			schedstat_inc(sd, alb_pushed);
 		else
@@ -3120,7 +3120,7 @@ static DEFINE_SPINLOCK(balancing);
  *
  * Balancing parameters are set up in arch_init_sched_domains.
  */
-static inline void rebalance_domains(int cpu, enum idle_type idle)
+static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
 {
 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
@@ -3134,7 +3134,7 @@ static inline void rebalance_domains(int cpu, enum idle_type idle)
 			continue;

 		interval = sd->balance_interval;
-		if (idle != SCHED_IDLE)
+		if (idle != CPU_IDLE)
 			interval *= sd->busy_factor;

 		/* scale ms to jiffies */
@@ -3154,7 +3154,7 @@ static inline void rebalance_domains(int cpu, enum idle_type idle)
 				 * longer idle, or one of our SMT siblings is
 				 * not idle.
 				 */
-				idle = NOT_IDLE;
+				idle = CPU_NOT_IDLE;
 			}
 			sd->last_balance = jiffies;
 		}
@@ -3184,7 +3184,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 {
 	int local_cpu = smp_processor_id();
 	struct rq *local_rq = cpu_rq(local_cpu);
-	enum idle_type idle = local_rq->idle_at_tick ? SCHED_IDLE : NOT_IDLE;
+	enum cpu_idle_type idle = local_rq->idle_at_tick ? CPU_IDLE : CPU_NOT_IDLE;

 	rebalance_domains(local_cpu, idle);

@@ -3210,7 +3210,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 		if (need_resched())
 			break;

-		rebalance_domains(balance_cpu, SCHED_IDLE);
+		rebalance_domains(balance_cpu, CPU_IDLE);

 		rq = cpu_rq(balance_cpu);
 		if (time_after(local_rq->next_balance, rq->next_balance))