-rw-r--r--	include/linux/sched.h	27
-rw-r--r--	kernel/sched.c		60
2 files changed, 43 insertions(+), 44 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 693f0e6c54d4..2acfb23f3681 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -639,12 +639,11 @@ static inline int sched_info_on(void)
 #endif
 }
 
-enum idle_type
-{
-	SCHED_IDLE,
-	NOT_IDLE,
-	NEWLY_IDLE,
-	MAX_IDLE_TYPES
-};
+enum cpu_idle_type {
+	CPU_IDLE,
+	CPU_NOT_IDLE,
+	CPU_NEWLY_IDLE,
+	CPU_MAX_IDLE_TYPES
+};
 
 /*
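The hunk above is the whole semantic payload of the patch: enum idle_type
becomes enum cpu_idle_type, and each enumerator gains a CPU_ prefix
(SCHED_IDLE -> CPU_IDLE, NOT_IDLE -> CPU_NOT_IDLE, NEWLY_IDLE ->
CPU_NEWLY_IDLE, MAX_IDLE_TYPES -> CPU_MAX_IDLE_TYPES). Every hunk below is
mechanical fallout at the use sites. Note that the trailing enumerator is
not a real idle state but a count. A minimal standalone sketch of that
idiom (the main() and the zeroed array are illustrative, not from the
patch):

	#include <stdio.h>

	/* Same shape as the new enum above; the last value is a count. */
	enum cpu_idle_type {
		CPU_IDLE,
		CPU_NOT_IDLE,
		CPU_NEWLY_IDLE,
		CPU_MAX_IDLE_TYPES
	};

	/* Sized by the sentinel: one counter per idle type, just as
	 * struct sched_domain does for its lb_* stats in the next hunk. */
	static unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];

	int main(void)
	{
		enum cpu_idle_type itype;

		for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; itype++)
			printf("idle type %d: %lu balance calls\n",
			       (int)itype, lb_cnt[itype]);
		return 0;
	}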
@@ -719,14 +718,14 @@ struct sched_domain {
 
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
-	unsigned long lb_cnt[MAX_IDLE_TYPES];
-	unsigned long lb_failed[MAX_IDLE_TYPES];
-	unsigned long lb_balanced[MAX_IDLE_TYPES];
-	unsigned long lb_imbalance[MAX_IDLE_TYPES];
-	unsigned long lb_gained[MAX_IDLE_TYPES];
-	unsigned long lb_hot_gained[MAX_IDLE_TYPES];
-	unsigned long lb_nobusyg[MAX_IDLE_TYPES];
-	unsigned long lb_nobusyq[MAX_IDLE_TYPES];
+	unsigned long lb_cnt[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
+	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
 
 	/* Active load balancing */
 	unsigned long alb_cnt;
diff --git a/kernel/sched.c b/kernel/sched.c
index 50e1a3122699..ac054d9a0719 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -496,12 +496,12 @@ static int show_schedstat(struct seq_file *seq, void *v)
 		/* domain-specific stats */
 		preempt_disable();
 		for_each_domain(cpu, sd) {
-			enum idle_type itype;
+			enum cpu_idle_type itype;
 			char mask_str[NR_CPUS];
 
 			cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
 			seq_printf(seq, "domain%d %s", dcnt++, mask_str);
-			for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
+			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
 					itype++) {
 				seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
 						"%lu",
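The loop in this hunk starts at CPU_IDLE and stops before
CPU_MAX_IDLE_TYPES, so the field order of each /proc/schedstat domain line
silently depends on CPU_IDLE staying the first, zero-valued enumerator. A
tiny standalone check of that assumption (not from the patch):

	#include <assert.h>

	enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE,
			     CPU_MAX_IDLE_TYPES };

	int main(void)
	{
		/* First stat block in every domain line is CPU_IDLE's... */
		assert(CPU_IDLE == 0);
		/* ...and each domain emits exactly three blocks of lb_* fields. */
		assert(CPU_MAX_IDLE_TYPES == 3);
		return 0;
	}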
@@ -2208,7 +2208,7 @@ static void pull_task(struct rq *src_rq, struct prio_array *src_array,
  */
 static
 int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
-		     struct sched_domain *sd, enum idle_type idle,
+		     struct sched_domain *sd, enum cpu_idle_type idle,
 		     int *all_pinned)
 {
 	/*
@@ -2254,7 +2254,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
  */
 static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 		      unsigned long max_nr_move, unsigned long max_load_move,
-		      struct sched_domain *sd, enum idle_type idle,
+		      struct sched_domain *sd, enum cpu_idle_type idle,
 		      int *all_pinned)
 {
 	int idx, pulled = 0, pinned = 0, this_best_prio, best_prio,
@@ -2372,7 +2372,7 @@ out:
  */
 static struct sched_group *
 find_busiest_group(struct sched_domain *sd, int this_cpu,
-		   unsigned long *imbalance, enum idle_type idle, int *sd_idle,
+		   unsigned long *imbalance, enum cpu_idle_type idle, int *sd_idle,
 		   cpumask_t *cpus, int *balance)
 {
 	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
@@ -2391,9 +2391,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	max_load = this_load = total_load = total_pwr = 0;
 	busiest_load_per_task = busiest_nr_running = 0;
 	this_load_per_task = this_nr_running = 0;
-	if (idle == NOT_IDLE)
+	if (idle == CPU_NOT_IDLE)
 		load_idx = sd->busy_idx;
-	else if (idle == NEWLY_IDLE)
+	else if (idle == CPU_NEWLY_IDLE)
 		load_idx = sd->newidle_idx;
 	else
 		load_idx = sd->idle_idx;
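The hunk above decides which decayed load average to consult: each idle
classification reads a different index into the domain's load history. A
standalone restatement of that selection (pick_load_idx and the
three-field struct are hypothetical stand-ins for kernel code, which
open-codes this in find_busiest_group):

	enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE,
			     CPU_MAX_IDLE_TYPES };

	/* Stand-in for the three index fields of struct sched_domain. */
	struct dom_idx {
		int busy_idx;		/* used when the pulling CPU is busy */
		int newidle_idx;	/* used when it just went idle */
		int idle_idx;		/* used for periodic balance while idle */
	};

	static int pick_load_idx(const struct dom_idx *sd,
				 enum cpu_idle_type idle)
	{
		if (idle == CPU_NOT_IDLE)
			return sd->busy_idx;
		if (idle == CPU_NEWLY_IDLE)
			return sd->newidle_idx;
		return sd->idle_idx;	/* CPU_IDLE */
	}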
@@ -2477,7 +2477,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 * Busy processors will not participate in power savings
 		 * balance.
 		 */
-		if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+		if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
 			goto group_next;
 
 		/*
@@ -2639,7 +2639,7 @@ small_imbalance:
 
 out_balanced:
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-	if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+	if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
 		goto ret;
 
 	if (this == group_leader && group_leader != group_min) {
@@ -2656,7 +2656,7 @@ ret:
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 static struct rq *
-find_busiest_queue(struct sched_group *group, enum idle_type idle,
+find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 		   unsigned long imbalance, cpumask_t *cpus)
 {
 	struct rq *busiest = NULL, *rq;
@@ -2698,7 +2698,7 @@ static inline unsigned long minus_1_or_zero(unsigned long n)
  * tasks if there is an imbalance.
  */
 static int load_balance(int this_cpu, struct rq *this_rq,
-			struct sched_domain *sd, enum idle_type idle,
+			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *balance)
 {
 	int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
@@ -2712,9 +2712,9 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	 * When power savings policy is enabled for the parent domain, idle
 	 * sibling can pick up load irrespective of busy siblings. In this case,
 	 * let the state of idle sibling percolate up as IDLE, instead of
-	 * portraying it as NOT_IDLE.
+	 * portraying it as CPU_NOT_IDLE.
 	 */
-	if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
+	if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
@@ -2848,7 +2848,7 @@ out_one_pinned:
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
  *
- * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
+ * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
  * this_rq is locked.
  */
 static int
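As a reading aid (inferred from the hunks in this patch, not text from
it), the three classifications arise as follows:

	CPU_NEWLY_IDLE	load_balance_newidle(), called from schedule() when
			a runqueue is about to go empty (the hunks below);
	CPU_IDLE	periodic rebalancing of a CPU that was idle at the
			tick, plus the active_load_balance() push;
	CPU_NOT_IDLE	periodic rebalancing of a busy CPU.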
@@ -2865,31 +2865,31 @@ load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
 	 * When power savings policy is enabled for the parent domain, idle
 	 * sibling can pick up load irrespective of busy siblings. In this case,
 	 * let the state of idle sibling percolate up as IDLE, instead of
-	 * portraying it as NOT_IDLE.
+	 * portraying it as CPU_NOT_IDLE.
 	 */
 	if (sd->flags & SD_SHARE_CPUPOWER &&
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		sd_idle = 1;
 
-	schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
+	schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
 redo:
-	group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
+	group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
 				   &sd_idle, &cpus, NULL);
 	if (!group) {
-		schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
+		schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
 		goto out_balanced;
 	}
 
-	busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance,
+	busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance,
 				     &cpus);
 	if (!busiest) {
-		schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
+		schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
 		goto out_balanced;
 	}
 
 	BUG_ON(busiest == this_rq);
 
-	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
+	schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
 
 	nr_moved = 0;
 	if (busiest->nr_running > 1) {
@@ -2897,7 +2897,7 @@ redo:
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					minus_1_or_zero(busiest->nr_running),
-					imbalance, sd, NEWLY_IDLE, NULL);
+					imbalance, sd, CPU_NEWLY_IDLE, NULL);
 		spin_unlock(&busiest->lock);
 
 		if (!nr_moved) {
@@ -2908,7 +2908,7 @@ redo:
 	}
 
 	if (!nr_moved) {
-		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
+		schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
 		if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
 		    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 			return -1;
@@ -2918,7 +2918,7 @@ redo:
 	return nr_moved;
 
 out_balanced:
-	schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
+	schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
 	if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
 	    !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
 		return -1;
@@ -3003,7 +3003,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 		schedstat_inc(sd, alb_cnt);
 
 		if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
-			       RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE,
+			       RTPRIO_TO_LOAD_WEIGHT(100), sd, CPU_IDLE,
 			       NULL))
 			schedstat_inc(sd, alb_pushed);
 		else
@@ -3120,7 +3120,7 @@ static DEFINE_SPINLOCK(balancing);
  *
  * Balancing parameters are set up in arch_init_sched_domains.
  */
-static inline void rebalance_domains(int cpu, enum idle_type idle)
+static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
 {
 	int balance = 1;
 	struct rq *rq = cpu_rq(cpu);
@@ -3134,7 +3134,7 @@ static inline void rebalance_domains(int cpu, enum idle_type idle)
 			continue;
 
 		interval = sd->balance_interval;
-		if (idle != SCHED_IDLE)
+		if (idle != CPU_IDLE)
 			interval *= sd->busy_factor;
 
 		/* scale ms to jiffies */
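The busy_factor scaling just above means a busy CPU rebalances far less
often than an idle one. A worked standalone sketch with made-up numbers
(the balance_interval and busy_factor values are illustrative, not kernel
defaults):

	#include <stdio.h>

	enum cpu_idle_type { CPU_IDLE, CPU_NOT_IDLE, CPU_NEWLY_IDLE,
			     CPU_MAX_IDLE_TYPES };

	int main(void)
	{
		unsigned long balance_interval = 64;	/* ms, illustrative */
		unsigned long busy_factor = 32;		/* illustrative */
		enum cpu_idle_type idle = CPU_NOT_IDLE;
		unsigned long interval = balance_interval;

		if (idle != CPU_IDLE)			/* same test as the hunk */
			interval *= busy_factor;	/* 64 ms -> 2048 ms */

		printf("effective interval: %lu ms\n", interval);
		return 0;
	}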
@@ -3154,7 +3154,7 @@ static inline void rebalance_domains(int cpu, enum idle_type idle)
 				 * longer idle, or one of our SMT siblings is
 				 * not idle.
 				 */
-				idle = NOT_IDLE;
+				idle = CPU_NOT_IDLE;
 			}
 			sd->last_balance = jiffies;
 		}
@@ -3184,7 +3184,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 {
 	int local_cpu = smp_processor_id();
 	struct rq *local_rq = cpu_rq(local_cpu);
-	enum idle_type idle = local_rq->idle_at_tick ? SCHED_IDLE : NOT_IDLE;
+	enum cpu_idle_type idle = local_rq->idle_at_tick ? CPU_IDLE : CPU_NOT_IDLE;
 
 	rebalance_domains(local_cpu, idle);
 
@@ -3210,7 +3210,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 			if (need_resched())
 				break;
 
-			rebalance_domains(balance_cpu, SCHED_IDLE);
+			rebalance_domains(balance_cpu, CPU_IDLE);
 
 			rq = cpu_rq(balance_cpu);
 			if (time_after(local_rq->next_balance, rq->next_balance))