Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a837b20ba190..14a6c7b545de 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -808,7 +808,7 @@ enum cpu_idle_type {
  * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
  * increased costs.
  */
-#if BITS_PER_LONG > 32
+#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
 # define SCHED_LOAD_RESOLUTION	10
 # define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
 # define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
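For reference, the macros in this hunk implement plain fixed-point scaling: scale_load() shifts a task weight up by SCHED_LOAD_RESOLUTION bits so load arithmetic keeps extra precision, and scale_load_down() shifts it back. A minimal userspace sketch of the round trip (hardcoding the resolution from above and using 1024, the nice-0 task weight, as input; both values are restated here outside their kernel context):

    #include <stdio.h>

    /* Mirrors the macros in the hunk above: 10 fractional bits. */
    #define SCHED_LOAD_RESOLUTION	10
    #define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
    #define scale_load_down(w)		((w) >> SCHED_LOAD_RESOLUTION)

    int main(void)
    {
    	unsigned long w = 1024;	/* weight of a nice-0 task */

    	/* Scaling up gains 10 bits of precision for load math... */
    	printf("scaled:   %lu\n", scale_load(w));		/* 1048576 */
    	/* ...and scaling down recovers the user-visible weight. */
    	printf("restored: %lu\n", scale_load_down(scale_load(w)));	/* 1024 */
    	return 0;
    }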
@@ -844,6 +844,7 @@ enum cpu_idle_type {
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
 #define SD_ASYM_PACKING		0x0800	/* Place busy groups earlier in the domain */
 #define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
+#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */
 
 enum powersavings_balance_level {
 	POWERSAVINGS_BALANCE_NONE = 0,	/* No power saving load balance */
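The SD_* values are individual bits in a sched_domain flags word, so the new SD_OVERLAP flag is queried like any other with a bitwise AND. A minimal sketch, assuming only the flag values shown above (the flags variable here is a hypothetical stand-in for a domain's flags field):

    #include <stdio.h>

    #define SD_SERIALIZE	0x0400
    #define SD_ASYM_PACKING	0x0800
    #define SD_PREFER_SIBLING	0x1000
    #define SD_OVERLAP		0x2000	/* new in this change */

    int main(void)
    {
    	/* A hypothetical flags word for one domain level. */
    	unsigned int flags = SD_PREFER_SIBLING | SD_OVERLAP;

    	/* Bitwise AND tests one flag without disturbing the rest. */
    	if (flags & SD_OVERLAP)
    		printf("groups at this level may overlap\n");
    	return 0;
    }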
@@ -893,16 +894,21 @@ static inline int sd_power_saving_flags(void)
 	return 0;
 }
 
-struct sched_group {
-	struct sched_group *next;	/* Must be a circular list */
+struct sched_group_power {
 	atomic_t ref;
-
 	/*
 	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
 	 * single CPU.
 	 */
-	unsigned int cpu_power, cpu_power_orig;
+	unsigned int power, power_orig;
+};
+
+struct sched_group {
+	struct sched_group *next;	/* Must be a circular list */
+	atomic_t ref;
+
 	unsigned int group_weight;
+	struct sched_group_power *sgp;
 
 	/*
 	 * The CPUs this group covers.
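Breaking the power data out into a separately refcounted sched_group_power means multiple sched_group instances can share one power structure through the new sgp pointer. A minimal userspace sketch of that indirection, using a plain int where the kernel uses atomic_t and inventing the initial values purely for illustration:

    #include <stdio.h>

    struct sched_group_power {
    	int ref;			/* stand-in for the kernel's atomic_t */
    	unsigned int power, power_orig;
    };

    struct sched_group {
    	struct sched_group *next;
    	int ref;
    	unsigned int group_weight;
    	struct sched_group_power *sgp;
    };

    int main(void)
    {
    	/* One power structure shared by two groups via sgp. */
    	struct sched_group_power sgp = { .ref = 2, .power = 1024, .power_orig = 1024 };
    	struct sched_group a = { .group_weight = 4, .sgp = &sgp };
    	struct sched_group b = { .group_weight = 4, .sgp = &sgp };

    	a.next = &b;
    	b.next = &a;			/* "Must be a circular list" */

    	/* An update through one group is visible through the other. */
    	a.sgp->power = 2048;
    	printf("b sees power %u\n", b.sgp->power);	/* 2048 */
    	return 0;
    }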
@@ -1254,6 +1260,9 @@ struct task_struct {
 #ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
 	char rcu_read_unlock_special;
+#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
+	int rcu_boosted;
+#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
 	struct list_head rcu_node_entry;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 #ifdef CONFIG_TREE_PREEMPT_RCU
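The new rcu_boosted field only exists when both config options are enabled, so any code that touches it must sit under the same preprocessor guards. A minimal sketch of the pattern, with the CONFIG_ symbols defined by hand purely for illustration (in the kernel, Kconfig provides them):

    #include <stdio.h>

    /* Illustrative only: pretend both options were selected. */
    #define CONFIG_RCU_BOOST 1
    #define CONFIG_TREE_PREEMPT_RCU 1

    struct task_like {
    	int rcu_read_lock_nesting;
    #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
    	int rcu_boosted;	/* present only in boosted preemptible-RCU builds */
    #endif
    };

    int main(void)
    {
    	struct task_like t = { 0 };
    #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
    	t.rcu_boosted = 1;	/* accesses must be guarded the same way */
    	printf("rcu_boosted = %d\n", t.rcu_boosted);
    #endif
    	return 0;
    }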