diff options
author | Peter Zijlstra <peterz@infradead.org> | 2012-01-09 05:28:35 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2012-05-17 07:48:56 -0400 |
commit | 8e7fbcbc22c12414bcc9dfdd683637f58fb32759 (patch) | |
tree | a438021ddeadddd8f0745293aeb8c80dbe3c999c /include/linux/sched.h | |
parent | fac536f7e4927f34d480dc066f3a578c743b8f0e (diff) |
sched: Remove stale power aware scheduling remnants and dysfunctional knobs
It's been broken forever (i.e. it's not scheduling in a power
aware fashion), as reported by Suresh and others sending
patches, and nobody cares enough to fix it properly ...
so remove it to make space free for something better.
There's various problems with the code as it stands today, first
and foremost the user interface which is bound to topology
levels and has multiple values per level. This results in a
state explosion which the administrator or distro needs to
master and almost nobody does.
Furthermore, large configuration state spaces aren't good; they
mean the thing doesn't just work right, because it's either
under so many impossible-to-meet constraints, or even if
there's an achievable state, workloads have to be aware of
it precisely and can never meet it for dynamic workloads.
So pushing this kind of decision to user-space was a bad idea
even with a single knob - it's exponentially worse with knobs
on every node of the topology.
There is a proposal to replace the user interface with a single
3 state knob:
sched_balance_policy := { performance, power, auto }
where 'auto' would be the preferred default which looks at things
like Battery/AC mode and possible cpufreq state or whatever the hw
exposes to show us power use expectations - but there's been no
progress on it in the past many months.
Aside from that, the actual implementation of the various knobs
is known to be broken. There have been sporadic attempts at
fixing things but these always stop short of reaching a mergeable
state.
Therefore this wholesale removal with the hopes of spurring
people who care to come forward once again and work on a
coherent replacement.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/1326104915.2442.53.camel@twins
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r-- | include/linux/sched.h | 47 |
1 files changed, 0 insertions, 47 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 4a559bf0622f..3d644809c9db 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -855,61 +855,14 @@ enum cpu_idle_type { | |||
855 | #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ | 855 | #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ |
856 | #define SD_PREFER_LOCAL 0x0040 /* Prefer to keep tasks local to this domain */ | 856 | #define SD_PREFER_LOCAL 0x0040 /* Prefer to keep tasks local to this domain */ |
857 | #define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ | 857 | #define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ |
858 | #define SD_POWERSAVINGS_BALANCE 0x0100 /* Balance for power savings */ | ||
859 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ | 858 | #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ |
860 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ | 859 | #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ |
861 | #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ | 860 | #define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */ |
862 | #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ | 861 | #define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */ |
863 | #define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ | 862 | #define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ |
864 | 863 | ||
865 | enum powersavings_balance_level { | ||
866 | POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */ | ||
867 | POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package | ||
868 | * first for long running threads | ||
869 | */ | ||
870 | POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle | ||
871 | * cpu package for power savings | ||
872 | */ | ||
873 | MAX_POWERSAVINGS_BALANCE_LEVELS | ||
874 | }; | ||
875 | |||
876 | extern int sched_mc_power_savings, sched_smt_power_savings; | ||
877 | |||
878 | static inline int sd_balance_for_mc_power(void) | ||
879 | { | ||
880 | if (sched_smt_power_savings) | ||
881 | return SD_POWERSAVINGS_BALANCE; | ||
882 | |||
883 | if (!sched_mc_power_savings) | ||
884 | return SD_PREFER_SIBLING; | ||
885 | |||
886 | return 0; | ||
887 | } | ||
888 | |||
889 | static inline int sd_balance_for_package_power(void) | ||
890 | { | ||
891 | if (sched_mc_power_savings | sched_smt_power_savings) | ||
892 | return SD_POWERSAVINGS_BALANCE; | ||
893 | |||
894 | return SD_PREFER_SIBLING; | ||
895 | } | ||
896 | |||
897 | extern int __weak arch_sd_sibiling_asym_packing(void); | 864 | extern int __weak arch_sd_sibiling_asym_packing(void); |
898 | 865 | ||
899 | /* | ||
900 | * Optimise SD flags for power savings: | ||
901 | * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings. | ||
902 | * Keep default SD flags if sched_{smt,mc}_power_saving=0 | ||
903 | */ | ||
904 | |||
905 | static inline int sd_power_saving_flags(void) | ||
906 | { | ||
907 | if (sched_mc_power_savings | sched_smt_power_savings) | ||
908 | return SD_BALANCE_NEWIDLE; | ||
909 | |||
910 | return 0; | ||
911 | } | ||
912 | |||
913 | struct sched_group_power { | 866 | struct sched_group_power { |
914 | atomic_t ref; | 867 | atomic_t ref; |
915 | /* | 868 | /* |