author	Ingo Molnar <mingo@elte.hu>	2012-02-24 02:31:31 -0500
committer	Ingo Molnar <mingo@elte.hu>	2012-02-24 04:05:59 -0500
commit	c5905afb0ee6550b42c49213da1c22d67316c194 (patch)
tree	253fdb322e6e5b257ffda3b9b66bce90a473a6f7 /kernel/sched
parent	1cfa60dc7d7c7cc774a44eee47ff135a644a1f31 (diff)
static keys: Introduce 'struct static_key', static_key_true()/false() and static_key_slow_[inc|dec]()
So here's a boot tested patch on top of Jason's series that does
all the cleanups I talked about and turns jump labels into a
more intuitive to use facility. It should also address the
various misconceptions and confusions that surround jump labels.

Typical usage scenarios:

        #include <linux/static_key.h>

        struct static_key key = STATIC_KEY_INIT_TRUE;

        if (static_key_false(&key))
                do unlikely code
        else
                do likely code

Or:

        if (static_key_true(&key))
                do likely code
        else
                do unlikely code

The static key is modified via:

        static_key_slow_inc(&key);
        ...
        static_key_slow_dec(&key);

The 'slow' prefix makes it abundantly clear that this is an
expensive operation.

I've updated all in-kernel code to use this everywhere. Note
that I (intentionally) have not pushed through the rename
blindly through to the lowest levels: the actual jump-label
patching arch facility should be named like that, so we want to
decouple jump labels from the static-key facility a bit.

On non-jump-label enabled architectures static keys default to
likely()/unlikely() branches.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: a.p.zijlstra@chello.nl
Cc: mathieu.desnoyers@efficios.com
Cc: davem@davemloft.net
Cc: ddaney.cavm@gmail.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20120222085809.GA26397@elte.hu
Signed-off-by: Ingo Molnar <mingo@elte.hu>
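For reference, a minimal self-contained sketch of how a user of the new API might look in module-style code (the key name and both helper functions below are hypothetical, chosen only for illustration; this assumes a kernel where <linux/static_key.h> is available):

        #include <linux/printk.h>
        #include <linux/static_key.h>
        #include <linux/types.h>

        /* Hypothetical key; starts disabled, so the else-branch is the inline fast path. */
        static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

        static void my_fast_path(void)
        {
                if (static_key_false(&my_feature_key))
                        pr_info("feature enabled: out-of-line slow path\n");
                else
                        pr_info("feature disabled: straight-line fast path\n");
        }

        /* Toggling patches code at runtime and is expensive, hence the _slow_ names. */
        static void my_feature_set(bool enable)
        {
                if (enable)
                        static_key_slow_inc(&my_feature_key);
                else
                        static_key_slow_dec(&my_feature_key);
        }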
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	18
-rw-r--r--	kernel/sched/fair.c	8
-rw-r--r--	kernel/sched/sched.h	14
3 files changed, 20 insertions, 20 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5255c9d2e053..112c6824476b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -162,13 +162,13 @@ static int sched_feat_show(struct seq_file *m, void *v)
 
 #ifdef HAVE_JUMP_LABEL
 
-#define jump_label_key__true jump_label_key_enabled
-#define jump_label_key__false jump_label_key_disabled
+#define jump_label_key__true STATIC_KEY_INIT_TRUE
+#define jump_label_key__false STATIC_KEY_INIT_FALSE
 
 #define SCHED_FEAT(name, enabled)	\
 	jump_label_key__##enabled ,
 
-struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
+struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 #include "features.h"
 };
 
@@ -176,14 +176,14 @@ struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
 
 static void sched_feat_disable(int i)
 {
-	if (jump_label_enabled(&sched_feat_keys[i]))
-		jump_label_dec(&sched_feat_keys[i]);
+	if (static_key_enabled(&sched_feat_keys[i]))
+		static_key_slow_dec(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-	if (!jump_label_enabled(&sched_feat_keys[i]))
-		jump_label_inc(&sched_feat_keys[i]);
+	if (!static_key_enabled(&sched_feat_keys[i]))
+		static_key_slow_inc(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
@@ -894,7 +894,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 	delta -= irq_delta;
 #endif
 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-	if (static_branch((&paravirt_steal_rq_enabled))) {
+	if (static_key_false((&paravirt_steal_rq_enabled))) {
 		u64 st;
 
 		steal = paravirt_steal_clock(cpu_of(rq));
@@ -2756,7 +2756,7 @@ void account_idle_time(cputime_t cputime)
 static __always_inline bool steal_account_process_tick(void)
 {
 #ifdef CONFIG_PARAVIRT
-	if (static_branch(&paravirt_steal_enabled)) {
+	if (static_key_false(&paravirt_steal_enabled)) {
 		u64 steal, st = 0;
 
 		steal = paravirt_steal_clock(smp_processor_id());
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7c6414fc669d..423547ada38a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1399,20 +1399,20 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 #ifdef CONFIG_CFS_BANDWIDTH
 
 #ifdef HAVE_JUMP_LABEL
-static struct jump_label_key __cfs_bandwidth_used;
+static struct static_key __cfs_bandwidth_used;
 
 static inline bool cfs_bandwidth_used(void)
 {
-	return static_branch(&__cfs_bandwidth_used);
+	return static_key_false(&__cfs_bandwidth_used);
 }
 
 void account_cfs_bandwidth_used(int enabled, int was_enabled)
 {
 	/* only need to count groups transitioning between enabled/!enabled */
 	if (enabled && !was_enabled)
-		jump_label_inc(&__cfs_bandwidth_used);
+		static_key_slow_inc(&__cfs_bandwidth_used);
 	else if (!enabled && was_enabled)
-		jump_label_dec(&__cfs_bandwidth_used);
+		static_key_slow_dec(&__cfs_bandwidth_used);
 }
 #else /* HAVE_JUMP_LABEL */
 static bool cfs_bandwidth_used(void)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 98c0c2623db8..b4cd6d8ea150 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -611,7 +611,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
 #ifdef CONFIG_SCHED_DEBUG
-# include <linux/jump_label.h>
+# include <linux/static_key.h>
 # define const_debug __read_mostly
 #else
 # define const_debug const
@@ -630,18 +630,18 @@ enum {
 #undef SCHED_FEAT
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
-static __always_inline bool static_branch__true(struct jump_label_key *key)
+static __always_inline bool static_branch__true(struct static_key *key)
 {
-	return likely(static_branch(key)); /* Not out of line branch. */
+	return static_key_true(key); /* Not out of line branch. */
 }
 
-static __always_inline bool static_branch__false(struct jump_label_key *key)
+static __always_inline bool static_branch__false(struct static_key *key)
 {
-	return unlikely(static_branch(key)); /* Out of line branch. */
+	return static_key_false(key); /* Out of line branch. */
 }
 
 #define SCHED_FEAT(name, enabled)	\
-static __always_inline bool static_branch_##name(struct jump_label_key *key) \
+static __always_inline bool static_branch_##name(struct static_key *key) \
 {	\
 	return static_branch__##enabled(key);	\
 }
@@ -650,7 +650,7 @@ static __always_inline bool static_branch_##name(struct jump_label_key *key) \
 
 #undef SCHED_FEAT
 
-extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR];
+extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
 #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))