diff options
author | Ingo Molnar <mingo@elte.hu> | 2012-02-24 02:31:31 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2012-02-24 04:05:59 -0500 |
commit | c5905afb0ee6550b42c49213da1c22d67316c194 (patch) | |
tree | 253fdb322e6e5b257ffda3b9b66bce90a473a6f7 /include/linux/perf_event.h | |
parent | 1cfa60dc7d7c7cc774a44eee47ff135a644a1f31 (diff) |
static keys: Introduce 'struct static_key', static_key_true()/false() and static_key_slow_[inc|dec]()
So here's a boot tested patch on top of Jason's series that does
all the cleanups I talked about and turns jump labels into a
more intuitive to use facility. It should also address the
various misconceptions and confusions that surround jump labels.
Typical usage scenarios:
#include <linux/static_key.h>
struct static_key key = STATIC_KEY_INIT_TRUE;
if (static_key_false(&key))
do unlikely code
else
do likely code
Or:
if (static_key_true(&key))
do likely code
else
do unlikely code
The static key is modified via:
static_key_slow_inc(&key);
...
static_key_slow_dec(&key);
The 'slow' prefix makes it abundantly clear that this is an
expensive operation.
I've updated all in-kernel code to use this everywhere. Note
that I (intentionally) have not pushed the rename blindly
through to the lowest levels: the actual jump-label
patching arch facility should be named like that, so we want to
decouple jump labels from the static-key facility a bit.
On non-jump-label enabled architectures static keys default to
likely()/unlikely() branches.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: a.p.zijlstra@chello.nl
Cc: mathieu.desnoyers@efficios.com
Cc: davem@davemloft.net
Cc: ddaney.cavm@gmail.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20120222085809.GA26397@elte.hu
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r-- | include/linux/perf_event.h | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 412b790f5da6..0d21e6f1cf53 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -514,7 +514,7 @@ struct perf_guest_info_callbacks { | |||
514 | #include <linux/ftrace.h> | 514 | #include <linux/ftrace.h> |
515 | #include <linux/cpu.h> | 515 | #include <linux/cpu.h> |
516 | #include <linux/irq_work.h> | 516 | #include <linux/irq_work.h> |
517 | #include <linux/jump_label.h> | 517 | #include <linux/static_key.h> |
518 | #include <linux/atomic.h> | 518 | #include <linux/atomic.h> |
519 | #include <asm/local.h> | 519 | #include <asm/local.h> |
520 | 520 | ||
@@ -1038,7 +1038,7 @@ static inline int is_software_event(struct perf_event *event) | |||
1038 | return event->pmu->task_ctx_nr == perf_sw_context; | 1038 | return event->pmu->task_ctx_nr == perf_sw_context; |
1039 | } | 1039 | } |
1040 | 1040 | ||
1041 | extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 1041 | extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
1042 | 1042 | ||
1043 | extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); | 1043 | extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); |
1044 | 1044 | ||
@@ -1066,7 +1066,7 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) | |||
1066 | { | 1066 | { |
1067 | struct pt_regs hot_regs; | 1067 | struct pt_regs hot_regs; |
1068 | 1068 | ||
1069 | if (static_branch(&perf_swevent_enabled[event_id])) { | 1069 | if (static_key_false(&perf_swevent_enabled[event_id])) { |
1070 | if (!regs) { | 1070 | if (!regs) { |
1071 | perf_fetch_caller_regs(&hot_regs); | 1071 | perf_fetch_caller_regs(&hot_regs); |
1072 | regs = &hot_regs; | 1072 | regs = &hot_regs; |
@@ -1075,12 +1075,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) | |||
1075 | } | 1075 | } |
1076 | } | 1076 | } |
1077 | 1077 | ||
1078 | extern struct jump_label_key_deferred perf_sched_events; | 1078 | extern struct static_key_deferred perf_sched_events; |
1079 | 1079 | ||
1080 | static inline void perf_event_task_sched_in(struct task_struct *prev, | 1080 | static inline void perf_event_task_sched_in(struct task_struct *prev, |
1081 | struct task_struct *task) | 1081 | struct task_struct *task) |
1082 | { | 1082 | { |
1083 | if (static_branch(&perf_sched_events.key)) | 1083 | if (static_key_false(&perf_sched_events.key)) |
1084 | __perf_event_task_sched_in(prev, task); | 1084 | __perf_event_task_sched_in(prev, task); |
1085 | } | 1085 | } |
1086 | 1086 | ||
@@ -1089,7 +1089,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev, | |||
1089 | { | 1089 | { |
1090 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); | 1090 | perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0); |
1091 | 1091 | ||
1092 | if (static_branch(&perf_sched_events.key)) | 1092 | if (static_key_false(&perf_sched_events.key)) |
1093 | __perf_event_task_sched_out(prev, next); | 1093 | __perf_event_task_sched_out(prev, next); |
1094 | } | 1094 | } |
1095 | 1095 | ||