author     Ingo Molnar <mingo@elte.hu>    2012-02-24 02:31:31 -0500
committer  Ingo Molnar <mingo@elte.hu>    2012-02-24 04:05:59 -0500
commit     c5905afb0ee6550b42c49213da1c22d67316c194
tree       253fdb322e6e5b257ffda3b9b66bce90a473a6f7 /kernel/events
parent     1cfa60dc7d7c7cc774a44eee47ff135a644a1f31
static keys: Introduce 'struct static_key', static_key_true()/false() and static_key_slow_[inc|dec]()
So here's a boot-tested patch on top of Jason's series that does
all the cleanups I talked about and turns jump labels into a
facility that is more intuitive to use. It should also address
the various misconceptions and confusions that surround jump
labels.
Typical usage scenarios:

	#include <linux/static_key.h>

	struct static_key key = STATIC_KEY_INIT_TRUE;

	if (static_key_false(&key))
		do_unlikely_code();
	else
		do_likely_code();

Or:

	if (static_key_true(&key))
		do_likely_code();
	else
		do_unlikely_code();
The static key is modified via:

	static_key_slow_inc(&key);
	...
	static_key_slow_dec(&key);

The 'slow' prefix makes it abundantly clear that this is an
expensive operation.
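To make the split concrete, here is a minimal sketch of how the
two halves fit together; the my_feature_* names are made up for
this example and are not part of the patch:

	#include <linux/static_key.h>

	/* Off by default: the fast path compiles to a straight-line NOP. */
	static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

	static void do_feature_work(void)
	{
		/* ... the rarely-enabled work ... */
	}

	/* Hot path: queried constantly, so it must stay cheap. */
	void my_hot_path(void)
	{
		if (static_key_false(&my_feature_key))
			do_feature_work();	/* unlikely until enabled */
	}

	/* Slow path: each transition may rewrite the branch sites. */
	void my_feature_enable(void)
	{
		static_key_slow_inc(&my_feature_key);
	}

	void my_feature_disable(void)
	{
		static_key_slow_dec(&my_feature_key);
	}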
I've updated all in-kernel code to use this everywhere. Note
that I (intentionally) have not pushed the rename blindly
through to the lowest levels: the actual jump-label patching
arch facility should be named like that, so we want to
decouple jump labels from the static-key facility a bit.
On architectures without jump label support, static keys fall
back to plain likely()/unlikely() branches.
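For reference, that fallback boils down to an atomic counter plus
a compiler hint; a simplified sketch of the no-jump-label variants
(modeled on include/linux/jump_label.h, not verbatim):

	struct static_key {
		atomic_t enabled;
	};

	static __always_inline bool static_key_false(struct static_key *key)
	{
		/* No code patching: an ordinary, hinted conditional. */
		if (unlikely(atomic_read(&key->enabled) > 0))
			return true;
		return false;
	}

	static __always_inline bool static_key_true(struct static_key *key)
	{
		if (likely(atomic_read(&key->enabled) > 0))
			return true;
		return false;
	}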
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: a.p.zijlstra@chello.nl
Cc: mathieu.desnoyers@efficios.com
Cc: davem@davemloft.net
Cc: ddaney.cavm@gmail.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20120222085809.GA26397@elte.hu
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/events')

 kernel/events/core.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 7c3b9de55f6b..5e0f8bb89b2b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -128,7 +128,7 @@ enum event_type_t {
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
-struct jump_label_key_deferred perf_sched_events __read_mostly;
+struct static_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 
 static atomic_t nr_mmap_events __read_mostly;
@@ -2769,7 +2769,7 @@ static void free_event(struct perf_event *event)
 
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
-			jump_label_dec_deferred(&perf_sched_events);
+			static_key_slow_dec_deferred(&perf_sched_events);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_dec(&nr_mmap_events);
 		if (event->attr.comm)
@@ -2780,7 +2780,7 @@ static void free_event(struct perf_event *event)
 			put_callchain_buffers();
 		if (is_cgroup_event(event)) {
 			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			jump_label_dec_deferred(&perf_sched_events);
+			static_key_slow_dec_deferred(&perf_sched_events);
 		}
 	}
 
@@ -4982,7 +4982,7 @@ fail:
 	return err;
 }
 
-struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 static void sw_perf_event_destroy(struct perf_event *event)
 {
@@ -4990,7 +4990,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
 
 	WARN_ON(event->parent);
 
-	jump_label_dec(&perf_swevent_enabled[event_id]);
+	static_key_slow_dec(&perf_swevent_enabled[event_id]);
 	swevent_hlist_put(event);
 }
 
@@ -5020,7 +5020,7 @@ static int perf_swevent_init(struct perf_event *event)
 		if (err)
 			return err;
 
-		jump_label_inc(&perf_swevent_enabled[event_id]);
+		static_key_slow_inc(&perf_swevent_enabled[event_id]);
 		event->destroy = sw_perf_event_destroy;
 	}
 
@@ -5843,7 +5843,7 @@ done:
 
 	if (!event->parent) {
 		if (event->attach_state & PERF_ATTACH_TASK)
-			jump_label_inc(&perf_sched_events.key);
+			static_key_slow_inc(&perf_sched_events.key);
 		if (event->attr.mmap || event->attr.mmap_data)
 			atomic_inc(&nr_mmap_events);
 		if (event->attr.comm)
@@ -6081,7 +6081,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		 * - that may need work on context switch
 		 */
 		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
-		jump_label_inc(&perf_sched_events.key);
+		static_key_slow_inc(&perf_sched_events.key);
 	}
 
 	/*
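The hunks above only touch the slow half of the pattern (event
creation and teardown bump the reference counts); the fast half
lives in header code that tests the key. A simplified sketch of
that consumer, modeled on perf_event_task_sched_out() in
include/linux/perf_event.h (not part of this diff, and not
verbatim kernel code):

	extern struct static_key_deferred perf_sched_events;

	static inline void perf_event_task_sched_out(struct task_struct *prev,
						     struct task_struct *next)
	{
		/* NOPed out while no events exist; only the
		 * static_key_slow_inc() calls above make this
		 * branch reachable. */
		if (static_key_false(&perf_sched_events.key))
			__perf_event_task_sched_out(prev, next);
	}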