diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-02-28 10:52:00 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-02-28 10:52:00 -0500 |
commit | 1b9540ce033ad15802e36ad1cd1c36bdad98eeea (patch) | |
tree | 4b6d5484b15a9a9ca8ff64f7444705600d0cbb68 /include/linux/perf_event.h | |
parent | 4b696dcb1a55e40648ad0eec4af991c72f945a85 (diff) | |
parent | 0da4cf3e0a68c97ef811569804616a811f786729 (diff) |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Thomas Gleixner:
"A rather largish series of 12 patches addressing a maze of race
conditions in the perf core code from Peter Zijlstra"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf: Robustify task_function_call()
perf: Fix scaling vs. perf_install_in_context()
perf: Fix scaling vs. perf_event_enable()
perf: Fix scaling vs. perf_event_enable_on_exec()
perf: Fix ctx time tracking by introducing EVENT_TIME
perf: Cure event->pending_disable race
perf: Fix race between event install and jump_labels
perf: Fix cloning
perf: Only update context time when active
perf: Allow perf_release() with !event->ctx
perf: Do not double free
perf: Close install vs. exit race
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r-- | include/linux/perf_event.h | 7 |
1 file changed, 4 insertions, 3 deletions
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index b35a61a481fa..f5c5a3fa2c81 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -397,6 +397,7 @@ struct pmu { | |||
397 | * enum perf_event_active_state - the states of a event | 397 | * enum perf_event_active_state - the states of a event |
398 | */ | 398 | */ |
399 | enum perf_event_active_state { | 399 | enum perf_event_active_state { |
400 | PERF_EVENT_STATE_DEAD = -4, | ||
400 | PERF_EVENT_STATE_EXIT = -3, | 401 | PERF_EVENT_STATE_EXIT = -3, |
401 | PERF_EVENT_STATE_ERROR = -2, | 402 | PERF_EVENT_STATE_ERROR = -2, |
402 | PERF_EVENT_STATE_OFF = -1, | 403 | PERF_EVENT_STATE_OFF = -1, |
@@ -905,7 +906,7 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr) | |||
905 | } | 906 | } |
906 | } | 907 | } |
907 | 908 | ||
908 | extern struct static_key_deferred perf_sched_events; | 909 | extern struct static_key_false perf_sched_events; |
909 | 910 | ||
910 | static __always_inline bool | 911 | static __always_inline bool |
911 | perf_sw_migrate_enabled(void) | 912 | perf_sw_migrate_enabled(void) |
@@ -924,7 +925,7 @@ static inline void perf_event_task_migrate(struct task_struct *task) | |||
924 | static inline void perf_event_task_sched_in(struct task_struct *prev, | 925 | static inline void perf_event_task_sched_in(struct task_struct *prev, |
925 | struct task_struct *task) | 926 | struct task_struct *task) |
926 | { | 927 | { |
927 | if (static_key_false(&perf_sched_events.key)) | 928 | if (static_branch_unlikely(&perf_sched_events)) |
928 | __perf_event_task_sched_in(prev, task); | 929 | __perf_event_task_sched_in(prev, task); |
929 | 930 | ||
930 | if (perf_sw_migrate_enabled() && task->sched_migrated) { | 931 | if (perf_sw_migrate_enabled() && task->sched_migrated) { |
@@ -941,7 +942,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev, | |||
941 | { | 942 | { |
942 | perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); | 943 | perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0); |
943 | 944 | ||
944 | if (static_key_false(&perf_sched_events.key)) | 945 | if (static_branch_unlikely(&perf_sched_events)) |
945 | __perf_event_task_sched_out(prev, next); | 946 | __perf_event_task_sched_out(prev, next); |
946 | } | 947 | } |
947 | 948 | ||