diff options
-rw-r--r-- include/linux/perf_event.h |  3 +-
-rw-r--r-- kernel/events/core.c       | 12 +++---
2 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a79e59fc3b7d..6cb5d483ab34 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -54,6 +54,7 @@ struct perf_guest_info_callbacks {
54 | #include <linux/sysfs.h> | 54 | #include <linux/sysfs.h> |
55 | #include <linux/perf_regs.h> | 55 | #include <linux/perf_regs.h> |
56 | #include <linux/cgroup.h> | 56 | #include <linux/cgroup.h> |
57 | #include <linux/refcount.h> | ||
57 | #include <asm/local.h> | 58 | #include <asm/local.h> |
58 | 59 | ||
59 | struct perf_callchain_entry { | 60 | struct perf_callchain_entry { |
@@ -737,7 +738,7 @@ struct perf_event_context {
737 | int nr_stat; | 738 | int nr_stat; |
738 | int nr_freq; | 739 | int nr_freq; |
739 | int rotate_disable; | 740 | int rotate_disable; |
740 | atomic_t refcount; | 741 | refcount_t refcount; |
741 | struct task_struct *task; | 742 | struct task_struct *task; |
742 | 743 | ||
743 | /* | 744 | /* |
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5b89de7918d0..677164d54547 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1172,7 +1172,7 @@ static void perf_event_ctx_deactivate(struct perf_event_context *ctx)
1172 | 1172 | ||
1173 | static void get_ctx(struct perf_event_context *ctx) | 1173 | static void get_ctx(struct perf_event_context *ctx) |
1174 | { | 1174 | { |
1175 | WARN_ON(!atomic_inc_not_zero(&ctx->refcount)); | 1175 | refcount_inc(&ctx->refcount); |
1176 | } | 1176 | } |
1177 | 1177 | ||
1178 | static void free_ctx(struct rcu_head *head) | 1178 | static void free_ctx(struct rcu_head *head) |
@@ -1186,7 +1186,7 @@ static void free_ctx(struct rcu_head *head)
1186 | 1186 | ||
1187 | static void put_ctx(struct perf_event_context *ctx) | 1187 | static void put_ctx(struct perf_event_context *ctx) |
1188 | { | 1188 | { |
1189 | if (atomic_dec_and_test(&ctx->refcount)) { | 1189 | if (refcount_dec_and_test(&ctx->refcount)) { |
1190 | if (ctx->parent_ctx) | 1190 | if (ctx->parent_ctx) |
1191 | put_ctx(ctx->parent_ctx); | 1191 | put_ctx(ctx->parent_ctx); |
1192 | if (ctx->task && ctx->task != TASK_TOMBSTONE) | 1192 | if (ctx->task && ctx->task != TASK_TOMBSTONE) |
@@ -1268,7 +1268,7 @@ perf_event_ctx_lock_nested(struct perf_event *event, int nesting)
1268 | again: | 1268 | again: |
1269 | rcu_read_lock(); | 1269 | rcu_read_lock(); |
1270 | ctx = READ_ONCE(event->ctx); | 1270 | ctx = READ_ONCE(event->ctx); |
1271 | if (!atomic_inc_not_zero(&ctx->refcount)) { | 1271 | if (!refcount_inc_not_zero(&ctx->refcount)) { |
1272 | rcu_read_unlock(); | 1272 | rcu_read_unlock(); |
1273 | goto again; | 1273 | goto again; |
1274 | } | 1274 | } |
@@ -1401,7 +1401,7 @@ retry:
1401 | } | 1401 | } |
1402 | 1402 | ||
1403 | if (ctx->task == TASK_TOMBSTONE || | 1403 | if (ctx->task == TASK_TOMBSTONE || |
1404 | !atomic_inc_not_zero(&ctx->refcount)) { | 1404 | !refcount_inc_not_zero(&ctx->refcount)) { |
1405 | raw_spin_unlock(&ctx->lock); | 1405 | raw_spin_unlock(&ctx->lock); |
1406 | ctx = NULL; | 1406 | ctx = NULL; |
1407 | } else { | 1407 | } else { |
@@ -4057,7 +4057,7 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
4057 | INIT_LIST_HEAD(&ctx->event_list); | 4057 | INIT_LIST_HEAD(&ctx->event_list); |
4058 | INIT_LIST_HEAD(&ctx->pinned_active); | 4058 | INIT_LIST_HEAD(&ctx->pinned_active); |
4059 | INIT_LIST_HEAD(&ctx->flexible_active); | 4059 | INIT_LIST_HEAD(&ctx->flexible_active); |
4060 | atomic_set(&ctx->refcount, 1); | 4060 | refcount_set(&ctx->refcount, 1); |
4061 | } | 4061 | } |
4062 | 4062 | ||
4063 | static struct perf_event_context * | 4063 | static struct perf_event_context * |
@@ -10613,7 +10613,7 @@ __perf_event_ctx_lock_double(struct perf_event *group_leader,
10613 | again: | 10613 | again: |
10614 | rcu_read_lock(); | 10614 | rcu_read_lock(); |
10615 | gctx = READ_ONCE(group_leader->ctx); | 10615 | gctx = READ_ONCE(group_leader->ctx); |
10616 | if (!atomic_inc_not_zero(&gctx->refcount)) { | 10616 | if (!refcount_inc_not_zero(&gctx->refcount)) { |
10617 | rcu_read_unlock(); | 10617 | rcu_read_unlock(); |
10618 | goto again; | 10618 | goto again; |
10619 | } | 10619 | } |