Diffstat (limited to 'kernel/events/core.c')
 kernel/events/core.c | 33 ++++++++++++---------------------
 1 file changed, 12 insertions(+), 21 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1538df9b2b65..426c2ffba16d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1452,6 +1452,13 @@ static enum event_type_t get_event_type(struct perf_event *event)
 
 	lockdep_assert_held(&ctx->lock);
 
+	/*
+	 * It's 'group type', really, because if our group leader is
+	 * pinned, so are we.
+	 */
+	if (event->group_leader != event)
+		event = event->group_leader;
+
 	event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE;
 	if (!ctx->task)
 		event_type |= EVENT_CPU;
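The hunk above makes get_event_type() resolve a sibling to its group leader before classifying it, since, as the new comment says, a group is pinned exactly when its leader is. For context, a minimal userspace sketch (not part of this patch; the event choices and the syscall wrapper are illustrative) of building such a group, with the pinned attribute carried by the leader:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Illustrative wrapper; glibc exports no perf_event_open() symbol. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	int leader, sibling;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.pinned = 1;	/* the leader's pinned bit classifies the group */
	leader = perf_event_open(&attr, 0, -1, -1, 0);

	attr.pinned = 0;	/* a sibling follows its leader regardless */
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	sibling = perf_event_open(&attr, 0, -1, leader, 0);

	return (leader < 0 || sibling < 0);
}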
@@ -4378,7 +4385,9 @@ EXPORT_SYMBOL_GPL(perf_event_read_value);
 static int __perf_read_group_add(struct perf_event *leader,
 					u64 read_format, u64 *values)
 {
+	struct perf_event_context *ctx = leader->ctx;
 	struct perf_event *sub;
+	unsigned long flags;
 	int n = 1; /* skip @nr */
 	int ret;
 
@@ -4408,12 +4417,15 @@ static int __perf_read_group_add(struct perf_event *leader,
 	if (read_format & PERF_FORMAT_ID)
 		values[n++] = primary_event_id(leader);
 
+	raw_spin_lock_irqsave(&ctx->lock, flags);
+
 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
 		values[n++] += perf_event_count(sub);
 		if (read_format & PERF_FORMAT_ID)
 			values[n++] = primary_event_id(sub);
 	}
 
+	raw_spin_unlock_irqrestore(&ctx->lock, flags);
 	return 0;
 }
 
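The two hunks above take ctx->lock around the sibling-list walk with raw_spin_lock_irqsave(), so a concurrent modification of the group cannot be observed mid-read. What this function produces is the PERF_FORMAT_GROUP read buffer: @nr first (which the existing "n = 1; /* skip @nr */" accounts for), then one value, plus an id when PERF_FORMAT_ID is set, per group member. A hedged reader-side sketch, assuming a two-event group, PERF_FORMAT_GROUP | PERF_FORMAT_ID, and no time fields, following the layout documented in perf_event_open(2):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Assumed layout for this sketch: no time fields requested. */
struct group_read {
	uint64_t nr;		/* values[0] in the kernel buffer */
	struct {
		uint64_t value;	/* accumulated perf_event_count() */
		uint64_t id;	/* primary_event_id() */
	} cnt[2];		/* leader + one sibling in this sketch */
};

static int dump_group(int leader_fd)
{
	struct group_read buf;

	if (read(leader_fd, &buf, sizeof(buf)) < 0)
		return -1;
	for (uint64_t i = 0; i < buf.nr && i < 2; i++)
		printf("id %llu: %llu\n",
		       (unsigned long long)buf.cnt[i].id,
		       (unsigned long long)buf.cnt[i].value);
	return 0;
}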
@@ -7321,21 +7333,6 @@ int perf_event_account_interrupt(struct perf_event *event)
 	return __perf_event_account_interrupt(event, 1);
 }
 
-static bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs)
-{
-	/*
-	 * Due to interrupt latency (AKA "skid"), we may enter the
-	 * kernel before taking an overflow, even if the PMU is only
-	 * counting user events.
-	 * To avoid leaking information to userspace, we must always
-	 * reject kernel samples when exclude_kernel is set.
-	 */
-	if (event->attr.exclude_kernel && !user_mode(regs))
-		return false;
-
-	return true;
-}
-
 /*
  * Generic event overflow handling, sampling.
  */
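The removed sample_is_allowed() implemented the policy its comment describes: because of interrupt skid, a counter restricted to user mode via attr.exclude_kernel can overflow after the CPU has already entered the kernel, and the reverted code dropped such samples outright. A hedged sketch (not from this patch) of the attribute setup the check applied to, with precise_ip as the PMU-dependent knob that actually reduces skid:

#include <linux/perf_event.h>
#include <string.h>

static void init_user_only_sampler(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->sample_period = 100000;
	attr->exclude_kernel = 1;	/* the removed check only fired here */
	/*
	 * Where the PMU supports it, precise_ip requests reduced or
	 * zero skid, addressing the leak without discarding samples.
	 */
	attr->precise_ip = 2;
}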
@@ -7357,12 +7354,6 @@ static int __perf_event_overflow(struct perf_event *event,
 	ret = __perf_event_account_interrupt(event, throttle);
 
 	/*
-	 * For security, drop the skid kernel samples if necessary.
-	 */
-	if (!sample_is_allowed(event, regs))
-		return ret;
-
-	/*
 	 * XXX event_limit might not quite work as expected on inherited
 	 * events
 	 */
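The retained XXX comment concerns event_limit, the overflow budget that userspace arms with PERF_EVENT_IOC_REFRESH; once exhausted, the event is disabled and its owner is signalled. A hedged sketch (illustrative, not from this patch) of arming it:

#include <linux/perf_event.h>
#include <sys/ioctl.h>

/*
 * Allow n_overflows more overflows before the event auto-disables;
 * typically paired with fcntl(F_SETOWN/F_SETSIG) to receive the
 * signal. This budget is the event_limit the XXX comment refers to.
 */
static int arm_overflow_limit(int event_fd, int n_overflows)
{
	return ioctl(event_fd, PERF_EVENT_IOC_REFRESH, n_overflows);
}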