Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--   kernel/events/core.c | 94
1 file changed, 61 insertions, 33 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 110b38a58493..e235bb991bdd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-
         lockdep_assert_held(&ctx->lock);
 
         WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
 {
         struct perf_event *group_leader = event->group_leader, *pos;
 
+        lockdep_assert_held(&event->ctx->lock);
+
         /*
          * We can have double attach due to group movement in perf_event_open.
          */
@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
         struct perf_event *sibling, *tmp;
         struct list_head *list = NULL;
 
+        lockdep_assert_held(&event->ctx->lock);
+
         /*
          * We can have double detach due to exit/hot-unplug + close.
          */
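The two lockdep_assert_held() additions above document the locking rule that perf_group_attach() and perf_group_detach() must run with ctx->lock held, and (with lockdep enabled) turn a violation into a warning instead of a silent bug. As a rough illustration only, a userspace analogue of such an assertion could look like the sketch below; struct checked_lock and its helpers are invented stand-ins, not kernel or perf APIs.

#include <assert.h>
#include <pthread.h>

/*
 * Illustrative userspace analogue (not kernel code) of lockdep_assert_held():
 * the assertion turns "callers must hold the lock" from a comment into a
 * runtime check. struct checked_lock and its helpers are stand-ins.
 */
struct checked_lock {
        pthread_mutex_t m;
        pthread_t owner;
        int held;
};

void checked_lock_acquire(struct checked_lock *l)
{
        pthread_mutex_lock(&l->m);
        l->owner = pthread_self();
        l->held = 1;
}

void checked_lock_release(struct checked_lock *l)
{
        l->held = 0;
        pthread_mutex_unlock(&l->m);
}

void checked_assert_held(struct checked_lock *l)
{
        /* analogous in spirit to lockdep_assert_held(&ctx->lock) */
        assert(l->held && pthread_equal(l->owner, pthread_self()));
}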
@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
  */
 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 {
-        lockdep_assert_held(&event->ctx->mutex);
+        struct perf_event_context *ctx = event->ctx;
+
+        lockdep_assert_held(&ctx->mutex);
 
         event_function_call(event, __perf_remove_from_context, (void *)flags);
+
+        /*
+         * The above event_function_call() can NO-OP when it hits
+         * TASK_TOMBSTONE. In that case we must already have been detached
+         * from the context (by perf_event_exit_event()) but the grouping
+         * might still be in-tact.
+         */
+        WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
+        if ((flags & DETACH_GROUP) &&
+            (event->attach_state & PERF_ATTACH_GROUP)) {
+                /*
+                 * Since in that case we cannot possibly be scheduled, simply
+                 * detach now.
+                 */
+                raw_spin_lock_irq(&ctx->lock);
+                perf_group_detach(event);
+                raw_spin_unlock_irq(&ctx->lock);
+        }
 }
 
 /*
@@ -3464,14 +3487,15 @@ struct perf_read_data {
         int ret;
 };
 
-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
+static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
 {
-        int event_cpu = event->oncpu;
         u16 local_pkg, event_pkg;
 
         if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
-                event_pkg = topology_physical_package_id(event_cpu);
-                local_pkg = topology_physical_package_id(local_cpu);
+                int local_cpu = smp_processor_id();
+
+                event_pkg = topology_physical_package_id(event_cpu);
+                local_pkg = topology_physical_package_id(local_cpu);
 
                 if (event_pkg == local_pkg)
                         return local_cpu;
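The helper is renamed to __perf_event_read_cpu() and now receives a snapshotted event_cpu from its caller instead of reading event->oncpu itself; it also samples the local CPU via smp_processor_id() only inside the package check, where the caller (see the perf_event_read() hunk below) has preemption disabled. As an illustration only, the selection it performs boils down to the following userspace sketch; package_of() is an invented stand-in for topology_physical_package_id() and assumes four CPUs per package.

#include <stdio.h>

/*
 * Illustrative userspace sketch (not kernel code): when the event's group
 * allows package-wide reads (PERF_EV_CAP_READ_ACTIVE_PKG) and the caller sits
 * in the same package as the event's CPU, read locally and skip the IPI.
 */
int package_of(int cpu)
{
        return cpu / 4;         /* stand-in for topology_physical_package_id() */
}

int pick_read_cpu(int event_cpu, int local_cpu, int pkg_readable)
{
        if (pkg_readable && package_of(event_cpu) == package_of(local_cpu))
                return local_cpu;       /* same package: no cross-CPU call needed */
        return event_cpu;               /* otherwise read on the event's CPU */
}

int main(void)
{
        printf("%d\n", pick_read_cpu(5, 6, 1)); /* 6: CPUs 5 and 6 share package 1 */
        printf("%d\n", pick_read_cpu(5, 1, 1)); /* 5: caller is in package 0 */
        printf("%d\n", pick_read_cpu(5, 6, 0)); /* 5: package-wide read not allowed */
        return 0;
}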
@@ -3601,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event)
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
-        int ret = 0, cpu_to_read, local_cpu;
+        int event_cpu, ret = 0;
 
         /*
          * If event is enabled and currently active on a CPU, update the
@@ -3614,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group)
                         .ret = 0,
                 };
 
-                local_cpu = get_cpu();
-                cpu_to_read = find_cpu_to_read(event, local_cpu);
-                put_cpu();
+                event_cpu = READ_ONCE(event->oncpu);
+                if ((unsigned)event_cpu >= nr_cpu_ids)
+                        return 0;
+
+                preempt_disable();
+                event_cpu = __perf_event_read_cpu(event, event_cpu);
 
                 /*
                  * Purposely ignore the smp_call_function_single() return
                  * value.
                  *
-                 * If event->oncpu isn't a valid CPU it means the event got
+                 * If event_cpu isn't a valid CPU it means the event got
                  * scheduled out and that will have updated the event count.
                  *
                  * Therefore, either way, we'll have an up-to-date event count
                  * after this.
                  */
-                (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
+                (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
+                preempt_enable();
                 ret = data.ret;
         } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
                 struct perf_event_context *ctx = event->ctx;
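The rework above snapshots event->oncpu once with READ_ONCE(), rejects stale values before doing anything else, and keeps preemption disabled across both the CPU selection and the cross-CPU call. The bounds check relies on the unsigned-cast idiom: an event that was scheduled out leaves oncpu at -1, which the cast turns into a value larger than any valid CPU id. A small, purely illustrative userspace demonstration follows; nr_cpus is a stand-in for the kernel's nr_cpu_ids.

#include <stdio.h>

/*
 * Illustrative sketch of the bounds check added above: a CPU id snapshotted
 * from a concurrently-updated field may already be -1 (event scheduled out).
 * Casting to unsigned makes -1 huge, so one comparison rejects both "no CPU"
 * and out-of-range ids.
 */
static int cpu_is_usable(int cpu, unsigned int nr_cpus)
{
        return (unsigned int)cpu < nr_cpus;
}

int main(void)
{
        printf("%d\n", cpu_is_usable(3, 8));    /* 1: ok to send the IPI to CPU 3 */
        printf("%d\n", cpu_is_usable(-1, 8));   /* 0: event was scheduled out */
        return 0;
}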
@@ -6609,6 +6637,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
         char *buf = NULL;
         char *name;
 
+        if (vma->vm_flags & VM_READ)
+                prot |= PROT_READ;
+        if (vma->vm_flags & VM_WRITE)
+                prot |= PROT_WRITE;
+        if (vma->vm_flags & VM_EXEC)
+                prot |= PROT_EXEC;
+
+        if (vma->vm_flags & VM_MAYSHARE)
+                flags = MAP_SHARED;
+        else
+                flags = MAP_PRIVATE;
+
+        if (vma->vm_flags & VM_DENYWRITE)
+                flags |= MAP_DENYWRITE;
+        if (vma->vm_flags & VM_MAYEXEC)
+                flags |= MAP_EXECUTABLE;
+        if (vma->vm_flags & VM_LOCKED)
+                flags |= MAP_LOCKED;
+        if (vma->vm_flags & VM_HUGETLB)
+                flags |= MAP_HUGETLB;
+
         if (file) {
                 struct inode *inode;
                 dev_t dev;
@@ -6635,27 +6684,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
                 maj = MAJOR(dev);
                 min = MINOR(dev);
 
-                if (vma->vm_flags & VM_READ)
-                        prot |= PROT_READ;
-                if (vma->vm_flags & VM_WRITE)
-                        prot |= PROT_WRITE;
-                if (vma->vm_flags & VM_EXEC)
-                        prot |= PROT_EXEC;
-
-                if (vma->vm_flags & VM_MAYSHARE)
-                        flags = MAP_SHARED;
-                else
-                        flags = MAP_PRIVATE;
-
-                if (vma->vm_flags & VM_DENYWRITE)
-                        flags |= MAP_DENYWRITE;
-                if (vma->vm_flags & VM_MAYEXEC)
-                        flags |= MAP_EXECUTABLE;
-                if (vma->vm_flags & VM_LOCKED)
-                        flags |= MAP_LOCKED;
-                if (vma->vm_flags & VM_HUGETLB)
-                        flags |= MAP_HUGETLB;
-
                 goto got_name;
         } else {
                 if (vma->vm_ops && vma->vm_ops->name) {
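Taken together with the previous hunk, this change hoists the vm_flags-to-prot/flags translation out of the if (file) branch: the translation depends only on vma->vm_flags, so computing it up front means the prot and flags fields of the mmap record are also filled in for anonymous and other file-less mappings. A purely illustrative userspace restatement of the hoisted block is sketched below; the VMA_* constants are invented stand-ins for the kernel's VM_* flags, and only the PROT_* and MAP_SHARED/MAP_PRIVATE portion is shown.

#include <sys/mman.h>

/*
 * Illustrative restatement (not kernel code): the translation reads nothing
 * but the vma flags, which is why it can run before the if (file) branch.
 */
#define VMA_READ        0x1UL
#define VMA_WRITE       0x2UL
#define VMA_EXEC        0x4UL
#define VMA_MAYSHARE    0x8UL

void vma_flags_to_mmap_record(unsigned long vm_flags,
                              unsigned int *prot, unsigned int *flags)
{
        *prot = 0;
        if (vm_flags & VMA_READ)
                *prot |= PROT_READ;
        if (vm_flags & VMA_WRITE)
                *prot |= PROT_WRITE;
        if (vm_flags & VMA_EXEC)
                *prot |= PROT_EXEC;

        *flags = (vm_flags & VMA_MAYSHARE) ? MAP_SHARED : MAP_PRIVATE;
}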