Diffstat (limited to 'kernel')
 kernel/events/core.c | 69
 1 file changed, 46 insertions(+), 23 deletions(-)
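In short: list_add_event() loses a stray blank line; perf_group_attach() and perf_group_detach() gain lockdep_assert_held(&ctx->lock) annotations; perf_remove_from_context() learns to detach the group itself when event_function_call() no-ops on an already-exited (TASK_TOMBSTONE) task; and perf_event_mmap_event() computes prot/flags before the if (file) branch so file-less mappings are covered too.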
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 110b38a58493..e5aaa806702d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1469,7 +1469,6 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
-
 	lockdep_assert_held(&ctx->lock);
 
 	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
@@ -1624,6 +1623,8 @@ static void perf_group_attach(struct perf_event *event)
 {
 	struct perf_event *group_leader = event->group_leader, *pos;
 
+	lockdep_assert_held(&event->ctx->lock);
+
 	/*
 	 * We can have double attach due to group movement in perf_event_open.
 	 */
@@ -1697,6 +1698,8 @@ static void perf_group_detach(struct perf_event *event)
 	struct perf_event *sibling, *tmp;
 	struct list_head *list = NULL;
 
+	lockdep_assert_held(&event->ctx->lock);
+
 	/*
 	 * We can have double detach due to exit/hot-unplug + close.
 	 */
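The two assertions above document the locking rule that group attach and detach must run under ctx->lock. For readers unfamiliar with the primitive: lockdep_assert_held() costs nothing on production configs and only warns when lock debugging is enabled, which is why it can be sprinkled around freely as executable documentation. A minimal sketch of the idea (mirroring the behaviour of the macro in <linux/lockdep.h>, not its exact text):

/*
 * Illustrative sketch only -- the real definition lives in
 * <linux/lockdep.h>.
 */
#ifdef CONFIG_LOCKDEP
#define lockdep_assert_held(l)	do { \
		WARN_ON(debug_locks && !lockdep_is_held(l)); \
	} while (0)
#else
#define lockdep_assert_held(l)	do { (void)(l); } while (0)
#endif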
@@ -1895,9 +1898,29 @@ __perf_remove_from_context(struct perf_event *event,
  */
 static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 {
-	lockdep_assert_held(&event->ctx->mutex);
+	struct perf_event_context *ctx = event->ctx;
+
+	lockdep_assert_held(&ctx->mutex);
 
 	event_function_call(event, __perf_remove_from_context, (void *)flags);
+
+	/*
+	 * The above event_function_call() can NO-OP when it hits
+	 * TASK_TOMBSTONE. In that case we must already have been detached
+	 * from the context (by perf_event_exit_event()) but the grouping
+	 * might still be intact.
+	 */
+	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
+	if ((flags & DETACH_GROUP) &&
+	    (event->attach_state & PERF_ATTACH_GROUP)) {
+		/*
+		 * Since in that case we cannot possibly be scheduled, simply
+		 * detach now.
+		 */
+		raw_spin_lock_irq(&ctx->lock);
+		perf_group_detach(event);
+		raw_spin_unlock_irq(&ctx->lock);
+	}
 }
 
 /*
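For context on why event_function_call() can no-op: when the target task exits, perf_event_exit_task_context() swaps ctx->task to the TASK_TOMBSTONE sentinel ((struct task_struct *)-1L) and the cross-call bails out, leaving a DETACH_GROUP request unperformed -- exactly the gap the new code closes under ctx->lock. A paraphrased sketch of the relevant early exit (trimmed from kernel/events/core.c, not the verbatim function):

/*
 * Paraphrased sketch; the real function also handles CPU contexts
 * (!ctx->task) and retries races with context switches.
 */
static void event_function_call(struct perf_event *event, event_f func, void *data)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = READ_ONCE(ctx->task);

	if (task == TASK_TOMBSTONE)
		return;		/* task already exited: caller must clean up */

	/* ... otherwise IPI the task's CPU and run func() under ctx->lock ... */
}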
@@ -6609,6 +6632,27 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 	char *buf = NULL;
 	char *name;
 
+	if (vma->vm_flags & VM_READ)
+		prot |= PROT_READ;
+	if (vma->vm_flags & VM_WRITE)
+		prot |= PROT_WRITE;
+	if (vma->vm_flags & VM_EXEC)
+		prot |= PROT_EXEC;
+
+	if (vma->vm_flags & VM_MAYSHARE)
+		flags = MAP_SHARED;
+	else
+		flags = MAP_PRIVATE;
+
+	if (vma->vm_flags & VM_DENYWRITE)
+		flags |= MAP_DENYWRITE;
+	if (vma->vm_flags & VM_MAYEXEC)
+		flags |= MAP_EXECUTABLE;
+	if (vma->vm_flags & VM_LOCKED)
+		flags |= MAP_LOCKED;
+	if (vma->vm_flags & VM_HUGETLB)
+		flags |= MAP_HUGETLB;
+
 	if (file) {
 		struct inode *inode;
 		dev_t dev;
@@ -6635,27 +6679,6 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 		maj = MAJOR(dev);
 		min = MINOR(dev);
 
-		if (vma->vm_flags & VM_READ)
-			prot |= PROT_READ;
-		if (vma->vm_flags & VM_WRITE)
-			prot |= PROT_WRITE;
-		if (vma->vm_flags & VM_EXEC)
-			prot |= PROT_EXEC;
-
-		if (vma->vm_flags & VM_MAYSHARE)
-			flags = MAP_SHARED;
-		else
-			flags = MAP_PRIVATE;
-
-		if (vma->vm_flags & VM_DENYWRITE)
-			flags |= MAP_DENYWRITE;
-		if (vma->vm_flags & VM_MAYEXEC)
-			flags |= MAP_EXECUTABLE;
-		if (vma->vm_flags & VM_LOCKED)
-			flags |= MAP_LOCKED;
-		if (vma->vm_flags & VM_HUGETLB)
-			flags |= MAP_HUGETLB;
-
 		goto got_name;
 	} else {
 		if (vma->vm_ops && vma->vm_ops->name) {
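The last two hunks are pure code motion: the translation depends only on vma->vm_flags, never on file, so hoisting it above the if (file) branch lets PERF_RECORD_MMAP2 report prot/flags for anonymous and other file-less mappings too. The logic can be exercised stand-alone from user space; in the sketch below the VM_* values are copied by hand from <linux/mm.h> purely for illustration (they are kernel-internal, not a public ABI), and the guarded MAP_* fallbacks mirror <asm-generic/mman.h>.

/*
 * Stand-alone demo of the hoisted vm_flags -> prot/flags translation.
 * Build with: cc -o vmflags vmflags.c
 */
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_DENYWRITE
#define MAP_DENYWRITE	0x0800
#endif
#ifndef MAP_EXECUTABLE
#define MAP_EXECUTABLE	0x1000
#endif
#ifndef MAP_LOCKED
#define MAP_LOCKED	0x2000
#endif
#ifndef MAP_HUGETLB
#define MAP_HUGETLB	0x40000
#endif

/* Hand-copied from <linux/mm.h>; for illustration only. */
#define VM_READ		0x00000001UL
#define VM_WRITE	0x00000002UL
#define VM_EXEC		0x00000004UL
#define VM_MAYEXEC	0x00000040UL
#define VM_MAYSHARE	0x00000080UL
#define VM_DENYWRITE	0x00000800UL
#define VM_LOCKED	0x00002000UL
#define VM_HUGETLB	0x00400000UL

/* Same logic as the block the patch hoists above "if (file)". */
static void translate(unsigned long vm_flags, unsigned int *prot,
		      unsigned int *flags)
{
	*prot = 0;

	if (vm_flags & VM_READ)
		*prot |= PROT_READ;
	if (vm_flags & VM_WRITE)
		*prot |= PROT_WRITE;
	if (vm_flags & VM_EXEC)
		*prot |= PROT_EXEC;

	if (vm_flags & VM_MAYSHARE)
		*flags = MAP_SHARED;
	else
		*flags = MAP_PRIVATE;

	if (vm_flags & VM_DENYWRITE)
		*flags |= MAP_DENYWRITE;
	if (vm_flags & VM_MAYEXEC)
		*flags |= MAP_EXECUTABLE;
	if (vm_flags & VM_LOCKED)
		*flags |= MAP_LOCKED;
	if (vm_flags & VM_HUGETLB)
		*flags |= MAP_HUGETLB;
}

int main(void)
{
	unsigned int prot, flags;

	/* A typical anonymous read-write private mapping. */
	translate(VM_READ | VM_WRITE, &prot, &flags);
	printf("prot=%#x flags=%#x\n", prot, flags);
	return 0;
}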