author     Linus Torvalds <torvalds@linux-foundation.org>  2019-10-12 18:15:17 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-10-12 18:15:17 -0400
commit     465a7e291fd4f056d81baf5d5ed557bdb44c5457 (patch)
tree       bd0db709f63d89ad2b57484d9da7c5ad8febd1bf /kernel
parent     9b4e40c8fe1e120fef93985de7ff6a97fe9e7dd3 (diff)
parent     52e92f409dede388b7dc3ee13491fbf7a80db935 (diff)
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
 "Mostly tooling fixes, but also a couple of updates for new Intel
  models (which are technically hw-enablement, but to users it's a fix
  to perf behavior on those new CPUs - hope this is fine), an AUX
  inheritance fix, event time-sharing fix, and a fix for lost non-perf
  NMI events on AMD systems"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
  perf/x86/cstate: Add Tiger Lake CPU support
  perf/x86/msr: Add Tiger Lake CPU support
  perf/x86/intel: Add Tiger Lake CPU support
  perf/x86/cstate: Update C-state counters for Ice Lake
  perf/x86/msr: Add new CPU model numbers for Ice Lake
  perf/x86/cstate: Add Comet Lake CPU support
  perf/x86/msr: Add Comet Lake CPU support
  perf/x86/intel: Add Comet Lake CPU support
  perf/x86/amd: Change/fix NMI latency mitigation to use a timestamp
  perf/core: Fix corner case in perf_rotate_context()
  perf/core: Rework memory accounting in perf_mmap()
  perf/core: Fix inheritance of aux_output groups
  perf annotate: Don't return -1 for error when doing BPF disassembly
  perf annotate: Return appropriate error code for allocation failures
  perf annotate: Fix arch specific ->init() failure errors
  perf annotate: Propagate the symbol__annotate() error return
  perf annotate: Fix the signedness of failure returns
  perf annotate: Propagate perf_env__arch() error
  perf evsel: Fall back to global 'perf_env' in perf_evsel__env()
  perf tools: Propagate get_cpuid() error
  ...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c  43
1 file changed, 36 insertions(+), 7 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3f0cb82e4fbc..9ec0b0bfddbd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3779,11 +3779,23 @@ static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
         perf_event_groups_insert(&ctx->flexible_groups, event);
 }
 
+/* pick an event from the flexible_groups to rotate */
 static inline struct perf_event *
-ctx_first_active(struct perf_event_context *ctx)
+ctx_event_to_rotate(struct perf_event_context *ctx)
 {
-        return list_first_entry_or_null(&ctx->flexible_active,
-                                        struct perf_event, active_list);
+        struct perf_event *event;
+
+        /* pick the first active flexible event */
+        event = list_first_entry_or_null(&ctx->flexible_active,
+                                         struct perf_event, active_list);
+
+        /* if no active flexible event, pick the first event */
+        if (!event) {
+                event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree),
+                                      typeof(*event), group_node);
+        }
+
+        return event;
 }
 
 static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
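A note on the corner case addressed above (paraphrasing the change, not the commit message): when no flexible event is currently active, for example because the first flexible event failed to schedule, the old ctx_first_active() returned NULL and rotation had nothing to start from even though inactive flexible events were waiting; ctx_event_to_rotate() now falls back to the first event in the flexible_groups tree. Below is a minimal user-space sketch of that selection policy, with made-up names (fake_event, pick_event_to_rotate) standing in for the real perf structures:

/*
 * Simplified stand-in for ctx_event_to_rotate(): prefer the first
 * active event, otherwise fall back to the first event overall so
 * that inactive events still get a chance to be rotated in.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_event {
        const char *name;
        int active;
};

static struct fake_event *pick_event_to_rotate(struct fake_event *evts, size_t n)
{
        /* first active event, if any (mirrors the flexible_active list) */
        for (size_t i = 0; i < n; i++)
                if (evts[i].active)
                        return &evts[i];

        /* no active event: fall back to the first one (mirrors rb_first()) */
        return n ? &evts[0] : NULL;
}

int main(void)
{
        struct fake_event evts[] = {
                { "cycles",       0 },
                { "instructions", 0 },
        };
        struct fake_event *e = pick_event_to_rotate(evts, 2);

        printf("rotate starting from: %s\n", e ? e->name : "(none)");
        return 0;
}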
@@ -3808,9 +3820,9 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
         perf_pmu_disable(cpuctx->ctx.pmu);
 
         if (task_rotate)
-                task_event = ctx_first_active(task_ctx);
+                task_event = ctx_event_to_rotate(task_ctx);
         if (cpu_rotate)
-                cpu_event = ctx_first_active(&cpuctx->ctx);
+                cpu_event = ctx_event_to_rotate(&cpuctx->ctx);
 
         /*
          * As per the order given at ctx_resched() first 'pop' task flexible
@@ -5668,7 +5680,8 @@ again:
          * undo the VM accounting.
          */
 
-        atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+        atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked,
+                        &mmap_user->locked_vm);
         atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
         free_uid(mmap_user);
 
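For orientation, a worked example (numbers invented) of why the undo above changes: with the reworked charging later in this diff, only the pages that fit under the per-user limit are added to locked_vm, while the overflow, remembered in mmap_locked, is charged to pinned_vm, so the teardown has to subtract the same split rather than the full buffer size from locked_vm:

/*
 * Worked example of the adjusted undo, with invented numbers.
 * Suppose the buffer is 16 data pages plus 1 user page, and 5 of
 * those pages had been charged to pinned_vm (mmap_locked == 5)
 * because the per-user locked_vm limit was reached.
 */
#include <stdio.h>

int main(void)
{
        long total_pages = 16 + 1;      /* (size >> PAGE_SHIFT) + 1   */
        long mmap_locked = 5;           /* pages charged to pinned_vm */

        /* only the part that actually went to locked_vm is removed from it */
        printf("locked_vm -= %ld\n", total_pages - mmap_locked);        /* 12 */
        printf("pinned_vm -= %ld\n", mmap_locked);                      /*  5 */
        return 0;
}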
@@ -5812,8 +5825,20 @@ accounting:
 
         user_locked = atomic_long_read(&user->locked_vm) + user_extra;
 
-        if (user_locked > user_lock_limit)
+        if (user_locked <= user_lock_limit) {
+                /* charge all to locked_vm */
+        } else if (atomic_long_read(&user->locked_vm) >= user_lock_limit) {
+                /* charge all to pinned_vm */
+                extra = user_extra;
+                user_extra = 0;
+        } else {
+                /*
+                 * charge locked_vm until it hits user_lock_limit;
+                 * charge the rest from pinned_vm
+                 */
                 extra = user_locked - user_lock_limit;
+                user_extra -= extra;
+        }
 
         lock_limit = rlimit(RLIMIT_MEMLOCK);
         lock_limit >>= PAGE_SHIFT;
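To make the three branches above concrete, here is a self-contained sketch of the charging split with invented numbers; the variable names mirror the kernel code, but this is plain user-space C, not the real accounting:

/*
 * Pages are charged to the per-user locked_vm counter until it reaches
 * user_lock_limit; anything beyond that goes to the per-mm pinned_vm
 * counter instead.
 */
#include <stdio.h>

int main(void)
{
        long user_lock_limit = 128;  /* pages allowed in locked_vm        */
        long locked_vm       = 100;  /* pages already charged to the user */
        long user_extra      = 50;   /* pages requested by this mmap      */
        long extra           = 0;    /* pages to charge to pinned_vm      */

        long user_locked = locked_vm + user_extra;

        if (user_locked <= user_lock_limit) {
                /* everything fits: charge all pages to locked_vm */
        } else if (locked_vm >= user_lock_limit) {
                /* already over the limit: charge all pages to pinned_vm */
                extra = user_extra;
                user_extra = 0;
        } else {
                /* split: fill locked_vm up to the limit, rest to pinned_vm */
                extra = user_locked - user_lock_limit;   /* 150 - 128 = 22 */
                user_extra -= extra;                     /*  50 -  22 = 28 */
        }

        printf("locked_vm += %ld, pinned_vm += %ld\n", user_extra, extra);
        return 0;
}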
@@ -11862,6 +11887,10 @@ static int inherit_group(struct perf_event *parent_event,
                                             child, leader, child_ctx);
                 if (IS_ERR(child_ctr))
                         return PTR_ERR(child_ctr);
+
+                if (sub->aux_event == parent_event &&
+                    !perf_get_aux_event(child_ctr, leader))
+                        return -EINVAL;
         }
         return 0;
 }