about summary refs log tree commit diff stats
path: root/kernel/events
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-02-16 17:58:12 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-16 17:58:12 -0500
commit37507717de51a8332a34ee07fd88700be88df5bf (patch)
treed6eb5d00a798a4b1ce40c8c4c8ca74b0d22fe1df /kernel/events
parenta68fb48380bb993306dd62a58cbd946b4348222a (diff)
parenta66734297f78707ce39d756b656bfae861d53f62 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 perf updates from Ingo Molnar: "This series tightens up RDPMC permissions: currently even highly sandboxed x86 execution environments (such as seccomp) have permission to execute RDPMC, which may leak various perf events / PMU state such as timing information and other CPU execution details. This 'all is allowed' RDPMC mode is still preserved as the (non-default) /sys/devices/cpu/rdpmc=2 setting. The new default is that RDPMC access is only allowed if a perf event is mmap-ed (which is needed to correctly interpret RDPMC counter values in any case). As a side effect of these changes CR4 handling is cleaned up in the x86 code and a shadow copy of the CR4 value is added. The extra CR4 manipulation adds ~ <50ns to the context switch cost between rdpmc-capable and rdpmc-non-capable mms" * 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: perf/x86: Add /sys/devices/cpu/rdpmc=2 to allow rdpmc for all tasks perf/x86: Only allow rdpmc if a perf_event is mapped perf: Pass the event to arch_perf_update_userpage() perf: Add pmu callbacks to track event mapping and unmapping x86: Add a comment clarifying LDT context switching x86: Store a per-cpu shadow copy of CR4 x86: Clean up cr4 manipulation
Diffstat (limited to 'kernel/events')
-rw-r--r--kernel/events/core.c14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 8812d8e35f5b..f04daabfd1cf 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4101,7 +4101,8 @@ unlock:
 	rcu_read_unlock();
 }
 
-void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
+void __weak arch_perf_update_userpage(
+	struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now)
 {
 }
 
@@ -4151,7 +4152,7 @@ void perf_event_update_userpage(struct perf_event *event)
 	userpg->time_running = running +
 			atomic64_read(&event->child_total_time_running);
 
-	arch_perf_update_userpage(userpg, now);
+	arch_perf_update_userpage(event, userpg, now);
 
 	barrier();
 	++userpg->lock;
@@ -4293,6 +4294,9 @@ static void perf_mmap_open(struct vm_area_struct *vma)
 
 	atomic_inc(&event->mmap_count);
 	atomic_inc(&event->rb->mmap_count);
+
+	if (event->pmu->event_mapped)
+		event->pmu->event_mapped(event);
 }
 
 /*
@@ -4312,6 +4316,9 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 	int mmap_locked = rb->mmap_locked;
 	unsigned long size = perf_data_size(rb);
 
+	if (event->pmu->event_unmapped)
+		event->pmu->event_unmapped(event);
+
 	atomic_dec(&rb->mmap_count);
 
 	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
@@ -4513,6 +4520,9 @@ unlock:
 	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_ops = &perf_mmap_vmops;
 
+	if (event->pmu->event_mapped)
+		event->pmu->event_mapped(event);
+
 	return ret;
 }
 