Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c | 103
1 file changed, 5 insertions, 98 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 92b8811f223..0e8457da6f9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -30,7 +30,6 @@
 #include <linux/hardirq.h>
 #include <linux/rculist.h>
 #include <linux/uaccess.h>
-#include <linux/suspend.h>
 #include <linux/syscalls.h>
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
@@ -3545,7 +3544,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 		struct ring_buffer *rb = event->rb;

 		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-		vma->vm_mm->locked_vm -= event->mmap_locked;
+		vma->vm_mm->pinned_vm -= event->mmap_locked;
 		rcu_assign_pointer(event->rb, NULL);
 		mutex_unlock(&event->mmap_mutex);

@@ -3626,7 +3625,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)

 	lock_limit = rlimit(RLIMIT_MEMLOCK);
 	lock_limit >>= PAGE_SHIFT;
-	locked = vma->vm_mm->locked_vm + extra;
+	locked = vma->vm_mm->pinned_vm + extra;

 	if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
 		!capable(CAP_IPC_LOCK)) {
@@ -3652,7 +3651,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 	atomic_long_add(user_extra, &user->locked_vm);
 	event->mmap_locked = extra;
 	event->mmap_user = get_current_user();
-	vma->vm_mm->locked_vm += event->mmap_locked;
+	vma->vm_mm->pinned_vm += event->mmap_locked;

 unlock:
 	if (!ret)
@@ -6854,7 +6853,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

 	mutex_lock(&swhash->hlist_mutex);
-	if (swhash->hlist_refcount > 0 && !swhash->swevent_hlist) {
+	if (swhash->hlist_refcount > 0) {
 		struct swevent_hlist *hlist;

 		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -6943,14 +6942,7 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;

-	/*
-	 * Ignore suspend/resume action, the perf_pm_notifier will
-	 * take care of that.
-	 */
-	if (action & CPU_TASKS_FROZEN)
-		return NOTIFY_OK;
-
-	switch (action) {
+	switch (action & ~CPU_TASKS_FROZEN) {

 	case CPU_UP_PREPARE:
 	case CPU_DOWN_FAILED:
@@ -6969,90 +6961,6 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }

-static void perf_pm_resume_cpu(void *unused)
-{
-	struct perf_cpu_context *cpuctx;
-	struct perf_event_context *ctx;
-	struct pmu *pmu;
-	int idx;
-
-	idx = srcu_read_lock(&pmus_srcu);
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-		ctx = cpuctx->task_ctx;
-
-		perf_ctx_lock(cpuctx, ctx);
-		perf_pmu_disable(cpuctx->ctx.pmu);
-
-		cpu_ctx_sched_out(cpuctx, EVENT_ALL);
-		if (ctx)
-			ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-
-		perf_pmu_enable(cpuctx->ctx.pmu);
-		perf_ctx_unlock(cpuctx, ctx);
-	}
-	srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static void perf_pm_suspend_cpu(void *unused)
-{
-	struct perf_cpu_context *cpuctx;
-	struct perf_event_context *ctx;
-	struct pmu *pmu;
-	int idx;
-
-	idx = srcu_read_lock(&pmus_srcu);
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-		ctx = cpuctx->task_ctx;
-
-		perf_ctx_lock(cpuctx, ctx);
-		perf_pmu_disable(cpuctx->ctx.pmu);
-
-		perf_event_sched_in(cpuctx, ctx, current);
-
-		perf_pmu_enable(cpuctx->ctx.pmu);
-		perf_ctx_unlock(cpuctx, ctx);
-	}
-	srcu_read_unlock(&pmus_srcu, idx);
-}
-
-static int perf_resume(void)
-{
-	get_online_cpus();
-	smp_call_function(perf_pm_resume_cpu, NULL, 1);
-	put_online_cpus();
-
-	return NOTIFY_OK;
-}
-
-static int perf_suspend(void)
-{
-	get_online_cpus();
-	smp_call_function(perf_pm_suspend_cpu, NULL, 1);
-	put_online_cpus();
-
-	return NOTIFY_OK;
-}
-
-static int perf_pm(struct notifier_block *self, unsigned long action, void *ptr)
-{
-	switch (action) {
-	case PM_POST_HIBERNATION:
-	case PM_POST_SUSPEND:
-		return perf_resume();
-	case PM_HIBERNATION_PREPARE:
-	case PM_SUSPEND_PREPARE:
-		return perf_suspend();
-	default:
-		return NOTIFY_DONE;
-	}
-}
-
-static struct notifier_block perf_pm_notifier = {
-	.notifier_call = perf_pm,
-};
-
 void __init perf_event_init(void)
 {
 	int ret;
@@ -7067,7 +6975,6 @@ void __init perf_event_init(void)
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
 	register_reboot_notifier(&perf_reboot_notifier);
-	register_pm_notifier(&perf_pm_notifier);

 	ret = init_hw_breakpoint();
 	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
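
The heart of the change to perf_cpu_notify() is the new switch (action & ~CPU_TASKS_FROZEN): clearing CPU_TASKS_FROZEN from the action makes the frozen variants of the hotplug notifications (the ones raised during suspend/resume, e.g. CPU_UP_PREPARE_FROZEN) fall into the same cases as their normal counterparts, which is what allows the separate perf_pm_notifier machinery above to be deleted. The following is only a minimal sketch of that notifier pattern, not part of the patch; the example_* names are hypothetical.

/*
 * Illustrative sketch: an old-style CPU hotplug notifier that handles the
 * frozen (suspend/resume) notifications in the same branches as the normal
 * ones by masking CPU_TASKS_FROZEN out of the action.
 */
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int example_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	pr_debug("example notifier: cpu %u, action %lu\n", cpu, action);

	/* CPU_UP_PREPARE_FROZEN == CPU_UP_PREPARE | CPU_TASKS_FROZEN, etc. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		/* (re)initialise per-cpu state for this cpu here */
		break;
	case CPU_DOWN_PREPARE:
		/* tear down per-cpu state for this cpu here */
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

A real subsystem would register such a callback with something like register_hotcpu_notifier(); perf itself uses its perf_cpu_notifier() helper, as seen in the last hunk above.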