 arch/x86/events/core.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index e07b36c5588a..183a972f9210 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2109,6 +2109,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
 	if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
 		return;
 
+	/*
+	 * This function relies on not being called concurrently in two
+	 * tasks in the same mm.  Otherwise one task could observe
+	 * perf_rdpmc_allowed > 1 and return all the way back to
+	 * userspace with CR4.PCE clear while another task is still
+	 * doing on_each_cpu_mask() to propagate CR4.PCE.
+	 *
+	 * For now, this can't happen because all callers hold mmap_sem
+	 * for write.  If this changes, we'll need a different solution.
+	 */
+	lockdep_assert_held_exclusive(&current->mm->mmap_sem);
+
 	if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
 }
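
Not part of the patch, but to make the race in the new comment concrete: below is a minimal userspace sketch that models it with pthreads. Everything here is invented for illustration (the variable names, the sleep standing in for the on_each_cpu_mask() IPI round); it is an analogy, not kernel code. Task B observes the counter already above zero and "returns to userspace" while task A is still propagating the enable bit, which is exactly what the lockdep assertion guards against.

/*
 * Illustration only: models the race described in the comment above.
 * rdpmc_allowed stands in for mm->context.perf_rdpmc_allowed and
 * pce_set for CR4.PCE on every CPU; neither names nor timing match
 * the kernel.  Build with: gcc -pthread race.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int rdpmc_allowed;	/* models mm->context.perf_rdpmc_allowed */
static atomic_int pce_set;		/* models CR4.PCE being set everywhere */

static void *map_event(void *arg)
{
	/*
	 * atomic_fetch_add() returns the old value, so "+ 1 == 1"
	 * mirrors the kernel's atomic_inc_return(...) == 1 check.
	 */
	if (atomic_fetch_add(&rdpmc_allowed, 1) + 1 == 1) {
		usleep(10000);		/* models the slow on_each_cpu_mask() call */
		atomic_store(&pce_set, 1);
	}
	/*
	 * Back "in userspace": with no lock serializing the two calls,
	 * the second task gets here while the first is still sleeping.
	 */
	if (!atomic_load(&pce_set))
		printf("task %ld: PCE still clear after mapping\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, map_event, (void *)1L);
	pthread_create(&b, NULL, map_event, (void *)2L);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

If both calls were serialized under a writer lock (the analogue of holding mmap_sem for write, as all current callers do), the second task could not enter map_event() until the first had finished setting the bit, and the message would never print.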
