aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/events/core.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-10-15 01:48:18 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-15 01:48:18 -0400
commit0429fbc0bdc297d64188483ba029a23773ae07b0 (patch)
tree67de46978c90f37540dd6ded1db20eb53a569030 /kernel/events/core.c
parent6929c358972facf2999f8768815c40dd88514fc2 (diff)
parent513d1a2884a49654f368b5fa25ef186e976bdada (diff)
Merge branch 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu consistent-ops changes from Tejun Heo: "Way back, before the current percpu allocator was implemented, static and dynamic percpu memory areas were allocated and handled separately and had their own accessors. The distinction has been gone for many years now; however, the now duplicate two sets of accessors remained with the pointer based ones - this_cpu_*() - evolving various other operations over time. During the process, we also accumulated other inconsistent operations. This pull request contains Christoph's patches to clean up the duplicate accessor situation. __get_cpu_var() uses are replaced with this_cpu_ptr() and __this_cpu_ptr() with raw_cpu_ptr(). Unfortunately, the former sometimes is tricky thanks to C being a bit messy with the distinction between lvalues and pointers, which led to a rather ugly solution for cpumask_var_t involving the introduction of this_cpu_cpumask_var_ptr(). This converts most of the uses but not all. Christoph will follow up with the remaining conversions in this merge window and hopefully remove the obsolete accessors" * 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (38 commits) irqchip: Properly fetch the per cpu offset percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t -fix ia64: sn_nodepda cannot be assigned to after this_cpu conversion. Use __this_cpu_write. 
percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t Revert "powerpc: Replace __get_cpu_var uses" percpu: Remove __this_cpu_ptr clocksource: Replace __this_cpu_ptr with raw_cpu_ptr sparc: Replace __get_cpu_var uses avr32: Replace __get_cpu_var with __this_cpu_write blackfin: Replace __get_cpu_var uses tile: Use this_cpu_ptr() for hardware counters tile: Replace __get_cpu_var uses powerpc: Replace __get_cpu_var uses alpha: Replace __get_cpu_var ia64: Replace __get_cpu_var uses s390: cio driver &__get_cpu_var replacements s390: Replace __get_cpu_var uses mips: Replace __get_cpu_var uses MIPS: Replace __get_cpu_var uses in FPU emulator. arm: Replace __this_cpu_ptr with raw_cpu_ptr ...
Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--kernel/events/core.c24
1 files changed, 12 insertions, 12 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 094df8c0742d..1425d07018de 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -249,7 +249,7 @@ static void perf_duration_warn(struct irq_work *w)
249 u64 avg_local_sample_len; 249 u64 avg_local_sample_len;
250 u64 local_samples_len; 250 u64 local_samples_len;
251 251
252 local_samples_len = __get_cpu_var(running_sample_length); 252 local_samples_len = __this_cpu_read(running_sample_length);
253 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES; 253 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
254 254
255 printk_ratelimited(KERN_WARNING 255 printk_ratelimited(KERN_WARNING
@@ -271,10 +271,10 @@ void perf_sample_event_took(u64 sample_len_ns)
271 return; 271 return;
272 272
273 /* decay the counter by 1 average sample */ 273 /* decay the counter by 1 average sample */
274 local_samples_len = __get_cpu_var(running_sample_length); 274 local_samples_len = __this_cpu_read(running_sample_length);
275 local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES; 275 local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
276 local_samples_len += sample_len_ns; 276 local_samples_len += sample_len_ns;
277 __get_cpu_var(running_sample_length) = local_samples_len; 277 __this_cpu_write(running_sample_length, local_samples_len);
278 278
279 /* 279 /*
280 * note: this will be biased artifically low until we have 280 * note: this will be biased artifically low until we have
@@ -882,7 +882,7 @@ static DEFINE_PER_CPU(struct list_head, rotation_list);
882static void perf_pmu_rotate_start(struct pmu *pmu) 882static void perf_pmu_rotate_start(struct pmu *pmu)
883{ 883{
884 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); 884 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
885 struct list_head *head = &__get_cpu_var(rotation_list); 885 struct list_head *head = this_cpu_ptr(&rotation_list);
886 886
887 WARN_ON(!irqs_disabled()); 887 WARN_ON(!irqs_disabled());
888 888
@@ -2462,7 +2462,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
2462 * to check if we have to switch out PMU state. 2462 * to check if we have to switch out PMU state.
2463 * cgroup event are system-wide mode only 2463 * cgroup event are system-wide mode only
2464 */ 2464 */
2465 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2465 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
2466 perf_cgroup_sched_out(task, next); 2466 perf_cgroup_sched_out(task, next);
2467} 2467}
2468 2468
@@ -2705,11 +2705,11 @@ void __perf_event_task_sched_in(struct task_struct *prev,
2705 * to check if we have to switch in PMU state. 2705 * to check if we have to switch in PMU state.
2706 * cgroup event are system-wide mode only 2706 * cgroup event are system-wide mode only
2707 */ 2707 */
2708 if (atomic_read(&__get_cpu_var(perf_cgroup_events))) 2708 if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
2709 perf_cgroup_sched_in(prev, task); 2709 perf_cgroup_sched_in(prev, task);
2710 2710
2711 /* check for system-wide branch_stack events */ 2711 /* check for system-wide branch_stack events */
2712 if (atomic_read(&__get_cpu_var(perf_branch_stack_events))) 2712 if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
2713 perf_branch_stack_sched_in(prev, task); 2713 perf_branch_stack_sched_in(prev, task);
2714} 2714}
2715 2715
@@ -2964,7 +2964,7 @@ bool perf_event_can_stop_tick(void)
2964 2964
2965void perf_event_task_tick(void) 2965void perf_event_task_tick(void)
2966{ 2966{
2967 struct list_head *head = &__get_cpu_var(rotation_list); 2967 struct list_head *head = this_cpu_ptr(&rotation_list);
2968 struct perf_cpu_context *cpuctx, *tmp; 2968 struct perf_cpu_context *cpuctx, *tmp;
2969 struct perf_event_context *ctx; 2969 struct perf_event_context *ctx;
2970 int throttled; 2970 int throttled;
@@ -5833,7 +5833,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
5833 struct perf_sample_data *data, 5833 struct perf_sample_data *data,
5834 struct pt_regs *regs) 5834 struct pt_regs *regs)
5835{ 5835{
5836 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5836 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
5837 struct perf_event *event; 5837 struct perf_event *event;
5838 struct hlist_head *head; 5838 struct hlist_head *head;
5839 5839
@@ -5852,7 +5852,7 @@ end:
5852 5852
5853int perf_swevent_get_recursion_context(void) 5853int perf_swevent_get_recursion_context(void)
5854{ 5854{
5855 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5855 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
5856 5856
5857 return get_recursion_context(swhash->recursion); 5857 return get_recursion_context(swhash->recursion);
5858} 5858}
@@ -5860,7 +5860,7 @@ EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
5860 5860
5861inline void perf_swevent_put_recursion_context(int rctx) 5861inline void perf_swevent_put_recursion_context(int rctx)
5862{ 5862{
5863 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5863 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
5864 5864
5865 put_recursion_context(swhash->recursion, rctx); 5865 put_recursion_context(swhash->recursion, rctx);
5866} 5866}
@@ -5889,7 +5889,7 @@ static void perf_swevent_read(struct perf_event *event)
5889 5889
5890static int perf_swevent_add(struct perf_event *event, int flags) 5890static int perf_swevent_add(struct perf_event *event, int flags)
5891{ 5891{
5892 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable); 5892 struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
5893 struct hw_perf_event *hwc = &event->hw; 5893 struct hw_perf_event *hwc = &event->hw;
5894 struct hlist_head *head; 5894 struct hlist_head *head;
5895 5895