author     Christoph Lameter <cl@linux.com>    2014-08-17 13:30:27 -0400
committer  Tejun Heo <tj@kernel.org>           2014-08-26 13:45:45 -0400
commit     4a32fea9d78f2d2315c0072757b197d5a304dc8b (patch)
tree       6cb53d3bb67bed81671ff9ff38d2f48d118a2345 /kernel
parent     dc5df73b3afffc8d042dadffc1c959008b2c1163 (diff)
scheduler: Replace __get_cpu_var with this_cpu_ptr
Convert all uses of __get_cpu_var for address calculation to use
this_cpu_ptr instead.

[Uses of __get_cpu_var with cpumask_var_t are no longer handled by this patch]

Cc: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
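For readers unfamiliar with the accessor change being applied here, a minimal
sketch of the conversion pattern follows; the per-cpu variables and the helper
function are illustrative only and do not appear in this patch. __get_cpu_var()
yields the per-cpu lvalue itself, so callers took its address with &, whereas
this_cpu_ptr() returns the address directly; plain scalar loads and stores
become __this_cpu_read()/__this_cpu_write(). As in the converted sites, the
caller is assumed to run with preemption (or interrupts) disabled.

	#include <linux/percpu.h>
	#include <linux/list.h>

	/* Hypothetical per-cpu variables, for illustration only. */
	static DEFINE_PER_CPU(struct list_head, example_list);
	static DEFINE_PER_CPU(u64, example_len);

	static void example_update(u64 delta)
	{
		/*
		 * Before: __get_cpu_var() returned the lvalue, so the
		 * address had to be taken explicitly:
		 *	struct list_head *head = &__get_cpu_var(example_list);
		 */

		/* After: this_cpu_ptr() already returns the address. */
		struct list_head *head = this_cpu_ptr(&example_list);

		INIT_LIST_HEAD(head);

		/* Scalar accesses use the read/write helpers instead of
		 * assigning through the __get_cpu_var() lvalue. */
		u64 len = __this_cpu_read(example_len);
		__this_cpu_write(example_len, len + delta);
	}

The __this_cpu_ptr()/__raw_get_cpu_var() sites below (taskstats.c, raw_rq())
follow the same shape but convert to raw_cpu_ptr(), the variant without the
preemption sanity check.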
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/events/callchain.c        4
-rw-r--r--   kernel/events/core.c            24
-rw-r--r--   kernel/sched/sched.h             4
-rw-r--r--   kernel/taskstats.c               2
-rw-r--r--   kernel/time/tick-sched.c         4
-rw-r--r--   kernel/user-return-notifier.c    4
6 files changed, 21 insertions, 21 deletions
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 97b67df8fbfe..c4f63e68a35c 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -137,7 +137,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
 	int cpu;
 	struct callchain_cpus_entries *entries;
 
-	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
 	if (*rctx == -1)
 		return NULL;
 
@@ -153,7 +153,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
 static void
 put_callchain_entry(int rctx)
 {
-	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
 }
 
 struct perf_callchain_entry *
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1cf24b3e42ec..4d44e40a0483 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -239,7 +239,7 @@ static void perf_duration_warn(struct irq_work *w)
 	u64 avg_local_sample_len;
 	u64 local_samples_len;
 
-	local_samples_len = __get_cpu_var(running_sample_length);
+	local_samples_len = __this_cpu_read(running_sample_length);
 	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
 
 	printk_ratelimited(KERN_WARNING
@@ -261,10 +261,10 @@ void perf_sample_event_took(u64 sample_len_ns)
 		return;
 
 	/* decay the counter by 1 average sample */
-	local_samples_len = __get_cpu_var(running_sample_length);
+	local_samples_len = __this_cpu_read(running_sample_length);
 	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
 	local_samples_len += sample_len_ns;
-	__get_cpu_var(running_sample_length) = local_samples_len;
+	__this_cpu_write(running_sample_length, local_samples_len);
 
 	/*
 	 * note: this will be biased artifically low until we have
@@ -877,7 +877,7 @@ static DEFINE_PER_CPU(struct list_head, rotation_list);
 static void perf_pmu_rotate_start(struct pmu *pmu)
 {
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-	struct list_head *head = &__get_cpu_var(rotation_list);
+	struct list_head *head = this_cpu_ptr(&rotation_list);
 
 	WARN_ON(!irqs_disabled());
 
@@ -2389,7 +2389,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * to check if we have to switch out PMU state.
 	 * cgroup event are system-wide mode only
 	 */
-	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
 		perf_cgroup_sched_out(task, next);
 }
 
@@ -2632,11 +2632,11 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 	 * to check if we have to switch in PMU state.
 	 * cgroup event are system-wide mode only
 	 */
-	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
 		perf_cgroup_sched_in(prev, task);
 
 	/* check for system-wide branch_stack events */
-	if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
+	if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
 		perf_branch_stack_sched_in(prev, task);
 }
 
@@ -2891,7 +2891,7 @@ bool perf_event_can_stop_tick(void)
 
 void perf_event_task_tick(void)
 {
-	struct list_head *head = &__get_cpu_var(rotation_list);
+	struct list_head *head = this_cpu_ptr(&rotation_list);
 	struct perf_cpu_context *cpuctx, *tmp;
 	struct perf_event_context *ctx;
 	int throttled;
@@ -5671,7 +5671,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 			    struct perf_sample_data *data,
 			    struct pt_regs *regs)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 	struct perf_event *event;
 	struct hlist_head *head;
 
@@ -5690,7 +5690,7 @@ end:
 
 int perf_swevent_get_recursion_context(void)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 
 	return get_recursion_context(swhash->recursion);
 }
@@ -5698,7 +5698,7 @@ EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
 inline void perf_swevent_put_recursion_context(int rctx)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 
 	put_recursion_context(swhash->recursion, rctx);
 }
@@ -5727,7 +5727,7 @@ static void perf_swevent_read(struct perf_event *event)
 
 static int perf_swevent_add(struct perf_event *event, int flags)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 	struct hw_perf_event *hwc = &event->hw;
 	struct hlist_head *head;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 579712f4e9d5..77d92f8130e8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -650,10 +650,10 @@ static inline int cpu_of(struct rq *rq)
 DECLARE_PER_CPU(struct rq, runqueues);
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-#define this_rq()		(&__get_cpu_var(runqueues))
+#define this_rq()		this_cpu_ptr(&runqueues)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-#define raw_rq()		(&__raw_get_cpu_var(runqueues))
+#define raw_rq()		raw_cpu_ptr(&runqueues)
 
 static inline u64 rq_clock(struct rq *rq)
 {
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 13d2f7cd65db..b312fcc73024 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -638,7 +638,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
 		fill_tgid_exit(tsk);
 	}
 
-	listeners = __this_cpu_ptr(&listener_array);
+	listeners = raw_cpu_ptr(&listener_array);
 	if (list_empty(&listeners->list))
 		return;
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 73f90932282b..3cadc112519f 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -924,7 +924,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
  */
 void tick_nohz_idle_exit(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now;
 
 	local_irq_disable();
@@ -1041,7 +1041,7 @@ static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 
 static inline void tick_nohz_irq_enter(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now;
 
 	if (!ts->idle_active && !ts->tick_stopped)
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
index 394f70b17162..9586b670a5b2 100644
--- a/kernel/user-return-notifier.c
+++ b/kernel/user-return-notifier.c
@@ -14,7 +14,7 @@ static DEFINE_PER_CPU(struct hlist_head, return_notifier_list);
 void user_return_notifier_register(struct user_return_notifier *urn)
 {
 	set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
-	hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list));
+	hlist_add_head(&urn->link, this_cpu_ptr(&return_notifier_list));
 }
 EXPORT_SYMBOL_GPL(user_return_notifier_register);
 
@@ -25,7 +25,7 @@ EXPORT_SYMBOL_GPL(user_return_notifier_register);
 void user_return_notifier_unregister(struct user_return_notifier *urn)
 {
 	hlist_del(&urn->link);
-	if (hlist_empty(&__get_cpu_var(return_notifier_list)))
+	if (hlist_empty(this_cpu_ptr(&return_notifier_list)))
 		clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
 }
 EXPORT_SYMBOL_GPL(user_return_notifier_unregister);