author    Linus Torvalds <torvalds@linux-foundation.org>    2014-10-15 01:48:18 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-10-15 01:48:18 -0400
commit    0429fbc0bdc297d64188483ba029a23773ae07b0 (patch)
tree      67de46978c90f37540dd6ded1db20eb53a569030 /kernel
parent    6929c358972facf2999f8768815c40dd88514fc2 (diff)
parent    513d1a2884a49654f368b5fa25ef186e976bdada (diff)
Merge branch 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
Pull percpu consistent-ops changes from Tejun Heo:
 "Way back, before the current percpu allocator was implemented, static and dynamic percpu memory areas were allocated and handled separately and had their own accessors. The distinction has been gone for many years now; however, the now duplicate two sets of accessors remained with the pointer based ones - this_cpu_*() - evolving various other operations over time. During the process, we also accumulated other inconsistent operations.

 This pull request contains Christoph's patches to clean up the duplicate accessor situation. __get_cpu_var() uses are replaced with this_cpu_ptr() and __this_cpu_ptr() with raw_cpu_ptr().

 Unfortunately, the former sometimes is tricky thanks to C being a bit messy with the distinction between lvalues and pointers, which led to a rather ugly solution for cpumask_var_t involving the introduction of this_cpu_cpumask_var_ptr().

 This converts most of the uses but not all. Christoph will follow up with the remaining conversions in this merge window and hopefully remove the obsolete accessors"

* 'for-3.18-consistent-ops' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (38 commits)
  irqchip: Properly fetch the per cpu offset
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t -fix
  ia64: sn_nodepda cannot be assigned to after this_cpu conversion. Use __this_cpu_write.
  percpu: Resolve ambiguities in __get_cpu_var/cpumask_var_t
  Revert "powerpc: Replace __get_cpu_var uses"
  percpu: Remove __this_cpu_ptr
  clocksource: Replace __this_cpu_ptr with raw_cpu_ptr
  sparc: Replace __get_cpu_var uses
  avr32: Replace __get_cpu_var with __this_cpu_write
  blackfin: Replace __get_cpu_var uses
  tile: Use this_cpu_ptr() for hardware counters
  tile: Replace __get_cpu_var uses
  powerpc: Replace __get_cpu_var uses
  alpha: Replace __get_cpu_var
  ia64: Replace __get_cpu_var uses
  s390: cio driver &__get_cpu_var replacements
  s390: Replace __get_cpu_var uses
  mips: Replace __get_cpu_var uses
  MIPS: Replace __get_cpu_var uses in FPU emulator.
  arm: Replace __this_cpu_ptr with raw_cpu_ptr
  ...
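As a quick orientation for the hunks below, here is a minimal sketch of the accessor conversion, using two hypothetical per-cpu variables (sample_len and local_q are invented for illustration, not code from this pull):

/* Hypothetical per-cpu variables, for illustration only. */
#include <linux/percpu.h>
#include <linux/list.h>

static DEFINE_PER_CPU(u64, sample_len);
static DEFINE_PER_CPU(struct list_head, local_q);

static void old_style(u64 delta)
{
	/* __get_cpu_var() yields an lvalue of this CPU's instance */
	__get_cpu_var(sample_len) += delta;
	INIT_LIST_HEAD(&__get_cpu_var(local_q));
}

static void new_style(u64 delta)
{
	/* scalar accesses become __this_cpu_read()/__this_cpu_write() */
	__this_cpu_write(sample_len, __this_cpu_read(sample_len) + delta);
	/* taking the address becomes this_cpu_ptr(), or raw_cpu_ptr()
	 * where the preemption-safety debug checks must be skipped */
	INIT_LIST_HEAD(this_cpu_ptr(&local_q));
}

For cpumask_var_t, which is either an embedded array or a pointer depending on CONFIG_CPUMASK_OFFSTACK, the new this_cpu_cpumask_var_ptr() helper hides that distinction, as seen in the sched/deadline.c, sched/fair.c and sched/rt.c hunks below.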
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/callchain.c      |  4
-rw-r--r--  kernel/events/core.c           | 24
-rw-r--r--  kernel/irq/chip.c              |  2
-rw-r--r--  kernel/irq_work.c              | 12
-rw-r--r--  kernel/printk/printk.c         |  4
-rw-r--r--  kernel/sched/clock.c           |  2
-rw-r--r--  kernel/sched/deadline.c        |  2
-rw-r--r--  kernel/sched/fair.c            |  2
-rw-r--r--  kernel/sched/rt.c              |  2
-rw-r--r--  kernel/sched/sched.h           |  4
-rw-r--r--  kernel/smp.c                   |  6
-rw-r--r--  kernel/softirq.c               |  4
-rw-r--r--  kernel/taskstats.c             |  2
-rw-r--r--  kernel/time/hrtimer.c          | 22
-rw-r--r--  kernel/time/tick-broadcast.c   |  2
-rw-r--r--  kernel/time/tick-common.c      |  6
-rw-r--r--  kernel/time/tick-oneshot.c     |  2
-rw-r--r--  kernel/time/tick-sched.c       | 24
-rw-r--r--  kernel/time/timer.c            |  2
-rw-r--r--  kernel/user-return-notifier.c  |  4
-rw-r--r--  kernel/watchdog.c              | 12
21 files changed, 72 insertions(+), 72 deletions(-)
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index f2a88de87a49..d659487254d5 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -137,7 +137,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
 	int cpu;
 	struct callchain_cpus_entries *entries;
 
-	*rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
 	if (*rctx == -1)
 		return NULL;
 
@@ -153,7 +153,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
 static void
 put_callchain_entry(int rctx)
 {
-	put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
 }
 
 struct perf_callchain_entry *
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 094df8c0742d..1425d07018de 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -249,7 +249,7 @@ static void perf_duration_warn(struct irq_work *w)
 	u64 avg_local_sample_len;
 	u64 local_samples_len;
 
-	local_samples_len = __get_cpu_var(running_sample_length);
+	local_samples_len = __this_cpu_read(running_sample_length);
 	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
 
 	printk_ratelimited(KERN_WARNING
@@ -271,10 +271,10 @@ void perf_sample_event_took(u64 sample_len_ns)
 		return;
 
 	/* decay the counter by 1 average sample */
-	local_samples_len = __get_cpu_var(running_sample_length);
+	local_samples_len = __this_cpu_read(running_sample_length);
 	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
 	local_samples_len += sample_len_ns;
-	__get_cpu_var(running_sample_length) = local_samples_len;
+	__this_cpu_write(running_sample_length, local_samples_len);
 
 	/*
 	 * note: this will be biased artifically low until we have
@@ -882,7 +882,7 @@ static DEFINE_PER_CPU(struct list_head, rotation_list);
 static void perf_pmu_rotate_start(struct pmu *pmu)
 {
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-	struct list_head *head = &__get_cpu_var(rotation_list);
+	struct list_head *head = this_cpu_ptr(&rotation_list);
 
 	WARN_ON(!irqs_disabled());
 
@@ -2462,7 +2462,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * to check if we have to switch out PMU state.
 	 * cgroup event are system-wide mode only
 	 */
-	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
 		perf_cgroup_sched_out(task, next);
 }
 
@@ -2705,11 +2705,11 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 	 * to check if we have to switch in PMU state.
 	 * cgroup event are system-wide mode only
 	 */
-	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
 		perf_cgroup_sched_in(prev, task);
 
 	/* check for system-wide branch_stack events */
-	if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
+	if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
 		perf_branch_stack_sched_in(prev, task);
 }
 
@@ -2964,7 +2964,7 @@ bool perf_event_can_stop_tick(void)
 
 void perf_event_task_tick(void)
 {
-	struct list_head *head = &__get_cpu_var(rotation_list);
+	struct list_head *head = this_cpu_ptr(&rotation_list);
 	struct perf_cpu_context *cpuctx, *tmp;
 	struct perf_event_context *ctx;
 	int throttled;
@@ -5833,7 +5833,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 			       struct perf_sample_data *data,
 			       struct pt_regs *regs)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 	struct perf_event *event;
 	struct hlist_head *head;
 
@@ -5852,7 +5852,7 @@ end:
 
 int perf_swevent_get_recursion_context(void)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 
 	return get_recursion_context(swhash->recursion);
 }
@@ -5860,7 +5860,7 @@ EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
 inline void perf_swevent_put_recursion_context(int rctx)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 
 	put_recursion_context(swhash->recursion, rctx);
 }
@@ -5889,7 +5889,7 @@ static void perf_swevent_read(struct perf_event *event)
 
 static int perf_swevent_add(struct perf_event *event, int flags)
 {
-	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+	struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
 	struct hw_perf_event *hwc = &event->hw;
 	struct hlist_head *head;
 
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 8fb52e9bddc1..e5202f00cabc 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -699,7 +699,7 @@ void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	struct irqaction *action = desc->action;
-	void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
+	void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
 	irqreturn_t res;
 
 	kstat_incr_irqs_this_cpu(irq, desc);
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 385b85aded19..3ab9048483fa 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -95,11 +95,11 @@ bool irq_work_queue(struct irq_work *work)
 
 	/* If the work is "lazy", handle it from next tick if any */
 	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
 		    tick_nohz_tick_stopped())
 			arch_irq_work_raise();
 	} else {
-		if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
 			arch_irq_work_raise();
 	}
 
@@ -113,8 +113,8 @@ bool irq_work_needs_cpu(void)
 {
 	struct llist_head *raised, *lazy;
 
-	raised = &__get_cpu_var(raised_list);
-	lazy = &__get_cpu_var(lazy_list);
+	raised = this_cpu_ptr(&raised_list);
+	lazy = this_cpu_ptr(&lazy_list);
 
 	if (llist_empty(raised) || arch_irq_work_has_interrupt())
 		if (llist_empty(lazy))
@@ -168,8 +168,8 @@ static void irq_work_run_list(struct llist_head *list)
  */
 void irq_work_run(void)
 {
-	irq_work_run_list(&__get_cpu_var(raised_list));
-	irq_work_run_list(&__get_cpu_var(lazy_list));
+	irq_work_run_list(this_cpu_ptr(&raised_list));
+	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
 
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index e3962d63e368..ced2b84b1cb7 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2622,7 +2622,7 @@ void wake_up_klogd(void)
 	preempt_disable();
 	if (waitqueue_active(&log_wait)) {
 		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
-		irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+		irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
 	}
 	preempt_enable();
 }
@@ -2638,7 +2638,7 @@ int printk_deferred(const char *fmt, ...)
 	va_end(args);
 
 	__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
-	irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+	irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
 	preempt_enable();
 
 	return r;
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 3ef6451e972e..c27e4f8f4879 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -134,7 +134,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
 
 static inline struct sched_clock_data *this_scd(void)
 {
-	return &__get_cpu_var(sched_clock_data);
+	return this_cpu_ptr(&sched_clock_data);
 }
 
 static inline struct sched_clock_data *cpu_sdc(int cpu)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index abfaf3d9a29f..256e577faf1b 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1153,7 +1153,7 @@ static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
 static int find_later_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
-	struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
+	struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
 	int this_cpu = smp_processor_id();
 	int best_cpu, cpu = task_cpu(task);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b78280c59b46..0b069bf3e708 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6615,7 +6615,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	struct sched_group *group;
 	struct rq *busiest;
 	unsigned long flags;
-	struct cpumask *cpus = __get_cpu_var(load_balance_mask);
+	struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
 
 	struct lb_env env = {
 		.sd		= sd,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 87ea5bf1b87f..d024e6ce30ba 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1525,7 +1525,7 @@ static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
-	struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
+	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
 	int this_cpu = smp_processor_id();
 	int cpu = task_cpu(task);
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6130251de280..24156c8434d1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -663,10 +663,10 @@ static inline int cpu_of(struct rq *rq)
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
-#define this_rq()		(&__get_cpu_var(runqueues))
+#define this_rq()		this_cpu_ptr(&runqueues)
 #define task_rq(p)		cpu_rq(task_cpu(p))
 #define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
-#define raw_rq()		(&__raw_get_cpu_var(runqueues))
+#define raw_rq()		raw_cpu_ptr(&runqueues)
 
 static inline u64 rq_clock(struct rq *rq)
 {
diff --git a/kernel/smp.c b/kernel/smp.c
index 9e0d0b289118..f38a1e692259 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -165,7 +165,7 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
 	if (!csd) {
 		csd = &csd_stack;
 		if (!wait)
-			csd = &__get_cpu_var(csd_data);
+			csd = this_cpu_ptr(&csd_data);
 	}
 
 	csd_lock(csd);
@@ -230,7 +230,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 
 	WARN_ON(!irqs_disabled());
 
-	head = &__get_cpu_var(call_single_queue);
+	head = this_cpu_ptr(&call_single_queue);
 	entry = llist_del_all(head);
 	entry = llist_reverse_order(entry);
 
@@ -420,7 +420,7 @@ void smp_call_function_many(const struct cpumask *mask,
 		return;
 	}
 
-	cfd = &__get_cpu_var(cfd_data);
+	cfd = this_cpu_ptr(&cfd_data);
 
 	cpumask_and(cfd->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, cfd->cpumask);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 348ec763b104..0699add19164 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -485,7 +485,7 @@ static void tasklet_action(struct softirq_action *a)
 	local_irq_disable();
 	list = __this_cpu_read(tasklet_vec.head);
 	__this_cpu_write(tasklet_vec.head, NULL);
-	__this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
+	__this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
 	local_irq_enable();
 
 	while (list) {
@@ -521,7 +521,7 @@ static void tasklet_hi_action(struct softirq_action *a)
 	local_irq_disable();
 	list = __this_cpu_read(tasklet_hi_vec.head);
 	__this_cpu_write(tasklet_hi_vec.head, NULL);
-	__this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
+	__this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
 	local_irq_enable();
 
 	while (list) {
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 13d2f7cd65db..b312fcc73024 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -638,7 +638,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
 		fill_tgid_exit(tsk);
 	}
 
-	listeners = __this_cpu_ptr(&listener_array);
+	listeners = raw_cpu_ptr(&listener_array);
 	if (list_empty(&listeners->list))
 		return;
 
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index ab370ffffd53..37e50aadd471 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -558,7 +558,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 static int hrtimer_reprogram(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 	int res;
 
@@ -629,7 +629,7 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
  */
 static void retrigger_next_event(void *arg)
 {
-	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
 	if (!hrtimer_hres_active())
 		return;
@@ -903,7 +903,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 		 */
 		debug_deactivate(timer);
 		timer_stats_hrtimer_clear_start_info(timer);
-		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
+		reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
 		/*
 		 * We must preserve the CALLBACK state flag here,
 		 * otherwise we could move the timer base in
@@ -963,7 +963,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 		 * on dynticks target.
 		 */
 		wake_up_nohz_cpu(new_base->cpu_base->cpu);
-	} else if (new_base->cpu_base == &__get_cpu_var(hrtimer_bases) &&
+	} else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) &&
 			hrtimer_reprogram(timer, new_base)) {
 		/*
 		 * Only allow reprogramming if the new base is on this CPU.
@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
  */
 ktime_t hrtimer_get_next_event(void)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	struct hrtimer_clock_base *base = cpu_base->clock_base;
 	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
 	unsigned long flags;
@@ -1144,7 +1144,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 
 	memset(timer, 0, sizeof(struct hrtimer));
 
-	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+	cpu_base = raw_cpu_ptr(&hrtimer_bases);
 
 	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
 		clock_id = CLOCK_MONOTONIC;
@@ -1187,7 +1187,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 	struct hrtimer_cpu_base *cpu_base;
 	int base = hrtimer_clockid_to_base(which_clock);
 
-	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+	cpu_base = raw_cpu_ptr(&hrtimer_bases);
 	*tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
 
 	return 0;
@@ -1242,7 +1242,7 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
  */
 void hrtimer_interrupt(struct clock_event_device *dev)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	ktime_t expires_next, now, entry_time, delta;
 	int i, retries = 0;
 
@@ -1376,7 +1376,7 @@ static void __hrtimer_peek_ahead_timers(void)
 	if (!hrtimer_hres_active())
 		return;
 
-	td = &__get_cpu_var(tick_cpu_device);
+	td = this_cpu_ptr(&tick_cpu_device);
 	if (td && td->evtdev)
 		hrtimer_interrupt(td->evtdev);
 }
@@ -1440,7 +1440,7 @@ void hrtimer_run_pending(void)
 void hrtimer_run_queues(void)
 {
 	struct timerqueue_node *node;
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
 	struct hrtimer_clock_base *base;
 	int index, gettime = 1;
 
@@ -1679,7 +1679,7 @@ static void migrate_hrtimers(int scpu)
 
 	local_irq_disable();
 	old_base = &per_cpu(hrtimer_bases, scpu);
-	new_base = &__get_cpu_var(hrtimer_bases);
+	new_base = this_cpu_ptr(&hrtimer_bases);
 	/*
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 64c5990fd500..066f0ec05e48 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -554,7 +554,7 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 void tick_check_oneshot_broadcast_this_cpu(void)
 {
 	if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
-		struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+		struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 
 		/*
 		 * We might be in the middle of switching over from
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 052b4b53c3d6..7efeedf53ebd 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -224,7 +224,7 @@ static void tick_setup_device(struct tick_device *td,
 
 void tick_install_replacement(struct clock_event_device *newdev)
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 	int cpu = smp_processor_id();
 
 	clockevents_exchange_device(td->evtdev, newdev);
@@ -374,14 +374,14 @@ void tick_shutdown(unsigned int *cpup)
 
 void tick_suspend(void)
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 
 	clockevents_shutdown(td->evtdev);
 }
 
 void tick_resume(void)
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 	int broadcast = tick_resume_broadcast();
 
 	clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 824109060a33..7ce740e78e1b 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -59,7 +59,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
  */
 int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
 {
-	struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
 	struct clock_event_device *dev = td->evtdev;
 
 	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a73efdf6f696..7b5741fc4110 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -205,7 +205,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
  */
 void __tick_nohz_full_check(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (tick_nohz_full_cpu(smp_processor_id())) {
 		if (ts->tick_stopped && !is_idle_task(current)) {
@@ -573,7 +573,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 	unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
 	ktime_t last_update, expires, ret = { .tv64 = 0 };
 	unsigned long rcu_delta_jiffies;
-	struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+	struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 	u64 time_delta;
 
 	time_delta = timekeeping_max_deferment();
@@ -841,7 +841,7 @@ void tick_nohz_idle_enter(void)
 
 	local_irq_disable();
 
-	ts = &__get_cpu_var(tick_cpu_sched);
+	ts = this_cpu_ptr(&tick_cpu_sched);
 	ts->inidle = 1;
 	__tick_nohz_idle_enter(ts);
 
@@ -859,7 +859,7 @@ EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
  */
 void tick_nohz_irq_exit(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (ts->inidle)
 		__tick_nohz_idle_enter(ts);
@@ -874,7 +874,7 @@ void tick_nohz_irq_exit(void)
  */
 ktime_t tick_nohz_get_sleep_length(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	return ts->sleep_length;
 }
@@ -952,7 +952,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
  */
 void tick_nohz_idle_exit(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now;
 
 	local_irq_disable();
@@ -987,7 +987,7 @@ static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
  */
 static void tick_nohz_handler(struct clock_event_device *dev)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
 
@@ -1011,7 +1011,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 */
 static void tick_nohz_switch_to_nohz(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t next;
 
 	if (!tick_nohz_enabled)
@@ -1073,7 +1073,7 @@ static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
 
 static inline void tick_nohz_irq_enter(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now;
 
 	if (!ts->idle_active && !ts->tick_stopped)
@@ -1151,7 +1151,7 @@ early_param("skew_tick", skew_tick);
 */
 void tick_setup_sched_timer(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 	ktime_t now = ktime_get();
 
 	/*
@@ -1220,7 +1220,7 @@ void tick_clock_notify(void)
 */
 void tick_oneshot_notify(void)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	set_bit(0, &ts->check_clocks);
 }
@@ -1235,7 +1235,7 @@ void tick_oneshot_notify(void)
 */
 int tick_check_oneshot_change(int allow_nohz)
 {
-	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+	struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
 
 	if (!test_and_clear_bit(0, &ts->check_clocks))
 		return 0;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 9bbb8344ed3b..3260ffdb368f 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -655,7 +655,7 @@ static inline void debug_assert_init(struct timer_list *timer)
 static void do_init_timer(struct timer_list *timer, unsigned int flags,
 			  const char *name, struct lock_class_key *key)
 {
-	struct tvec_base *base = __raw_get_cpu_var(tvec_bases);
+	struct tvec_base *base = raw_cpu_read(tvec_bases);
 
 	timer->entry.next = NULL;
 	timer->base = (void *)((unsigned long)base | flags);
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
index 394f70b17162..9586b670a5b2 100644
--- a/kernel/user-return-notifier.c
+++ b/kernel/user-return-notifier.c
@@ -14,7 +14,7 @@ static DEFINE_PER_CPU(struct hlist_head, return_notifier_list);
 void user_return_notifier_register(struct user_return_notifier *urn)
 {
 	set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
-	hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list));
+	hlist_add_head(&urn->link, this_cpu_ptr(&return_notifier_list));
 }
 EXPORT_SYMBOL_GPL(user_return_notifier_register);
 
@@ -25,7 +25,7 @@ EXPORT_SYMBOL_GPL(user_return_notifier_register);
 void user_return_notifier_unregister(struct user_return_notifier *urn)
 {
 	hlist_del(&urn->link);
-	if (hlist_empty(&__get_cpu_var(return_notifier_list)))
+	if (hlist_empty(this_cpu_ptr(&return_notifier_list)))
 		clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
 }
 EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 49e9537f3673..70bf11815f84 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -208,7 +208,7 @@ void touch_nmi_watchdog(void)
 	 * case we shouldn't have to worry about the watchdog
 	 * going off.
 	 */
-	__raw_get_cpu_var(watchdog_nmi_touch) = true;
+	raw_cpu_write(watchdog_nmi_touch, true);
 	touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -217,8 +217,8 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
 
 void touch_softlockup_watchdog_sync(void)
 {
-	__raw_get_cpu_var(softlockup_touch_sync) = true;
-	__raw_get_cpu_var(watchdog_touch_ts) = 0;
+	__this_cpu_write(softlockup_touch_sync, true);
+	__this_cpu_write(watchdog_touch_ts, 0);
 }
 
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
@@ -425,7 +425,7 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio)
 
 static void watchdog_enable(unsigned int cpu)
 {
-	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
 
 	/* kick off the timer for the hardlockup detector */
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -445,7 +445,7 @@ static void watchdog_enable(unsigned int cpu)
 
 static void watchdog_disable(unsigned int cpu)
 {
-	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
 
 	watchdog_set_prio(SCHED_NORMAL, 0);
 	hrtimer_cancel(hrtimer);
@@ -585,7 +585,7 @@ static struct smp_hotplug_thread watchdog_threads = {
 
 static void restart_watchdog_hrtimer(void *info)
 {
-	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+	struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
 	int ret;
 
 	/*