Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/include/asm/hw_irq.h     |  2 +-
-rw-r--r--  arch/ia64/include/asm/sn/arch.h    |  4 ++--
-rw-r--r--  arch/ia64/include/asm/sn/nodepda.h |  2 +-
-rw-r--r--  arch/ia64/include/asm/switch_to.h  |  2 +-
-rw-r--r--  arch/ia64/include/asm/uv/uv_hub.h  |  2 +-
-rw-r--r--  arch/ia64/kernel/irq.c             |  2 +-
-rw-r--r--  arch/ia64/kernel/irq_ia64.c        |  4 ++--
-rw-r--r--  arch/ia64/kernel/kprobes.c         |  6 +++---
-rw-r--r--  arch/ia64/kernel/mca.c             | 16 ++++++++--------
-rw-r--r--  arch/ia64/kernel/process.c         |  6 +++---
-rw-r--r--  arch/ia64/kernel/traps.c           |  2 +-
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c  | 28 ++++++++++++++------------
12 files changed, 38 insertions(+), 38 deletions(-)
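Every hunk below applies the same mechanical conversion: the deprecated __get_cpu_var() accessor, which yields an lvalue for the current CPU's instance of a per-cpu variable, is replaced by the dedicated per-cpu operations (__this_cpu_read/__this_cpu_write/__this_cpu_add/__this_cpu_inc), or by this_cpu_ptr() where a pointer is needed. As a rough sketch of the idiom — using a hypothetical per-cpu counter that is not part of this patch:

	#include <linux/percpu.h>

	/* Hypothetical per-cpu variable, for illustration only. */
	DEFINE_PER_CPU(unsigned long, example_count);

	static void example_update(unsigned long delta)
	{
		/* Old style:
		 *	__get_cpu_var(example_count) += delta;
		 */

		/* New style: per-cpu operations applied to the variable itself. */
		__this_cpu_add(example_count, delta);
		if (__this_cpu_read(example_count) > 1024)
			__this_cpu_write(example_count, 0);
	}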
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 029bab36cd91..668786e84af8 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -159,7 +159,7 @@ static inline ia64_vector __ia64_irq_to_vector(int irq)
 static inline unsigned int
 __ia64_local_vector_to_irq (ia64_vector vec)
 {
-	return __get_cpu_var(vector_irq)[vec];
+	return __this_cpu_read(vector_irq[vec]);
 }
 #endif
 
diff --git a/arch/ia64/include/asm/sn/arch.h b/arch/ia64/include/asm/sn/arch.h
index 7caa1f44cd95..31eb784866f8 100644
--- a/arch/ia64/include/asm/sn/arch.h
+++ b/arch/ia64/include/asm/sn/arch.h
@@ -57,7 +57,7 @@ struct sn_hub_info_s {
 	u16 nasid_bitmask;
 };
 DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
-#define sn_hub_info	(&__get_cpu_var(__sn_hub_info))
+#define sn_hub_info	this_cpu_ptr(&__sn_hub_info)
 #define is_shub2()	(sn_hub_info->shub2)
 #define is_shub1()	(sn_hub_info->shub2 == 0)
 
@@ -72,7 +72,7 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
  * cpu.
  */
 DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
-#define sn_cnodeid_to_nasid	(&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
+#define sn_cnodeid_to_nasid	this_cpu_ptr(&__sn_cnodeid_to_nasid[0])
 
 
 extern u8 sn_partition_id;
diff --git a/arch/ia64/include/asm/sn/nodepda.h b/arch/ia64/include/asm/sn/nodepda.h
index ee118b901de4..7c8b4710f071 100644
--- a/arch/ia64/include/asm/sn/nodepda.h
+++ b/arch/ia64/include/asm/sn/nodepda.h
@@ -70,7 +70,7 @@ typedef struct nodepda_s nodepda_t;
  */
 
 DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
-#define sn_nodepda		(__get_cpu_var(__sn_nodepda))
+#define sn_nodepda		__this_cpu_read(__sn_nodepda)
 #define	NODEPDA(cnodeid)	(sn_nodepda->pernode_pdaindr[cnodeid])
 
 /*
diff --git a/arch/ia64/include/asm/switch_to.h b/arch/ia64/include/asm/switch_to.h
index d38c7ea5eea5..e8f3585e7e7a 100644
--- a/arch/ia64/include/asm/switch_to.h
+++ b/arch/ia64/include/asm/switch_to.h
@@ -32,7 +32,7 @@ extern void ia64_load_extra (struct task_struct *task);
 
 #ifdef CONFIG_PERFMON
   DECLARE_PER_CPU(unsigned long, pfm_syst_info);
-# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
+# define PERFMON_IS_SYSWIDE() (__this_cpu_read(pfm_syst_info) & 0x1)
 #else
 # define PERFMON_IS_SYSWIDE() (0)
 #endif
diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h
index 53e9dfacd073..2a88c7204e52 100644
--- a/arch/ia64/include/asm/uv/uv_hub.h
+++ b/arch/ia64/include/asm/uv/uv_hub.h
@@ -108,7 +108,7 @@ struct uv_hub_info_s {
 	unsigned char	n_val;
 };
 DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-#define uv_hub_info		(&__get_cpu_var(__uv_hub_info))
+#define uv_hub_info		this_cpu_ptr(&__uv_hub_info)
 #define uv_cpu_hub_info(cpu)	(&per_cpu(__uv_hub_info, cpu))
 
 /*
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index f2c418281130..812a1e6b3179 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -42,7 +42,7 @@ ia64_vector __ia64_irq_to_vector(int irq)
 
 unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
 {
-	return __get_cpu_var(vector_irq)[vec];
+	return __this_cpu_read(vector_irq[vec]);
 }
 #endif
 
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 03ea78ed64a9..698d8fefde6c 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -330,7 +330,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 		int irq;
 		struct irq_desc *desc;
 		struct irq_cfg *cfg;
-		irq = __get_cpu_var(vector_irq)[vector];
+		irq = __this_cpu_read(vector_irq[vector]);
 		if (irq < 0)
 			continue;
 
@@ -344,7 +344,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
 			goto unlock;
 
 		spin_lock_irqsave(&vector_lock, flags);
-		__get_cpu_var(vector_irq)[vector] = -1;
+		__this_cpu_write(vector_irq[vector], -1);
 		cpu_clear(me, vector_table[vector]);
 		spin_unlock_irqrestore(&vector_lock, flags);
 		cfg->move_cleanup_count--;
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 074fde49c9e6..c7c51445c3be 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -396,7 +396,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	unsigned int i;
 	i = atomic_read(&kcb->prev_kprobe_index);
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe[i-1].kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe[i-1].kp);
 	kcb->kprobe_status = kcb->prev_kprobe[i-1].status;
 	atomic_sub(1, &kcb->prev_kprobe_index);
 }
@@ -404,7 +404,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 static void __kprobes set_current_kprobe(struct kprobe *p,
 			struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 }
 
 static void kretprobe_trampoline(void)
@@ -823,7 +823,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
 			/*
 			 * jprobe instrumented function just completed
 			 */
-			p = __get_cpu_var(current_kprobe);
+			p = __this_cpu_read(current_kprobe);
 			if (p->break_handler && p->break_handler(p, regs)) {
 				goto ss_probe;
 			}
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index db7b36bb068b..8bfd36af46f8 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1341,7 +1341,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		ia64_mlogbuf_finish(1);
 	}
 
-	if (__get_cpu_var(ia64_mca_tr_reload)) {
+	if (__this_cpu_read(ia64_mca_tr_reload)) {
 		mca_insert_tr(0x1); /*Reload dynamic itrs*/
 		mca_insert_tr(0x2); /*Reload dynamic itrs*/
 	}
@@ -1868,14 +1868,14 @@ ia64_mca_cpu_init(void *cpu_data)
 						"MCA", cpu);
 	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
 						"INIT", cpu);
-	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);
+	__this_cpu_write(ia64_mca_data, (__per_cpu_mca[cpu] = __pa(data)));
 
 	/*
 	 * Stash away a copy of the PTE needed to map the per-CPU page.
 	 * We may need it during MCA recovery.
 	 */
-	__get_cpu_var(ia64_mca_per_cpu_pte) =
-		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
+	__this_cpu_write(ia64_mca_per_cpu_pte,
+		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)));
 
 	/*
 	 * Also, stash away a copy of the PAL address and the PTE
@@ -1884,10 +1884,10 @@ ia64_mca_cpu_init(void *cpu_data)
 	pal_vaddr = efi_get_pal_addr();
 	if (!pal_vaddr)
 		return;
-	__get_cpu_var(ia64_mca_pal_base) =
-		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
-	__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
-							      PAGE_KERNEL));
+	__this_cpu_write(ia64_mca_pal_base,
+		GRANULEROUNDDOWN((unsigned long) pal_vaddr));
+	__this_cpu_write(ia64_mca_pal_pte, pte_val(mk_pte_phys(__pa(pal_vaddr),
+							      PAGE_KERNEL)));
 }
 
 static void ia64_mca_cmc_vector_adjust(void *dummy)
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index deed6fa96bb0..b51514957620 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -215,7 +215,7 @@ static inline void play_dead(void)
 	unsigned int this_cpu = smp_processor_id();
 
 	/* Ack it */
-	__get_cpu_var(cpu_state) = CPU_DEAD;
+	__this_cpu_write(cpu_state, CPU_DEAD);
 
 	max_xtp();
 	local_irq_disable();
@@ -273,7 +273,7 @@ ia64_save_extra (struct task_struct *task)
 	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
 		pfm_save_regs(task);
 
-	info = __get_cpu_var(pfm_syst_info);
+	info = __this_cpu_read(pfm_syst_info);
 	if (info & PFM_CPUINFO_SYST_WIDE)
 		pfm_syst_wide_update_task(task, info, 0);
 #endif
@@ -293,7 +293,7 @@ ia64_load_extra (struct task_struct *task)
 	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
 		pfm_load_regs(task);
 
-	info = __get_cpu_var(pfm_syst_info);
+	info = __this_cpu_read(pfm_syst_info);
 	if (info & PFM_CPUINFO_SYST_WIDE)
 		pfm_syst_wide_update_task(task, info, 1);
 #endif
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index d3636e67a98e..6f7d4a4dcf24 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -299,7 +299,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
 
 	if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT))  {
 		unsigned long count, current_jiffies = jiffies;
-		struct fpu_swa_msg *cp = &__get_cpu_var(cpulast);
+		struct fpu_swa_msg *cp = this_cpu_ptr(&cpulast);
 
 		if (unlikely(current_jiffies > cp->time))
 			cp->count = 0;
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 68c845411624..f9c8d9fc5939 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -134,8 +134,8 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm)
 	itc = ia64_get_itc();
 	smp_flush_tlb_cpumask(*mm_cpumask(mm));
 	itc = ia64_get_itc() - itc;
-	__get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
-	__get_cpu_var(ptcstats).shub_ipi_flushes++;
+	__this_cpu_add(ptcstats.shub_ipi_flushes_itc_clocks, itc);
+	__this_cpu_inc(ptcstats.shub_ipi_flushes);
 }
 
 /**
@@ -199,14 +199,14 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 			start += (1UL << nbits);
 		} while (start < end);
 		ia64_srlz_i();
-		__get_cpu_var(ptcstats).ptc_l++;
+		__this_cpu_inc(ptcstats.ptc_l);
 		preempt_enable();
 		return;
 	}
 
 	if (atomic_read(&mm->mm_users) == 1 && mymm) {
 		flush_tlb_mm(mm);
-		__get_cpu_var(ptcstats).change_rid++;
+		__this_cpu_inc(ptcstats.change_rid);
 		preempt_enable();
 		return;
 	}
@@ -250,11 +250,11 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 	spin_lock_irqsave(PTC_LOCK(shub1), flags);
 	itc2 = ia64_get_itc();
 
-	__get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
-	__get_cpu_var(ptcstats).shub_ptc_flushes++;
-	__get_cpu_var(ptcstats).nodes_flushed += nix;
+	__this_cpu_add(ptcstats.lock_itc_clocks, itc2 - itc);
+	__this_cpu_inc(ptcstats.shub_ptc_flushes);
+	__this_cpu_add(ptcstats.nodes_flushed, nix);
 	if (!mymm)
-		__get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++;
+		__this_cpu_inc(ptcstats.shub_ptc_flushes_not_my_mm);
 
 	if (use_cpu_ptcga && !mymm) {
 		old_rr = ia64_get_rr(start);
@@ -299,9 +299,9 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
 
 done:
 	itc2 = ia64_get_itc() - itc2;
-	__get_cpu_var(ptcstats).shub_itc_clocks += itc2;
-	if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
-		__get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;
+	__this_cpu_add(ptcstats.shub_itc_clocks, itc2);
+	if (itc2 > __this_cpu_read(ptcstats.shub_itc_clocks_max))
+		__this_cpu_write(ptcstats.shub_itc_clocks_max, itc2);
 
 	if (old_rr) {
 		ia64_set_rr(start, old_rr);
@@ -311,7 +311,7 @@ done:
 	spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
 
 	if (flush_opt == 1 && deadlock) {
-		__get_cpu_var(ptcstats).deadlocks++;
+		__this_cpu_inc(ptcstats.deadlocks);
 		sn2_ipi_flush_all_tlb(mm);
 	}
 
@@ -334,7 +334,7 @@ sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
 	short nasid, i;
 	unsigned long *piows, zeroval, n;
 
-	__get_cpu_var(ptcstats).deadlocks++;
+	__this_cpu_inc(ptcstats.deadlocks);
 
 	piows = (unsigned long *) pda->pio_write_status_addr;
 	zeroval = pda->pio_write_status_val;
@@ -349,7 +349,7 @@ sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
 		ptc1 = CHANGE_NASID(nasid, ptc1);
 
 		n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
-		__get_cpu_var(ptcstats).deadlocks2 += n;
+		__this_cpu_add(ptcstats.deadlocks2, n);
 	}
 
 }