 arch/tile/include/asm/irqflags.h    |  4 ++--
 arch/tile/include/asm/mmu_context.h |  6 +++---
 arch/tile/kernel/irq.c              | 14 +++++++-------
 arch/tile/kernel/messaging.c        |  4 ++--
 arch/tile/kernel/process.c          |  2 +-
 arch/tile/kernel/setup.c            |  3 ++-
 arch/tile/kernel/single_step.c      |  4 ++--
 arch/tile/kernel/smp.c              |  2 +-
 arch/tile/kernel/smpboot.c          |  6 +++---
 arch/tile/kernel/time.c             |  8 ++++----
 arch/tile/mm/highmem.c              |  2 +-
 arch/tile/mm/init.c                 |  4 ++--
 12 files changed, 30 insertions(+), 29 deletions(-)
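This patch mechanically converts the deprecated __get_cpu_var()/__this_cpu_ptr() per-cpu accessors in arch/tile to the newer this_cpu/raw_cpu operations. As a minimal sketch of the recurring substitutions (illustrative only; demo_counter is a hypothetical variable, not one from the patch):

        #include <linux/percpu.h>

        static DEFINE_PER_CPU(int, demo_counter);      /* hypothetical per-cpu variable */

        /* Callers of the __this_cpu_* forms must not be preemptible. */
        static void demo_conversions(void)
        {
                int v, *p;

                v = __this_cpu_read(demo_counter);      /* was: v = __get_cpu_var(demo_counter); */
                __this_cpu_write(demo_counter, v);      /* was: __get_cpu_var(demo_counter) = v; */
                __this_cpu_inc(demo_counter);           /* was: __get_cpu_var(demo_counter)++; */
                p = this_cpu_ptr(&demo_counter);        /* was: p = &__get_cpu_var(demo_counter); */
                (void)p;
        }

Unlike the old macros, the new operations take the variable (or its address) as an argument rather than expanding to an lvalue, which also lets architectures with dedicated per-cpu addressing generate cheaper code.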
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 71af5747874d..60d62a292fce 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -140,12 +140,12 @@ extern unsigned int debug_smp_processor_id(void);
 
 /*
  * Read the set of maskable interrupts.
- * We avoid the preemption warning here via __this_cpu_ptr since even
+ * We avoid the preemption warning here via raw_cpu_ptr since even
  * if irqs are already enabled, it's harmless to read the wrong cpu's
  * enabled mask.
  */
 #define arch_local_irqs_enabled() \
-	(*__this_cpu_ptr(&interrupts_enabled_mask))
+	(*raw_cpu_ptr(&interrupts_enabled_mask))
 
 /* Re-enable all maskable interrupts. */
 #define arch_local_irq_enable() \
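The rewritten comment tracks the rename: raw_cpu_ptr() is the successor to __this_cpu_ptr() and, unlike this_cpu_ptr() under CONFIG_DEBUG_PREEMPT, performs no preemption sanity check. That is the point here, per the comment itself: even if the caller is preemptible and migrates, reading some other cpu's enabled mask is harmless. A small illustration, not from the patch:

        /* In preemptible context this_cpu_ptr() could trigger a
         * DEBUG_PREEMPT warning; raw_cpu_ptr() performs the same
         * access silently, acceptable here because any cpu's mask
         * is good enough.
         */
        unsigned long mask = *raw_cpu_ptr(&interrupts_enabled_mask);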
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 4734215e2ad4..f67753db1f78 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -84,7 +84,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
 	 * clear any pending DMA interrupts.
 	 */
 	if (current->thread.tile_dma_state.enabled)
-		install_page_table(mm->pgd, __get_cpu_var(current_asid));
+		install_page_table(mm->pgd, __this_cpu_read(current_asid));
 #endif
 }
 
@@ -96,12 +96,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	int cpu = smp_processor_id();
 
 	/* Pick new ASID. */
-	int asid = __get_cpu_var(current_asid) + 1;
+	int asid = __this_cpu_read(current_asid) + 1;
 	if (asid > max_asid) {
 		asid = min_asid;
 		local_flush_tlb();
 	}
-	__get_cpu_var(current_asid) = asid;
+	__this_cpu_write(current_asid, asid);
 
 	/* Clear cpu from the old mm, and set it in the new one. */
 	cpumask_clear_cpu(cpu, mm_cpumask(prev));
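Both halves of the ASID bump convert cleanly: the read becomes __this_cpu_read() and the lvalue store becomes __this_cpu_write(). Splitting one expression into separate read and write operations is safe here because switch_mm() runs with preemption disabled, so the task cannot migrate between the two per-cpu accesses. Condensed from the hunk above, comments mine:

        int asid = __this_cpu_read(current_asid) + 1;   /* read this cpu's slot */
        if (asid > max_asid) {
                asid = min_asid;                        /* wrapped: recycle ASIDs */
                local_flush_tlb();
        }
        __this_cpu_write(current_asid, asid);           /* store the new ASID */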
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 637f2ffaa5f5..ba85765e1436 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -73,7 +73,7 @@ static DEFINE_PER_CPU(int, irq_depth);
  */
 void tile_dev_intr(struct pt_regs *regs, int intnum)
 {
-	int depth = __get_cpu_var(irq_depth)++;
+	int depth = __this_cpu_inc_return(irq_depth);
 	unsigned long original_irqs;
 	unsigned long remaining_irqs;
 	struct pt_regs *old_regs;
@@ -120,7 +120,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 
 		/* Count device irqs; Linux IPIs are counted elsewhere. */
 		if (irq != IRQ_RESCHEDULE)
-			__get_cpu_var(irq_stat).irq_dev_intr_count++;
+			__this_cpu_inc(irq_stat.irq_dev_intr_count);
 
 		generic_handle_irq(irq);
 	}
@@ -130,10 +130,10 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 	 * including any that were reenabled during interrupt
 	 * handling.
 	 */
-	if (depth == 0)
-		unmask_irqs(~__get_cpu_var(irq_disable_mask));
+	if (depth == 1)
+		unmask_irqs(~__this_cpu_read(irq_disable_mask));
 
-	__get_cpu_var(irq_depth)--;
+	__this_cpu_dec(irq_depth);
 
 	/*
 	 * Track time spent against the current process again and
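One conversion in this function is not purely mechanical: the old __get_cpu_var(irq_depth)++ was a post-increment, yielding the value before the increment, whereas __this_cpu_inc_return() yields the value after it. The outermost-interrupt test is therefore adjusted from depth == 0 to depth == 1. In sketch form:

        int depth;

        /* Old idiom (post-increment returns the prior value):
         *      depth = __get_cpu_var(irq_depth)++;     // outermost => depth == 0
         */
        depth = __this_cpu_inc_return(irq_depth);       /* outermost => depth == 1 */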
@@ -151,7 +151,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 static void tile_irq_chip_enable(struct irq_data *d)
 {
 	get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
-	if (__get_cpu_var(irq_depth) == 0)
+	if (__this_cpu_read(irq_depth) == 0)
 		unmask_irqs(1UL << d->irq);
 	put_cpu_var(irq_disable_mask);
 }
@@ -197,7 +197,7 @@ static void tile_irq_chip_ack(struct irq_data *d)
  */
 static void tile_irq_chip_eoi(struct irq_data *d)
 {
-	if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
+	if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
 		unmask_irqs(1UL << d->irq);
 }
 
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 7867266f9716..ac950be1318e 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(HV_MsgState, msg_state);
 void init_messaging(void)
 {
 	/* Allocate storage for messages in kernel space */
-	HV_MsgState *state = &__get_cpu_var(msg_state);
+	HV_MsgState *state = this_cpu_ptr(&msg_state);
 	int rc = hv_register_message_state(state);
 	if (rc != HV_OK)
 		panic("hv_register_message_state: error %d", rc);
@@ -96,7 +96,7 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
 			struct hv_driver_cb *cb =
 				(struct hv_driver_cb *)him->intarg;
 			cb->callback(cb, him->intdata);
-			__get_cpu_var(irq_stat).irq_hv_msg_count++;
+			__this_cpu_inc(irq_stat.irq_hv_msg_count);
 		}
 	}
 
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 16ed58948757..0050cbc1d9de 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -64,7 +64,7 @@ early_param("idle", idle_setup);
 
 void arch_cpu_idle(void)
 {
-	__get_cpu_var(irq_stat).idle_timestamp = jiffies;
+	__this_cpu_write(irq_stat.idle_timestamp, jiffies);
 	_cpu_idle();
 }
 
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 112ababa9e55..b9736ded06f2 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1218,7 +1218,8 @@ static void __init validate_hv(void)
 	 * various asid variables to their appropriate initial states.
 	 */
 	asid_range = hv_inquire_asid(0);
-	__get_cpu_var(current_asid) = min_asid = asid_range.start;
+	min_asid = asid_range.start;
+	__this_cpu_write(current_asid, min_asid);
 	max_asid = asid_range.start + asid_range.size - 1;
 
 	if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
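This is the one hunk that grows by a line: __this_cpu_write() is an operation, not an lvalue, so the old chained assignment cannot survive the conversion and is split in two (hence setup.c's 2 insertions against 1 deletion in the diffstat). Schematically:

        /* Old, legal only because __get_cpu_var() expanded to an lvalue:
         *      __get_cpu_var(current_asid) = min_asid = asid_range.start;
         */

        /* New: the value is passed as an argument instead. */
        min_asid = asid_range.start;
        __this_cpu_write(current_asid, min_asid);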
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index de07fa7d1315..6cb2ce31b5a2 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -740,7 +740,7 @@ static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
 
 void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
 {
-	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+	unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
 	struct thread_info *info = (void *)current_thread_info();
 	int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
 	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
@@ -766,7 +766,7 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
 
 void single_step_once(struct pt_regs *regs)
 {
-	unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+	unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
 	unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
 
 	*ss_pc = regs->pc;
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 01e8ab29f43a..3dbedb0174b3 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -188,7 +188,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
 /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
 static irqreturn_t handle_reschedule_ipi(int irq, void *token)
 {
-	__get_cpu_var(irq_stat).irq_resched_count++;
+	__this_cpu_inc(irq_stat.irq_resched_count);
 	scheduler_ipi();
 
 	return IRQ_HANDLED;
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 732e9d138661..0d59a1b60c74 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -41,7 +41,7 @@ void __init smp_prepare_boot_cpu(void)
 	int cpu = smp_processor_id();
 	set_cpu_online(cpu, 1);
 	set_cpu_present(cpu, 1);
-	__get_cpu_var(cpu_state) = CPU_ONLINE;
+	__this_cpu_write(cpu_state, CPU_ONLINE);
 
 	init_messaging();
 }
@@ -158,7 +158,7 @@ static void start_secondary(void)
 	/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */
 
 	/* Initialize the current asid for our first page table. */
-	__get_cpu_var(current_asid) = min_asid;
+	__this_cpu_write(current_asid, min_asid);
 
 	/* Set up this thread as another owner of the init_mm */
 	atomic_inc(&init_mm.mm_count);
@@ -201,7 +201,7 @@ void online_secondary(void)
 	notify_cpu_starting(smp_processor_id());
 
 	set_cpu_online(smp_processor_id(), 1);
-	__get_cpu_var(cpu_state) = CPU_ONLINE;
+	__this_cpu_write(cpu_state, CPU_ONLINE);
 
 	/* Set up tile-specific state for this cpu. */
 	setup_cpu(0);
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index d8fbc289e680..ab1c9fe2aa7f 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -162,7 +162,7 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
 
 void setup_tile_timer(void)
 {
-	struct clock_event_device *evt = &__get_cpu_var(tile_timer);
+	struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
 
 	/* Fill in fields that are speed-specific. */
 	clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
@@ -182,7 +182,7 @@ void setup_tile_timer(void)
 void do_timer_interrupt(struct pt_regs *regs, int fault_num)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct clock_event_device *evt = &__get_cpu_var(tile_timer);
+	struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
 
 	/*
 	 * Mask the timer interrupt here, since we are a oneshot timer
@@ -194,7 +194,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
 	irq_enter();
 
 	/* Track interrupt count. */
-	__get_cpu_var(irq_stat).irq_timer_count++;
+	__this_cpu_inc(irq_stat.irq_timer_count);
 
 	/* Call the generic timer handler */
 	evt->event_handler(evt);
@@ -235,7 +235,7 @@ cycles_t ns2cycles(unsigned long nsecs)
 	 * We do not have to disable preemption here as each core has the same
 	 * clock frequency.
 	 */
-	struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer);
+	struct clock_event_device *dev = raw_cpu_ptr(&tile_timer);
 
 	/*
 	 * as in clocksource.h and x86's timer.h, we split the calculation
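Here __raw_get_cpu_var() maps to raw_cpu_ptr(), again skipping the preemption check: as the comment notes, every core runs at the same clock frequency, so whichever cpu's tile_timer the pointer lands on carries identical mult/shift values. A hedged sketch of the kind of use this enables (the exact split multiply lives in the lines elided below):

        struct clock_event_device *dev = raw_cpu_ptr(&tile_timer);
        /* ns -> cycles by the generic mult/shift convention (sketch only): */
        u64 cycles = ((u64)nsecs * dev->mult) >> dev->shift;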
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 0dc218294770..6aa2f2625447 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -103,7 +103,7 @@ static void kmap_atomic_register(struct page *page, int type,
 	spin_lock(&amp_lock);
 
 	/* With interrupts disabled, now fill in the per-cpu info. */
-	amp = &__get_cpu_var(amps).per_type[type];
+	amp = this_cpu_ptr(&amps.per_type[type]);
 	amp->page = page;
 	amp->cpu = smp_processor_id();
 	amp->va = va;
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index bfb3127b4df9..f46a152b09e6 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -593,14 +593,14 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	interrupt_mask_set_mask(-1ULL);
 	rc = flush_and_install_context(__pa(pgtables),
 				       init_pgprot((unsigned long)pgtables),
-				       __get_cpu_var(current_asid),
+				       __this_cpu_read(current_asid),
 				       cpumask_bits(my_cpu_mask));
 	interrupt_mask_restore_mask(irqmask);
 	BUG_ON(rc != 0);
 
 	/* Copy the page table back to the normal swapper_pg_dir. */
 	memcpy(pgd_base, pgtables, sizeof(pgtables));
-	__install_page_table(pgd_base, __get_cpu_var(current_asid),
+	__install_page_table(pgd_base, __this_cpu_read(current_asid),
 			     swapper_pgprot);
 
 	/*