Diffstat (limited to 'arch/x86')
 -rw-r--r--  arch/x86/xen/enlighten.c  | 4 ++--
 -rw-r--r--  arch/x86/xen/multicalls.h | 2 +-
 -rw-r--r--  arch/x86/xen/spinlock.c   | 8 ++++----
 -rw-r--r--  arch/x86/xen/time.c       | 8 ++++----
 4 files changed, 11 insertions(+), 11 deletions(-)
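
Every hunk below makes the same mechanical substitution: the deprecated __get_cpu_var() lvalue accessor is replaced by the __this_cpu_read()/__this_cpu_write() operations, which on x86 can compile down to a single %gs-relative instruction instead of first computing the per-cpu address. A minimal sketch of the pattern, assuming a kernel context; the variable and function names below are hypothetical, for illustration only:

	#include <linux/percpu.h>

	/* hypothetical per-cpu variable, not part of this patch */
	static DEFINE_PER_CPU(unsigned long, demo_counter);

	static void demo_update(void)
	{
		unsigned long v;

		/* old style: __get_cpu_var() yields an lvalue
		 *   v = __get_cpu_var(demo_counter);
		 *   __get_cpu_var(demo_counter) = v + 1;
		 */

		/* new style: explicit read and write operations */
		v = __this_cpu_read(demo_counter);
		__this_cpu_write(demo_counter, v + 1);
	}

Note that the double-underscore forms leave preemption handling to the caller; the call sites converted here already take care of that themselves, as the preempt_disable() and local_irq_save() calls visible in the surrounding context show.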
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 44dcad43989d..aa8c89ae54cf 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -574,8 +574,8 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
 
 	preempt_disable();
 
-	start = __get_cpu_var(idt_desc).address;
-	end = start + __get_cpu_var(idt_desc).size + 1;
+	start = __this_cpu_read(idt_desc.address);
+	end = start + __this_cpu_read(idt_desc.size) + 1;
 
 	xen_mc_flush();
 
diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h
index 9e565da5d1f7..4ec8035e3216 100644
--- a/arch/x86/xen/multicalls.h
+++ b/arch/x86/xen/multicalls.h
@@ -22,7 +22,7 @@ static inline void xen_mc_batch(void)
 	unsigned long flags;
 	/* need to disable interrupts until this entry is complete */
 	local_irq_save(flags);
-	__get_cpu_var(xen_mc_irq_flags) = flags;
+	__this_cpu_write(xen_mc_irq_flags, flags);
 }
 
 static inline struct multicall_space xen_mc_entry(size_t args)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 23e061b9327b..cc9b1e182fcf 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -159,8 +159,8 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
 {
 	struct xen_spinlock *prev;
 
-	prev = __get_cpu_var(lock_spinners);
-	__get_cpu_var(lock_spinners) = xl;
+	prev = __this_cpu_read(lock_spinners);
+	__this_cpu_write(lock_spinners, xl);
 
 	wmb();		/* set lock of interest before count */
 
@@ -179,14 +179,14 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
 	asm(LOCK_PREFIX " decw %0"
 	    : "+m" (xl->spinners) : : "memory");
 	wmb();		/* decrement count before restoring lock */
-	__get_cpu_var(lock_spinners) = prev;
+	__this_cpu_write(lock_spinners, prev);
 }
 
 static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	struct xen_spinlock *prev;
-	int irq = __get_cpu_var(lock_kicker_irq);
+	int irq = __this_cpu_read(lock_kicker_irq);
 	int ret;
 	u64 start;
 
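
The spinlock.c hunks preserve the nesting discipline around lock_spinners: spinning_lock() publishes the lock this CPU is waiting on while remembering the previous occupant of the slot, and unspinning_lock() restores it, so a spin taken from interrupt context does not clobber the record of the interrupted one. A condensed userspace analogue of that publish/restore idiom, with hypothetical names and a single-threaded stand-in for the per-cpu slot:

	#include <stddef.h>
	#include <assert.h>

	struct demo_lock { int dummy; };

	/* stand-in for the per-cpu lock_spinners slot */
	static struct demo_lock *spinning_on;

	/* publish the lock we spin on; return the previous occupant */
	static struct demo_lock *start_spinning(struct demo_lock *l)
	{
		struct demo_lock *prev = spinning_on;
		spinning_on = l;
		return prev;
	}

	/* restore whatever the interrupted context was spinning on */
	static void stop_spinning(struct demo_lock *prev)
	{
		spinning_on = prev;
	}

	int main(void)
	{
		struct demo_lock outer, inner;
		struct demo_lock *p1 = start_spinning(&outer);
		/* an "interrupt" arrives and spins on a second lock */
		struct demo_lock *p2 = start_spinning(&inner);
		stop_spinning(p2);	/* back to the outer spin */
		assert(spinning_on == &outer);
		stop_spinning(p1);
		assert(spinning_on == NULL);
		return 0;
	}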
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index b2bb5aa3b054..ef8930f51b09 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -135,24 +135,24 @@ static void do_stolen_accounting(void)
 
 	/* Add the appropriate number of ticks of stolen time,
 	   including any left-overs from last time. */
-	stolen = runnable + offline + __get_cpu_var(xen_residual_stolen);
+	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);
 
 	if (stolen < 0)
 		stolen = 0;
 
 	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
-	__get_cpu_var(xen_residual_stolen) = stolen;
+	__this_cpu_write(xen_residual_stolen, stolen);
 	account_steal_ticks(ticks);
 
 	/* Add the appropriate number of ticks of blocked time,
 	   including any left-overs from last time. */
-	blocked += __get_cpu_var(xen_residual_blocked);
+	blocked += __this_cpu_read(xen_residual_blocked);
 
 	if (blocked < 0)
 		blocked = 0;
 
 	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
-	__get_cpu_var(xen_residual_blocked) = blocked;
+	__this_cpu_write(xen_residual_blocked, blocked);
 	account_idle_ticks(ticks);
 }
 
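
The time.c hunks keep the residual-carry idiom intact: accumulated stolen or blocked nanoseconds are converted to whole ticks with iter_div_u64_rem(), and the sub-tick remainder is written back to the per-cpu residual so nothing is lost between samples. A standalone sketch of the same carry arithmetic in plain C, with an assumed NS_PER_TICK value:

	#include <stdint.h>
	#include <stdio.h>

	#define NS_PER_TICK 10000000ULL	/* assumed: 10 ms, i.e. HZ=100 */

	/* mimic the kernel pattern: whole ticks out, remainder carried */
	static uint64_t ns_to_ticks(uint64_t ns, uint64_t *residual)
	{
		uint64_t total = ns + *residual;	/* include last carry */
		*residual = total % NS_PER_TICK;	/* sub-tick left-over */
		return total / NS_PER_TICK;
	}

	int main(void)
	{
		uint64_t residual = 0;
		/* two 15 ms samples: 1 tick with 5 ms carried, then 2 ticks */
		printf("%llu\n", (unsigned long long)ns_to_ticks(15000000, &residual));
		printf("%llu\n", (unsigned long long)ns_to_ticks(15000000, &residual));
		return 0;
	}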