 arch/powerpc/kernel/time.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2c8564d54e4d..0a8a820672f4 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -222,19 +222,28 @@ struct cpu_purr_data {
 	int	initialized;	/* thread is running */
 	u64	tb;		/* last TB value read */
 	u64	purr;		/* last PURR value read */
-	spinlock_t lock;
 };
 
+/*
+ * Each entry in the cpu_purr_data array is manipulated only by its
+ * "owner" cpu -- usually in the timer interrupt but also occasionally
+ * in process context for cpu online.  As long as cpus do not touch
+ * each others' cpu_purr_data, disabling local interrupts is
+ * sufficient to serialize accesses.
+ */
 static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
 
 static void snapshot_tb_and_purr(void *data)
 {
+	unsigned long flags;
 	struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);
 
+	local_irq_save(flags);
 	p->tb = mftb();
 	p->purr = mfspr(SPRN_PURR);
 	wmb();
 	p->initialized = 1;
+	local_irq_restore(flags);
 }
 
 /*
@@ -242,15 +251,14 @@ static void snapshot_tb_and_purr(void *data)
  */
 void snapshot_timebases(void)
 {
-	int cpu;
-
 	if (!cpu_has_feature(CPU_FTR_PURR))
 		return;
-	for_each_possible_cpu(cpu)
-		spin_lock_init(&per_cpu(cpu_purr_data, cpu).lock);
 	on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
 }
 
+/*
+ * Must be called with interrupts disabled.
+ */
 void calculate_steal_time(void)
 {
 	u64 tb, purr;
@@ -262,7 +270,6 @@ void calculate_steal_time(void)
 	pme = &per_cpu(cpu_purr_data, smp_processor_id());
 	if (!pme->initialized)
 		return;		/* this can happen in early boot */
-	spin_lock(&pme->lock);
 	tb = mftb();
 	purr = mfspr(SPRN_PURR);
 	stolen = (tb - pme->tb) - (purr - pme->purr);
@@ -270,7 +277,6 @@ void calculate_steal_time(void)
 	account_steal_time(current, stolen);
 	pme->tb = tb;
 	pme->purr = purr;
-	spin_unlock(&pme->lock);
 }
 
 /*
@@ -284,12 +290,12 @@ static void snapshot_purr(void)
 
 	if (!cpu_has_feature(CPU_FTR_PURR))
 		return;
+	local_irq_save(flags);
 	pme = &per_cpu(cpu_purr_data, smp_processor_id());
-	spin_lock_irqsave(&pme->lock, flags);
 	pme->tb = mftb();
 	pme->purr = mfspr(SPRN_PURR);
 	pme->initialized = 1;
-	spin_unlock_irqrestore(&pme->lock, flags);
+	local_irq_restore(flags);
 }
 
 #endif	/* CONFIG_PPC_SPLPAR */
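
/*
 * Illustrative sketch, not part of the patch above: the locking rule the
 * new comment describes, shown on a hypothetical per-cpu structure.  Each
 * entry is written only by its owner cpu -- from the timer interrupt or
 * from process context during cpu online -- so masking local interrupts
 * is enough to keep those two paths from interleaving; no spinlock is
 * needed.  The struct and function names below are made up for the
 * example; the per-cpu and irq-mask helpers are the same ones the patch
 * uses.
 */
#include <linux/percpu.h>
#include <linux/irqflags.h>
#include <asm/time.h>		/* mftb() */
#include <asm/reg.h>		/* mfspr(), SPRN_PURR */

struct owner_only_sample {	/* hypothetical, mirrors cpu_purr_data */
	u64 tb;
	u64 purr;
};

static DEFINE_PER_CPU(struct owner_only_sample, owner_only_sample);

static void record_owner_only_sample(void)
{
	unsigned long flags;
	struct owner_only_sample *s;

	local_irq_save(flags);	/* serialize against this cpu's timer irq */
	s = &__get_cpu_var(owner_only_sample);
	s->tb = mftb();
	s->purr = mfspr(SPRN_PURR);
	local_irq_restore(flags);
}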