Diffstat (limited to 'arch/powerpc/kernel/time.c')
-rw-r--r--  arch/powerpc/kernel/time.c | 81
1 file changed, 54 insertions(+), 27 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6b4d01d1ccf0..4e7759c8ca30 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -167,7 +167,15 @@ DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
 
 cputime_t cputime_one_jiffy;
 
+#ifdef CONFIG_PPC_SPLPAR
 void (*dtl_consumer)(struct dtl_entry *, u64);
+#endif
+
+#ifdef CONFIG_PPC64
+#define get_accounting(tsk)	(&get_paca()->accounting)
+#else
+#define get_accounting(tsk)	(&task_thread_info(tsk)->accounting)
+#endif
 
 static void calc_cputime_factors(void)
 {
@@ -187,7 +195,7 @@ static void calc_cputime_factors(void)
  * Read the SPURR on systems that have it, otherwise the PURR,
  * or if that doesn't exist return the timebase value passed in.
  */
-static u64 read_spurr(u64 tb)
+static unsigned long read_spurr(unsigned long tb)
 {
 	if (cpu_has_feature(CPU_FTR_SPURR))
 		return mfspr(SPRN_SPURR);
@@ -250,8 +258,8 @@ static u64 scan_dispatch_log(u64 stop_tb)
 void accumulate_stolen_time(void)
 {
 	u64 sst, ust;
-
 	u8 save_soft_enabled = local_paca->soft_enabled;
+	struct cpu_accounting_data *acct = &local_paca->accounting;
 
 	/* We are called early in the exception entry, before
 	 * soft/hard_enabled are sync'ed to the expected state
@@ -261,10 +269,10 @@ void accumulate_stolen_time(void)
 	 */
 	local_paca->soft_enabled = 0;
 
-	sst = scan_dispatch_log(local_paca->starttime_user);
-	ust = scan_dispatch_log(local_paca->starttime);
-	local_paca->system_time -= sst;
-	local_paca->user_time -= ust;
+	sst = scan_dispatch_log(acct->starttime_user);
+	ust = scan_dispatch_log(acct->starttime);
+	acct->system_time -= sst;
+	acct->user_time -= ust;
 	local_paca->stolen_time += ust + sst;
 
 	local_paca->soft_enabled = save_soft_enabled;
@@ -276,7 +284,7 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
 
 	if (get_paca()->dtl_ridx != be64_to_cpu(get_lppaca()->dtl_idx)) {
 		stolen = scan_dispatch_log(stop_tb);
-		get_paca()->system_time -= stolen;
+		get_paca()->accounting.system_time -= stolen;
 	}
 
 	stolen += get_paca()->stolen_time;
@@ -296,27 +304,29 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
  * Account time for a transition between system, hard irq
  * or soft irq state.
  */
-static u64 vtime_delta(struct task_struct *tsk,
-		       u64 *sys_scaled, u64 *stolen)
+static unsigned long vtime_delta(struct task_struct *tsk,
+				 unsigned long *sys_scaled,
+				 unsigned long *stolen)
 {
-	u64 now, nowscaled, deltascaled;
-	u64 udelta, delta, user_scaled;
+	unsigned long now, nowscaled, deltascaled;
+	unsigned long udelta, delta, user_scaled;
+	struct cpu_accounting_data *acct = get_accounting(tsk);
 
 	WARN_ON_ONCE(!irqs_disabled());
 
 	now = mftb();
 	nowscaled = read_spurr(now);
-	get_paca()->system_time += now - get_paca()->starttime;
-	get_paca()->starttime = now;
-	deltascaled = nowscaled - get_paca()->startspurr;
-	get_paca()->startspurr = nowscaled;
+	acct->system_time += now - acct->starttime;
+	acct->starttime = now;
+	deltascaled = nowscaled - acct->startspurr;
+	acct->startspurr = nowscaled;
 
 	*stolen = calculate_stolen_time(now);
 
-	delta = get_paca()->system_time;
-	get_paca()->system_time = 0;
-	udelta = get_paca()->user_time - get_paca()->utime_sspurr;
-	get_paca()->utime_sspurr = get_paca()->user_time;
+	delta = acct->system_time;
+	acct->system_time = 0;
+	udelta = acct->user_time - acct->utime_sspurr;
+	acct->utime_sspurr = acct->user_time;
 
 	/*
 	 * Because we don't read the SPURR on every kernel entry/exit,
@@ -338,14 +348,14 @@ static u64 vtime_delta(struct task_struct *tsk,
 			*sys_scaled = deltascaled;
 		}
 	}
-	get_paca()->user_time_scaled += user_scaled;
+	acct->user_time_scaled += user_scaled;
 
 	return delta;
 }
 
 void vtime_account_system(struct task_struct *tsk)
 {
-	u64 delta, sys_scaled, stolen;
+	unsigned long delta, sys_scaled, stolen;
 
 	delta = vtime_delta(tsk, &sys_scaled, &stolen);
 	account_system_time(tsk, 0, delta, sys_scaled);
@@ -356,7 +366,7 @@ EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
-	u64 delta, sys_scaled, stolen;
+	unsigned long delta, sys_scaled, stolen;
 
 	delta = vtime_delta(tsk, &sys_scaled, &stolen);
 	account_idle_time(delta + stolen);
@@ -374,15 +384,32 @@ void vtime_account_idle(struct task_struct *tsk)
 void vtime_account_user(struct task_struct *tsk)
 {
 	cputime_t utime, utimescaled;
+	struct cpu_accounting_data *acct = get_accounting(tsk);
 
-	utime = get_paca()->user_time;
-	utimescaled = get_paca()->user_time_scaled;
-	get_paca()->user_time = 0;
-	get_paca()->user_time_scaled = 0;
-	get_paca()->utime_sspurr = 0;
+	utime = acct->user_time;
+	utimescaled = acct->user_time_scaled;
+	acct->user_time = 0;
+	acct->user_time_scaled = 0;
+	acct->utime_sspurr = 0;
 	account_user_time(tsk, utime, utimescaled);
 }
 
+#ifdef CONFIG_PPC32
+/*
+ * Called from the context switch with interrupts disabled, to charge all
+ * accumulated times to the current process, and to prepare accounting on
+ * the next process.
+ */
+void arch_vtime_task_switch(struct task_struct *prev)
+{
+	struct cpu_accounting_data *acct = get_accounting(current);
+
+	acct->starttime = get_accounting(prev)->starttime;
+	acct->system_time = 0;
+	acct->user_time = 0;
+}
+#endif /* CONFIG_PPC32 */
+
 #else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 #define calc_cputime_factors()
 #endif
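
Note: the hunks above move the per-CPU vtime accounting state out of the paca (and thread_info on 32-bit) into a common struct cpu_accounting_data, reached through the new get_accounting() helper. As a rough sketch, not taken from this page, the structure presumably looks something like the following; the field set is inferred from the accesses shown in the diff, and the authoritative definition lives in a shared powerpc header (likely arch/powerpc/include/asm/accounting.h) and may differ in types or ordering.

/* Sketch only: inferred from the fields referenced in the diff above. */
struct cpu_accounting_data {
	unsigned long user_time;	/* accumulated usermode TB ticks */
	unsigned long system_time;	/* accumulated system TB ticks */
	unsigned long user_time_scaled;	/* accumulated usermode SPURR ticks */
	unsigned long starttime;	/* TB value snapshot */
	unsigned long starttime_user;	/* TB value on exit to usermode */
	unsigned long startspurr;	/* SPURR value snapshot */
	unsigned long utime_sspurr;	/* ->user_time when ->startspurr was set */
};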