author     Frederic Weisbecker <fweisbec@gmail.com>   2012-07-25 01:56:04 -0400
committer  Frederic Weisbecker <fweisbec@gmail.com>   2013-01-27 13:23:27 -0500
commit     abf917cd91cbb73952758f9741e2fa65002a48ee (patch)
tree       5f975b87615dcaed9c98bc74b4548d568b92dcbc /kernel
parent     ae8dda5c473bf1a85913942adcaac449e5754bf3 (diff)
cputime: Generic on-demand virtual cputime accounting
If we want to stop the tick beyond idle, we need to be able to account the
cputime without using the tick. Virtual-based cputime accounting solves that
problem by hooking into kernel/user boundaries.

However, implementing CONFIG_VIRT_CPU_ACCOUNTING requires low level arch hooks
and involves more overhead. But we already have a generic context tracking
subsystem that is required for RCU by archs which plan to shut down the tick
outside idle.

This patch implements a generic virtual-based cputime accounting that relies
on these generic kernel/user hooks.

There are some upsides of doing this:

- This requires no arch code to implement CONFIG_VIRT_CPU_ACCOUNTING if
  context tracking is already built (already necessary for RCU in full
  tickless mode).

- We can rely on the generic context tracking subsystem to dynamically
  (de)activate the hooks, so that we can switch anytime between virtual and
  tick-based accounting. This way we don't have the overhead of the virtual
  accounting when the tick is running periodically.

And one downside:

- There is probably more overhead than a native virtual-based cputime
  accounting. But this relies on hooks that are already set anyway.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Namhyung Kim <namhyung.kim@lge.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
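To make the idea concrete, here is a minimal userspace model of the on-demand
accounting scheme (illustration only: sched_clock_ns(), get_vtime_delta_model()
and the account_*_model() helpers are made-up stand-ins, not kernel APIs; the
real per-CPU implementation is in the kernel/sched/cputime.c hunk below):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* Userspace stand-ins; cputime_snap models the per-CPU snapshot below. */
    static uint64_t cputime_snap, user_ns, system_ns;

    static uint64_t sched_clock_ns(void)            /* stand-in for sched_clock() */
    {
            struct timespec ts;
            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    /* Time elapsed since the previous user/kernel transition. */
    static uint64_t get_vtime_delta_model(void)
    {
            uint64_t now = sched_clock_ns();
            uint64_t delta = now - cputime_snap;

            cputime_snap = now;
            return delta;
    }

    /* Kernel entry (user_exit() path): flush the time spent in user mode. */
    static void account_user_model(void)   { user_ns   += get_vtime_delta_model(); }
    /* Return to user mode (user_enter() path): flush the kernel time. */
    static void account_system_model(void) { system_ns += get_vtime_delta_model(); }

    int main(void)
    {
            cputime_snap = sched_clock_ns();

            /* Pretend some kernel work happens, then we return to user mode... */
            for (volatile long i = 0; i < 10000000; i++)
                    ;
            account_system_model();

            /* ...then some user work, then a syscall enters the kernel. */
            for (volatile long i = 0; i < 10000000; i++)
                    ;
            account_user_model();

            printf("system=%llu ns user=%llu ns\n",
                   (unsigned long long)system_ns, (unsigned long long)user_ns);
            return 0;
    }

The kernel version below does the same thing per CPU, using sched_clock() for
the timestamp and nsecs_to_cputime() to convert the delta before feeding it to
the existing account_*_time() helpers.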
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/context_tracking.c   6
-rw-r--r--  kernel/sched/cputime.c     61
2 files changed, 61 insertions(+), 6 deletions(-)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 54f471e536dc..9002e92e6372 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -30,8 +30,9 @@ void user_enter(void)
         local_irq_save(flags);
         if (__this_cpu_read(context_tracking.active) &&
             __this_cpu_read(context_tracking.state) != IN_USER) {
-                __this_cpu_write(context_tracking.state, IN_USER);
+                vtime_user_enter(current);
                 rcu_user_enter();
+                __this_cpu_write(context_tracking.state, IN_USER);
         }
         local_irq_restore(flags);
 }
@@ -53,8 +54,9 @@ void user_exit(void)
 
         local_irq_save(flags);
         if (__this_cpu_read(context_tracking.state) == IN_USER) {
-                __this_cpu_write(context_tracking.state, IN_KERNEL);
                 rcu_user_exit();
+                vtime_user_exit(current);
+                __this_cpu_write(context_tracking.state, IN_KERNEL);
         }
         local_irq_restore(flags);
 }
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 5849448b981e..1c964eced92c 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -3,6 +3,7 @@
 #include <linux/tsacct_kern.h>
 #include <linux/kernel_stat.h>
 #include <linux/static_key.h>
+#include <linux/context_tracking.h>
 #include "sched.h"
 
 
@@ -479,7 +480,9 @@ void vtime_task_switch(struct task_struct *prev)
         else
                 vtime_account_system(prev);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
         vtime_account_user(prev);
+#endif
         arch_vtime_task_switch(prev);
 }
 #endif
@@ -495,10 +498,24 @@ void vtime_task_switch(struct task_struct *prev)
 #ifndef __ARCH_HAS_VTIME_ACCOUNT
 void vtime_account(struct task_struct *tsk)
 {
-        if (in_interrupt() || !is_idle_task(tsk))
-                vtime_account_system(tsk);
-        else
-                vtime_account_idle(tsk);
+        if (!in_interrupt()) {
+                /*
+                 * If we interrupted user, context_tracking_in_user()
+                 * is 1 because the context tracking don't hook
+                 * on irq entry/exit. This way we know if
+                 * we need to flush user time on kernel entry.
+                 */
+                if (context_tracking_in_user()) {
+                        vtime_account_user(tsk);
+                        return;
+                }
+
+                if (is_idle_task(tsk)) {
+                        vtime_account_idle(tsk);
+                        return;
+                }
+        }
+        vtime_account_system(tsk);
 }
 EXPORT_SYMBOL_GPL(vtime_account);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
@@ -583,3 +600,39 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
         cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
 }
 #endif
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+static DEFINE_PER_CPU(unsigned long long, cputime_snap);
+
+static cputime_t get_vtime_delta(void)
+{
+        unsigned long long delta;
+
+        delta = sched_clock() - __this_cpu_read(cputime_snap);
+        __this_cpu_add(cputime_snap, delta);
+
+        /* CHECKME: always safe to convert nsecs to cputime? */
+        return nsecs_to_cputime(delta);
+}
+
+void vtime_account_system(struct task_struct *tsk)
+{
+        cputime_t delta_cpu = get_vtime_delta();
+
+        account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
+}
+
+void vtime_account_user(struct task_struct *tsk)
+{
+        cputime_t delta_cpu = get_vtime_delta();
+
+        account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
+}
+
+void vtime_account_idle(struct task_struct *tsk)
+{
+        cputime_t delta_cpu = get_vtime_delta();
+
+        account_idle_time(delta_cpu);
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
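
As a side note on the reworked vtime_account() above, its dispatch order can be
modeled in plain C (a sketch only; the boolean parameters stand in for
in_interrupt(), context_tracking_in_user() and is_idle_task(), which are of
course not function arguments in the kernel):

    #include <stdbool.h>
    #include <stdio.h>

    /* Buckets mirroring account_system_time()/account_user_time()/account_idle_time(). */
    enum bucket { SYSTEM, USER, IDLE };

    /*
     * Model of the dispatch order in the reworked vtime_account():
     * interrupt context is always charged as system time; otherwise the
     * delta is charged to the context that was just left.
     */
    static enum bucket vtime_account_model(bool in_interrupt,
                                           bool context_tracking_in_user,
                                           bool is_idle_task)
    {
            if (!in_interrupt) {
                    if (context_tracking_in_user)
                            return USER;    /* flush user time on kernel entry */
                    if (is_idle_task)
                            return IDLE;    /* idle loop time */
            }
            return SYSTEM;                  /* irq context or plain kernel time */
    }

    int main(void)
    {
            /* Irq context is charged as system time regardless of what it interrupted. */
            printf("%d\n", vtime_account_model(true, true, false));   /* SYSTEM */
            /* Kernel entry from user mode flushes the pending user time. */
            printf("%d\n", vtime_account_model(false, true, false));  /* USER */
            /* The idle task's time goes to the idle bucket. */
            printf("%d\n", vtime_account_model(false, false, true));  /* IDLE */
            return 0;
    }

Compared with the old two-way branch, the only new case is the user bucket,
taken when a kernel entry finds the CPU still marked as running in user
context.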