path: root/kernel/sched.c
author     Venkatesh Pallipadi <venki@google.com>    2010-10-04 20:03:19 -0400
committer  Ingo Molnar <mingo@elte.hu>               2010-10-18 14:52:24 -0400
commit     b52bfee445d315549d41eacf2fa7c156e7d153d5 (patch)
tree       740f3aa24e2afad42772a662ceb460c555003c0f /kernel/sched.c
parent     6cdd5199daf0cb7b0fcc8dca941af08492612887 (diff)
sched: Add IRQ_TIME_ACCOUNTING, finer accounting of irq time
s390/powerpc/ia64 have support for CONFIG_VIRT_CPU_ACCOUNTING, which does fine-grained accounting of user, system, hardirq and softirq times. Adding that option on archs like x86 would be challenging, however, given the state of TSC reliability on various platforms and the overhead it would add on syscall entry/exit.

Instead, add a lighter variant that only does finer accounting of hardirq and softirq times, providing precise irq times (instead of timer-tick-based samples). This accounting is added under a new config option, CONFIG_IRQ_TIME_ACCOUNTING, so that there is no overhead for users not interested in paying the performance penalty.

The accounting is based on sched_clock and the code is generic, so other archs may find it useful as well.

This patch just adds the core logic and does not enable it yet.

Signed-off-by: Venkatesh Pallipadi <venki@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1286237003-12406-5-git-send-email-venki@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
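As a rough sketch of how an architecture would opt in (not part of this patch: the Kconfig entry and the arch hookup come in later patches of this series, and the names below are placeholders), the arch only has to call enable_sched_clock_irqtime() once it trusts its sched_clock():

/*
 * Hypothetical arch init code: opt into fine-grained irq time
 * accounting once sched_clock() is known to be usable.
 * example_sched_clock_is_stable() stands in for whatever check the
 * architecture uses (on x86, essentially the TSC reliability checks).
 */
static bool example_sched_clock_is_stable(void);        /* placeholder check */

static void __init example_arch_init_irqtime(void)
{
        if (example_sched_clock_is_stable())
                enable_sched_clock_irqtime();
}

With CONFIG_IRQ_TIME_ACCOUNTING left unset, none of the new code is built and account_system_vtime() stays a no-op as before, which is how the no-overhead point above is met.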
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c | 49
1 file changed, 49 insertions(+), 0 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 089be8adb074..9b302e355791 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1908,6 +1908,55 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
         dec_nr_running(rq);
 }
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+
+static DEFINE_PER_CPU(u64, cpu_hardirq_time);
+static DEFINE_PER_CPU(u64, cpu_softirq_time);
+
+static DEFINE_PER_CPU(u64, irq_start_time);
+static int sched_clock_irqtime;
+
+void enable_sched_clock_irqtime(void)
+{
+        sched_clock_irqtime = 1;
+}
+
+void disable_sched_clock_irqtime(void)
+{
+        sched_clock_irqtime = 0;
+}
+
+void account_system_vtime(struct task_struct *curr)
+{
+        unsigned long flags;
+        int cpu;
+        u64 now, delta;
+
+        if (!sched_clock_irqtime)
+                return;
+
+        local_irq_save(flags);
+
+        now = sched_clock();
+        cpu = smp_processor_id();
+        delta = now - per_cpu(irq_start_time, cpu);
+        per_cpu(irq_start_time, cpu) = now;
+        /*
+         * We do not account for softirq time from ksoftirqd here.
+         * We want to continue accounting softirq time to the ksoftirqd
+         * thread in that case, so as not to confuse the scheduler with a
+         * special task that does not consume any time but still wants to run.
+         */
+        if (hardirq_count())
+                per_cpu(cpu_hardirq_time, cpu) += delta;
+        else if (in_serving_softirq() && !(curr->flags & PF_KSOFTIRQD))
+                per_cpu(cpu_softirq_time, cpu) += delta;
+
+        local_irq_restore(flags);
+}
+
+#endif
+
 #include "sched_idletask.c"
 #include "sched_fair.c"
 #include "sched_rt.c"
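For illustration, a consumer of the accumulated counters could look roughly like the sketch below (hypothetical; this patch only accumulates the per-CPU totals, and wiring them into the rest of the accounting happens in follow-up patches):

/*
 * Hypothetical debug helper, not part of this patch: total hardirq +
 * softirq time observed on a CPU since boot, in sched_clock() units
 * (nanoseconds). Assumes a possibly racy read of the per-CPU 64-bit
 * counters is acceptable; it would have to live in kernel/sched.c,
 * since the counters are static there.
 */
static u64 example_total_irq_time(int cpu)
{
        return per_cpu(cpu_hardirq_time, cpu) + per_cpu(cpu_softirq_time, cpu);
}

Note that each account_system_vtime() call closes the interval since the previous call and charges it according to the context the CPU is in at that moment; an interval that ends in plain process context only resets irq_start_time and is charged to neither counter.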