author    Michael Neuling <mikey@neuling.org>  2007-10-18 06:06:34 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-18 17:37:28 -0400
commit    c66f08be7e3ad0a28bcd9a0aef766fdf08ea0ec6 (patch)
tree      d147c0a43a66973014d924a6020388a249c509a7
parent    898eb71cb17644964c5895fb190e79e3d0c49679 (diff)
Add scaled time to taskstats based process accounting
This adds items to the taskstats struct to account for user and system time scaled by the CPU frequency and instruction issue rate. It also adds account_(user|system)_time_scaled callbacks which architectures can use to account for time using this mechanism.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Jay Lan <jlan@engr.sgi.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
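[Editor's sketch, not part of the patch] To illustrate how an architecture might feed the new hooks from its per-tick accounting path: the callbacks, jiffies_to_cputime() and HARDIRQ_OFFSET below are the ones this patch uses, while arch_issue_scale() is a hypothetical helper standing in for whatever hardware ratio (actual vs. nominal instruction issue rate, or current vs. nominal frequency) the architecture can measure.

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/hardirq.h>

/*
 * Illustration only: arch_issue_scale() is hypothetical and would return
 * a per-mille scaling factor derived from hardware counters.
 */
static void arch_account_tick_scaled(struct task_struct *p, int user_tick)
{
	cputime_t tick = jiffies_to_cputime(1);
	cputime_t scaled = tick * arch_issue_scale() / 1000;	/* hypothetical */

	if (user_tick) {
		account_user_time(p, tick);
		account_user_time_scaled(p, scaled);
	} else {
		account_system_time(p, HARDIRQ_OFFSET, tick);
		account_system_time_scaled(p, scaled);
	}
}

Architectures without such a measure can simply pass the unscaled tick to both calls, which is exactly what the generic kernel/timer.c change below does.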
-rw-r--r--  include/linux/kernel_stat.h  |  2
-rw-r--r--  include/linux/sched.h        |  2
-rw-r--r--  include/linux/taskstats.h    | 11
-rw-r--r--  kernel/delayacct.c           |  6
-rw-r--r--  kernel/fork.c                |  2
-rw-r--r--  kernel/sched.c               | 21
-rw-r--r--  kernel/timer.c               |  7
-rw-r--r--  kernel/tsacct.c              |  4
8 files changed, 50 insertions, 5 deletions
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 12bf44f083f5..e8ffce898bf9 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -53,7 +53,9 @@ static inline int kstat_irqs(int irq)
 }
 
 extern void account_user_time(struct task_struct *, cputime_t);
+extern void account_user_time_scaled(struct task_struct *, cputime_t);
 extern void account_system_time(struct task_struct *, int, cputime_t);
+extern void account_system_time_scaled(struct task_struct *, cputime_t);
 extern void account_steal_time(struct task_struct *, cputime_t);
 
 #endif /* _LINUX_KERNEL_STAT_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 269b234609b8..7accc04e23ab 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -991,7 +991,7 @@ struct task_struct {
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
 	unsigned int rt_priority;
-	cputime_t utime, stime;
+	cputime_t utime, stime, utimescaled, stimescaled;
 	cputime_t gtime;
 	unsigned long nvcsw, nivcsw;		/* context switch counts */
 	struct timespec start_time;		/* monotonic time */
diff --git a/include/linux/taskstats.h b/include/linux/taskstats.h
index dce1ed204972..92bfd1c153fb 100644
--- a/include/linux/taskstats.h
+++ b/include/linux/taskstats.h
@@ -31,7 +31,7 @@
  */
 
 
-#define TASKSTATS_VERSION	5
+#define TASKSTATS_VERSION	6
 #define TS_COMM_LEN		32	/* should be >= TASK_COMM_LEN
 					 * in linux/sched.h */
 
@@ -85,9 +85,12 @@ struct taskstats {
 	 * On some architectures, value will adjust for cpu time stolen
 	 * from the kernel in involuntary waits due to virtualization.
 	 * Value is cumulative, in nanoseconds, without a corresponding count
-	 * and wraps around to zero silently on overflow
+	 * and wraps around to zero silently on overflow.  The
+	 * _scaled_ version accounts for cpus which can scale the
+	 * number of instructions executed each cycle.
 	 */
 	__u64	cpu_run_real_total;
+	__u64	cpu_scaled_run_real_total;
 
 	/* cpu "virtual" running time
 	 * Uses time intervals seen by the kernel i.e. no adjustment
@@ -142,6 +145,10 @@ struct taskstats {
 	__u64	write_char;		/* bytes written */
 	__u64	read_syscalls;		/* read syscalls */
 	__u64	write_syscalls;		/* write syscalls */
+
+	/* time accounting for SMT machines */
+	__u64	ac_utimescaled;		/* utime scaled on frequency etc */
+	__u64	ac_stimescaled;		/* stime scaled on frequency etc */
 	/* Extended accounting fields end */
 
 #define TASKSTATS_HAS_IO_ACCOUNTING
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 09e9574eeb26..10e43fd8b721 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -115,6 +115,12 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 	tmp += timespec_to_ns(&ts);
 	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
 
+	tmp = (s64)d->cpu_scaled_run_real_total;
+	cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts);
+	tmp += timespec_to_ns(&ts);
+	d->cpu_scaled_run_real_total =
+		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
+
 	/*
 	 * No locking available for sched_info (and too expensive to add one)
 	 * Mitigate by taking snapshot of values
diff --git a/kernel/fork.c b/kernel/fork.c
index 1232aac6a1cd..2ce28f165e31 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1059,6 +1059,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->utime = cputime_zero;
 	p->stime = cputime_zero;
 	p->gtime = cputime_zero;
+	p->utimescaled = cputime_zero;
+	p->stimescaled = cputime_zero;
 
 #ifdef CONFIG_TASK_XACCT
 	p->rchar = 0;		/* I/O counter: bytes read */
diff --git a/kernel/sched.c b/kernel/sched.c
index 92721d1534b8..12534421d7b5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3334,6 +3334,16 @@ void account_guest_time(struct task_struct *p, cputime_t cputime)
 }
 
 /*
+ * Account scaled user cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in user space since the last update
+ */
+void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
+{
+	p->utimescaled = cputime_add(p->utimescaled, cputime);
+}
+
+/*
  * Account system cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
@@ -3371,6 +3381,17 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 }
 
 /*
+ * Account scaled system cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in kernel space since the last update
+ */
+void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
+{
+	p->stimescaled = cputime_add(p->stimescaled, cputime);
+}
+
+/*
  * Account for involuntary wait time.
  * @p: the process from which the cpu time has been stolen
  * @steal: the cpu time spent in involuntary wait
diff --git a/kernel/timer.c b/kernel/timer.c
index 0735f0aa3afb..8521d10fbb27 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -826,10 +826,13 @@ void update_process_times(int user_tick)
 	int cpu = smp_processor_id();
 
 	/* Note: this timer irq context must be accounted for as well. */
-	if (user_tick)
+	if (user_tick) {
 		account_user_time(p, jiffies_to_cputime(1));
-	else
+		account_user_time_scaled(p, jiffies_to_cputime(1));
+	} else {
 		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
+		account_system_time_scaled(p, jiffies_to_cputime(1));
+	}
 	run_local_timers();
 	if (rcu_pending(cpu))
 		rcu_check_callbacks(cpu, user_tick);
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index c122131a122f..4ab1b584961b 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -62,6 +62,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 	rcu_read_unlock();
 	stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
 	stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
+	stats->ac_utimescaled =
+		cputime_to_msecs(tsk->utimescaled) * USEC_PER_MSEC;
+	stats->ac_stimescaled =
+		cputime_to_msecs(tsk->stimescaled) * USEC_PER_MSEC;
 	stats->ac_minflt = tsk->min_flt;
 	stats->ac_majflt = tsk->maj_flt;
 
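[Editor's sketch, not part of the patch] On the consumer side, once a struct taskstats has been received over the taskstats netlink interface, the new fields can be compared against the raw times to see how far below its nominal issue rate or frequency a task ran on average. The ratio calculation below is purely illustrative; ac_pid, ac_utime, ac_stime, ac_utimescaled and ac_stimescaled are the fields defined above.

#include <stdio.h>
#include <linux/taskstats.h>

/* Sketch: derive the average scaling factor a task ran at from the
 * accumulated raw vs. scaled cpu times (all in microseconds). */
static void print_scaling(const struct taskstats *ts)
{
	__u64 raw = ts->ac_utime + ts->ac_stime;
	__u64 scaled = ts->ac_utimescaled + ts->ac_stimescaled;

	if (!raw)
		return;
	/* A ratio below 1.0 means the cpu issued fewer instructions per
	 * cycle (or ran at a lower frequency) than its nominal rate. */
	printf("pid %u: scaled/raw cpu time = %.2f\n",
	       ts->ac_pid, (double)scaled / (double)raw);
}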