about summary refs log tree commit diff stats
path: root/kernel/delayacct.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/delayacct.c')
-rw-r--r--  kernel/delayacct.c  62
1 files changed, 61 insertions, 1 deletions
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 3546b0800f9f..1be274a462ca 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -41,6 +41,10 @@ void delayacct_init(void)
41 41
42void __delayacct_tsk_init(struct task_struct *tsk) 42void __delayacct_tsk_init(struct task_struct *tsk)
43{ 43{
44 spin_lock_init(&tsk->delays_lock);
45 /* No need to acquire tsk->delays_lock for allocation here unless
46 __delayacct_tsk_init called after tsk is attached to tasklist
47 */
44 tsk->delays = kmem_cache_zalloc(delayacct_cache, SLAB_KERNEL); 48 tsk->delays = kmem_cache_zalloc(delayacct_cache, SLAB_KERNEL);
45 if (tsk->delays) 49 if (tsk->delays)
46 spin_lock_init(&tsk->delays->lock); 50 spin_lock_init(&tsk->delays->lock);
@@ -48,8 +52,11 @@ void __delayacct_tsk_init(struct task_struct *tsk)
48 52
49void __delayacct_tsk_exit(struct task_struct *tsk) 53void __delayacct_tsk_exit(struct task_struct *tsk)
50{ 54{
51 kmem_cache_free(delayacct_cache, tsk->delays); 55 struct task_delay_info *delays = tsk->delays;
56 spin_lock(&tsk->delays_lock);
52 tsk->delays = NULL; 57 tsk->delays = NULL;
58 spin_unlock(&tsk->delays_lock);
59 kmem_cache_free(delayacct_cache, delays);
53} 60}
54 61
55/* 62/*
@@ -104,3 +111,56 @@ void __delayacct_blkio_end(void)
104 &current->delays->blkio_delay, 111 &current->delays->blkio_delay,
105 &current->delays->blkio_count); 112 &current->delays->blkio_count);
106} 113}
114
115int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
116{
117 s64 tmp;
118 struct timespec ts;
119 unsigned long t1,t2,t3;
120
121 spin_lock(&tsk->delays_lock);
122
123 /* Though tsk->delays accessed later, early exit avoids
124 * unnecessary returning of other data
125 */
126 if (!tsk->delays)
127 goto done;
128
129 tmp = (s64)d->cpu_run_real_total;
130 cputime_to_timespec(tsk->utime + tsk->stime, &ts);
131 tmp += timespec_to_ns(&ts);
132 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
133
134 /*
135 * No locking available for sched_info (and too expensive to add one)
136 * Mitigate by taking snapshot of values
137 */
138 t1 = tsk->sched_info.pcnt;
139 t2 = tsk->sched_info.run_delay;
140 t3 = tsk->sched_info.cpu_time;
141
142 d->cpu_count += t1;
143
144 jiffies_to_timespec(t2, &ts);
145 tmp = (s64)d->cpu_delay_total + timespec_to_ns(&ts);
146 d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;
147
148 tmp = (s64)d->cpu_run_virtual_total + (s64)jiffies_to_usecs(t3) * 1000;
149 d->cpu_run_virtual_total =
150 (tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;
151
152 /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */
153
154 spin_lock(&tsk->delays->lock);
155 tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
156 d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
157 tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
158 d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
159 d->blkio_count += tsk->delays->blkio_count;
160 d->swapin_count += tsk->delays->swapin_count;
161 spin_unlock(&tsk->delays->lock);
162
163done:
164 spin_unlock(&tsk->delays_lock);
165 return 0;
166}