author		Linus Torvalds <torvalds@linux-foundation.org>	2016-07-25 17:43:00 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-25 17:43:00 -0400
commit		766fd5f6cdaf1d558afba19850493b2603c9625d (patch)
tree		fcb4eb4a041aa0b0f9309c30a982c5b7bdad76ae
parent		cca08cd66ce6cc37812b6b36986ba7eaabd33e0b (diff)
parent		553bf6bbfd8a540c70aee28eb50e24caff456a03 (diff)
Merge branch 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull NOHZ updates from Ingo Molnar:

 - fix system/idle cputime leaked on cputime accounting (all nohz
   configs) (Rik van Riel)

 - remove the messy, ad-hoc irqtime accounting on nohz-full and make it
   compatible with CONFIG_IRQ_TIME_ACCOUNTING=y instead (Rik van Riel)

 - cleanups (Frederic Weisbecker)

 - remove unnecessary irq disablement in the irqtime code (Rik van Riel)

* 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cputime: Drop local_irq_save/restore from irqtime_account_irq()
  sched/cputime: Reorganize vtime native irqtime accounting headers
  sched/cputime: Clean up the old vtime gen irqtime accounting completely
  sched/cputime: Replace VTIME_GEN irq time code with IRQ_TIME_ACCOUNTING code
  sched/cputime: Count actually elapsed irq & softirq time
-rw-r--r--  include/asm-generic/cputime_nsecs.h |   2
-rw-r--r--  include/linux/vtime.h               |  50
-rw-r--r--  init/Kconfig                        |   6
-rw-r--r--  kernel/sched/cputime.c              | 171
4 files changed, 109 insertions(+), 120 deletions(-)
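The unifying idea across the series: steal, hardirq, and softirq time are "other" time that must be subtracted from whatever a tick would otherwise be accounted as, and each contribution is capped so the total never exceeds the elapsed time being accounted. A condensed sketch of the helper the last patch introduces (the real version, in the kernel/sched/cputime.c hunks below, operates on cputime_t and per-CPU counters):

	static inline cputime_t account_other_time(cputime_t max)
	{
		cputime_t accounted;

		/* steal first, then hardirq, then softirq, each limited
		 * to the budget that is still left */
		accounted = steal_account_process_time(max);
		if (accounted < max)
			accounted += irqtime_account_hi_update(max - accounted);
		if (accounted < max)
			accounted += irqtime_account_si_update(max - accounted);

		return accounted;
	}

Callers subtract the returned amount from the tick(s) they are about to account to user, system, or idle time.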
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 0f1c6f315cdc..a84e28e0c634 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -50,6 +50,8 @@ typedef u64 __nocast cputime64_t;
 	(__force u64)(__ct)
 #define nsecs_to_cputime(__nsecs)	\
 	(__force cputime_t)(__nsecs)
+#define nsecs_to_cputime64(__nsecs)	\
+	(__force cputime64_t)(__nsecs)
 
 
 /*
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index fa2196990f84..aa9bfea8804a 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -12,11 +12,9 @@ struct task_struct;
 /*
  * vtime_accounting_cpu_enabled() definitions/declarations
  */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
 static inline bool vtime_accounting_cpu_enabled(void) { return true; }
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
 /*
  * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
  * in that case and compute the tickless cputime.
@@ -37,11 +35,9 @@ static inline bool vtime_accounting_cpu_enabled(void)
 
 	return false;
 }
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 static inline bool vtime_accounting_cpu_enabled(void) { return false; }
-#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
+#endif
 
 
 /*
@@ -64,35 +60,15 @@ extern void vtime_account_system(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
 extern void vtime_account_user(struct task_struct *tsk);
 
-#ifdef __ARCH_HAS_VTIME_ACCOUNT
-extern void vtime_account_irq_enter(struct task_struct *tsk);
-#else
-extern void vtime_common_account_irq_enter(struct task_struct *tsk);
-static inline void vtime_account_irq_enter(struct task_struct *tsk)
-{
-	if (vtime_accounting_cpu_enabled())
-		vtime_common_account_irq_enter(tsk);
-}
-#endif /* __ARCH_HAS_VTIME_ACCOUNT */
-
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_system(struct task_struct *tsk) { }
 static inline void vtime_account_user(struct task_struct *tsk) { }
-static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void arch_vtime_task_switch(struct task_struct *tsk);
-extern void vtime_gen_account_irq_exit(struct task_struct *tsk);
-
-static inline void vtime_account_irq_exit(struct task_struct *tsk)
-{
-	if (vtime_accounting_cpu_enabled())
-		vtime_gen_account_irq_exit(tsk);
-}
-
 extern void vtime_user_enter(struct task_struct *tsk);
 
 static inline void vtime_user_exit(struct task_struct *tsk)
@@ -103,11 +79,6 @@ extern void vtime_guest_enter(struct task_struct *tsk);
 extern void vtime_guest_exit(struct task_struct *tsk);
 extern void vtime_init_idle(struct task_struct *tsk, int cpu);
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-static inline void vtime_account_irq_exit(struct task_struct *tsk)
-{
-	/* On hard|softirq exit we always account to hard|softirq cputime */
-	vtime_account_system(tsk);
-}
 static inline void vtime_user_enter(struct task_struct *tsk) { }
 static inline void vtime_user_exit(struct task_struct *tsk) { }
 static inline void vtime_guest_enter(struct task_struct *tsk) { }
@@ -115,6 +86,19 @@ static inline void vtime_guest_exit(struct task_struct *tsk) { }
 static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
 #endif
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+extern void vtime_account_irq_enter(struct task_struct *tsk);
+static inline void vtime_account_irq_exit(struct task_struct *tsk)
+{
+	/* On hard|softirq exit we always account to hard|softirq cputime */
+	vtime_account_system(tsk);
+}
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
+static inline void vtime_account_irq_exit(struct task_struct *tsk) { }
+#endif
+
+
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 extern void irqtime_account_irq(struct task_struct *tsk);
 #else
diff --git a/init/Kconfig b/init/Kconfig
index 7e0b24f69de9..557bdf10cd44 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -375,9 +375,11 @@ config VIRT_CPU_ACCOUNTING_GEN
 
 	  If unsure, say N.
 
+endchoice
+
 config IRQ_TIME_ACCOUNTING
 	bool "Fine granularity task level IRQ time accounting"
-	depends on HAVE_IRQ_TIME_ACCOUNTING && !NO_HZ_FULL
+	depends on HAVE_IRQ_TIME_ACCOUNTING && !VIRT_CPU_ACCOUNTING_NATIVE
 	help
 	  Select this option to enable fine granularity task irq time
 	  accounting. This is done by reading a timestamp on each
@@ -386,8 +388,6 @@ config IRQ_TIME_ACCOUNTING
 
 	  If in doubt, say N here.
 
-endchoice
-
 config BSD_PROCESS_ACCT
 	bool "BSD Process Accounting"
 	depends on MULTIUSER
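The Kconfig change has two effects: IRQ_TIME_ACCOUNTING is no longer one of the mutually exclusive cputime-accounting choices (endchoice now closes the block before it rather than after it), and its dependency changes from !NO_HZ_FULL to !VIRT_CPU_ACCOUNTING_NATIVE, so it can now be combined with the nohz-full (VIRT_CPU_ACCOUNTING_GEN) mode. Only native vtime still excludes it, since native vtime already accounts hard/softirq time on every kernel entry. A hypothetical compile-time guard expressing the same constraint (for illustration only, not kernel code):

	#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && \
	    defined(CONFIG_IRQ_TIME_ACCOUNTING)
	/* both mechanisms would account the same hard/softirq
	 * nanoseconds, i.e. double-count them */
	#error "IRQ_TIME_ACCOUNTING conflicts with VIRT_CPU_ACCOUNTING_NATIVE"
	#endif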
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 3d60e5d76fdb..ea0f6f31a244 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -49,15 +49,12 @@ DEFINE_PER_CPU(seqcount_t, irq_time_seq);
  */
 void irqtime_account_irq(struct task_struct *curr)
 {
-	unsigned long flags;
 	s64 delta;
 	int cpu;
 
 	if (!sched_clock_irqtime)
 		return;
 
-	local_irq_save(flags);
-
 	cpu = smp_processor_id();
 	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
 	__this_cpu_add(irq_start_time, delta);
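Dropping local_irq_save()/local_irq_restore() here relies on the callers: irqtime_account_irq() is invoked from the hardirq/softirq entry and exit paths, which already run with interrupts disabled, so the per-CPU reads and updates cannot be interrupted on the same CPU. A sketch of that invariant as a defensive check (hypothetical addition, not part of the patch):

	void irqtime_account_irq(struct task_struct *curr)
	{
		/* callers in the irq entry/exit paths guarantee this */
		WARN_ON_ONCE(!irqs_disabled());
		...
	}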
@@ -75,44 +72,53 @@ void irqtime_account_irq(struct task_struct *curr)
 	__this_cpu_add(cpu_softirq_time, delta);
 
 	irq_time_write_end();
-	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(irqtime_account_irq);
 
-static int irqtime_account_hi_update(void)
+static cputime_t irqtime_account_hi_update(cputime_t maxtime)
 {
 	u64 *cpustat = kcpustat_this_cpu->cpustat;
 	unsigned long flags;
-	u64 latest_ns;
-	int ret = 0;
+	cputime_t irq_cputime;
 
 	local_irq_save(flags);
-	latest_ns = this_cpu_read(cpu_hardirq_time);
-	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ])
-		ret = 1;
+	irq_cputime = nsecs_to_cputime64(this_cpu_read(cpu_hardirq_time)) -
+		      cpustat[CPUTIME_IRQ];
+	irq_cputime = min(irq_cputime, maxtime);
+	cpustat[CPUTIME_IRQ] += irq_cputime;
 	local_irq_restore(flags);
-	return ret;
+	return irq_cputime;
 }
 
-static int irqtime_account_si_update(void)
+static cputime_t irqtime_account_si_update(cputime_t maxtime)
 {
 	u64 *cpustat = kcpustat_this_cpu->cpustat;
 	unsigned long flags;
-	u64 latest_ns;
-	int ret = 0;
+	cputime_t softirq_cputime;
 
 	local_irq_save(flags);
-	latest_ns = this_cpu_read(cpu_softirq_time);
-	if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ])
-		ret = 1;
+	softirq_cputime = nsecs_to_cputime64(this_cpu_read(cpu_softirq_time)) -
+			  cpustat[CPUTIME_SOFTIRQ];
+	softirq_cputime = min(softirq_cputime, maxtime);
+	cpustat[CPUTIME_SOFTIRQ] += softirq_cputime;
 	local_irq_restore(flags);
-	return ret;
+	return softirq_cputime;
 }
 
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
 
 #define sched_clock_irqtime	(0)
 
+static cputime_t irqtime_account_hi_update(cputime_t dummy)
+{
+	return 0;
+}
+
+static cputime_t irqtime_account_si_update(cputime_t dummy)
+{
+	return 0;
+}
+
 #endif /* !CONFIG_IRQ_TIME_ACCOUNTING */
 
 static inline void task_group_account_field(struct task_struct *p, int index,
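A worked example of the capping, with assumed numbers: given a budget (maxtime) of 4 ticks, 2 ticks of pending steal time and 3 ticks of pending hardirq time, steal consumes 2, the hardirq update is clamped to min(3, 2) = 2, and the remaining 1 tick of hardirq time stays pending in cpu_hardirq_time until a later update. A standalone model of that arithmetic (plain u64 stand-ins for cputime_t and the per-CPU counters; illustrative only):

	#include <stdio.h>
	#include <inttypes.h>

	/* take as much of *pending as the remaining budget allows */
	static uint64_t take_capped(uint64_t *pending, uint64_t budget)
	{
		uint64_t take = *pending < budget ? *pending : budget;

		*pending -= take;	/* leftover is accounted later */
		return take;
	}

	int main(void)
	{
		uint64_t steal = 2, hardirq = 3, softirq = 0;
		uint64_t max = 4, accounted = 0;

		accounted += take_capped(&steal, max - accounted);
		accounted += take_capped(&hardirq, max - accounted);
		accounted += take_capped(&softirq, max - accounted);

		/* prints: accounted=4, pending hardirq=1 */
		printf("accounted=%" PRIu64 ", pending hardirq=%" PRIu64 "\n",
		       accounted, hardirq);
		return 0;
	}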
@@ -257,32 +263,45 @@ void account_idle_time(cputime_t cputime)
 	cpustat[CPUTIME_IDLE] += (__force u64) cputime;
 }
 
-static __always_inline unsigned long steal_account_process_tick(unsigned long max_jiffies)
+static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
 {
 #ifdef CONFIG_PARAVIRT
 	if (static_key_false(&paravirt_steal_enabled)) {
+		cputime_t steal_cputime;
 		u64 steal;
-		unsigned long steal_jiffies;
 
 		steal = paravirt_steal_clock(smp_processor_id());
 		steal -= this_rq()->prev_steal_time;
 
-		/*
-		 * steal is in nsecs but our caller is expecting steal
-		 * time in jiffies. Lets cast the result to jiffies
-		 * granularity and account the rest on the next rounds.
-		 */
-		steal_jiffies = min(nsecs_to_jiffies(steal), max_jiffies);
-		this_rq()->prev_steal_time += jiffies_to_nsecs(steal_jiffies);
+		steal_cputime = min(nsecs_to_cputime(steal), maxtime);
+		account_steal_time(steal_cputime);
+		this_rq()->prev_steal_time += cputime_to_nsecs(steal_cputime);
 
-		account_steal_time(jiffies_to_cputime(steal_jiffies));
-		return steal_jiffies;
+		return steal_cputime;
 	}
 #endif
 	return 0;
 }
 
 /*
+ * Account how much elapsed time was spent in steal, irq, or softirq time.
+ */
+static inline cputime_t account_other_time(cputime_t max)
+{
+	cputime_t accounted;
+
+	accounted = steal_account_process_time(max);
+
+	if (accounted < max)
+		accounted += irqtime_account_hi_update(max - accounted);
+
+	if (accounted < max)
+		accounted += irqtime_account_si_update(max - accounted);
+
+	return accounted;
+}
+
+/*
  * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
  * tasks (sum on group iteration) belonging to @tsk's group.
  */
@@ -342,21 +361,23 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 					 struct rq *rq, int ticks)
 {
-	cputime_t scaled = cputime_to_scaled(cputime_one_jiffy);
-	u64 cputime = (__force u64) cputime_one_jiffy;
-	u64 *cpustat = kcpustat_this_cpu->cpustat;
+	u64 cputime = (__force u64) cputime_one_jiffy * ticks;
+	cputime_t scaled, other;
 
-	if (steal_account_process_tick(ULONG_MAX))
+	/*
+	 * When returning from idle, many ticks can get accounted at
+	 * once, including some ticks of steal, irq, and softirq time.
+	 * Subtract those ticks from the amount of time accounted to
+	 * idle, or potentially user or system time. Due to rounding,
+	 * other time can exceed ticks occasionally.
+	 */
+	other = account_other_time(cputime);
+	if (other >= cputime)
 		return;
+	cputime -= other;
+	scaled = cputime_to_scaled(cputime);
 
-	cputime *= ticks;
-	scaled *= ticks;
-
-	if (irqtime_account_hi_update()) {
-		cpustat[CPUTIME_IRQ] += cputime;
-	} else if (irqtime_account_si_update()) {
-		cpustat[CPUTIME_SOFTIRQ] += cputime;
-	} else if (this_cpu_ksoftirqd() == p) {
+	if (this_cpu_ksoftirqd() == p) {
 		/*
 		 * ksoftirqd time do not get accounted in cpu_softirq_time.
 		 * So, we have to handle it separately here.
@@ -406,6 +427,10 @@ void vtime_common_task_switch(struct task_struct *prev)
 }
 #endif
 
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
  * Archs that account the whole time spent in the idle task
  * (outside irq) as idle time can rely on this and just implement
@@ -415,33 +440,16 @@ void vtime_common_task_switch(struct task_struct *prev)
  * vtime_account().
  */
 #ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_common_account_irq_enter(struct task_struct *tsk)
+void vtime_account_irq_enter(struct task_struct *tsk)
 {
-	if (!in_interrupt()) {
-		/*
-		 * If we interrupted user, context_tracking_in_user()
-		 * is 1 because the context tracking don't hook
-		 * on irq entry/exit. This way we know if
-		 * we need to flush user time on kernel entry.
-		 */
-		if (context_tracking_in_user()) {
-			vtime_account_user(tsk);
-			return;
-		}
-
-		if (is_idle_task(tsk)) {
-			vtime_account_idle(tsk);
-			return;
-		}
-	}
-	vtime_account_system(tsk);
+	if (!in_interrupt() && is_idle_task(tsk))
+		vtime_account_idle(tsk);
+	else
+		vtime_account_system(tsk);
 }
-EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
+EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
 	*ut = p->utime;
@@ -466,7 +474,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
  */
 void account_process_tick(struct task_struct *p, int user_tick)
 {
-	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
+	cputime_t cputime, scaled, steal;
 	struct rq *rq = this_rq();
 
 	if (vtime_accounting_cpu_enabled())
@@ -477,16 +485,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
 		return;
 	}
 
-	if (steal_account_process_tick(ULONG_MAX))
+	cputime = cputime_one_jiffy;
+	steal = steal_account_process_time(cputime);
+
+	if (steal >= cputime)
 		return;
 
+	cputime -= steal;
+	scaled = cputime_to_scaled(cputime);
+
 	if (user_tick)
-		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
+		account_user_time(p, cputime, scaled);
 	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
-		account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
-				    one_jiffy_scaled);
+		account_system_time(p, HARDIRQ_OFFSET, cputime, scaled);
 	else
-		account_idle_time(cputime_one_jiffy);
+		account_idle_time(cputime);
 }
 
 /*
@@ -681,14 +694,14 @@ static cputime_t vtime_delta(struct task_struct *tsk)
 static cputime_t get_vtime_delta(struct task_struct *tsk)
 {
 	unsigned long now = READ_ONCE(jiffies);
-	unsigned long delta_jiffies, steal_jiffies;
+	cputime_t delta, other;
 
-	delta_jiffies = now - tsk->vtime_snap;
-	steal_jiffies = steal_account_process_tick(delta_jiffies);
+	delta = jiffies_to_cputime(now - tsk->vtime_snap);
+	other = account_other_time(delta);
 	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
 	tsk->vtime_snap = now;
 
-	return jiffies_to_cputime(delta_jiffies - steal_jiffies);
+	return delta - other;
 }
 
 static void __vtime_account_system(struct task_struct *tsk)
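With account_other_time() in place, the tickless (VIRT_CPU_ACCOUNTING_GEN) delta above now subtracts all "other" time rather than steal time alone. In comment form, as reconstructed from the hunk above:

	delta = jiffies_to_cputime(now - tsk->vtime_snap);	/* wall-clock progress */
	other = account_other_time(delta);			/* steal + irq + softirq, capped at delta */
	return delta - other;					/* what user/system/idle accounting sees */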
@@ -708,16 +721,6 @@ void vtime_account_system(struct task_struct *tsk)
 	write_seqcount_end(&tsk->vtime_seqcount);
 }
 
-void vtime_gen_account_irq_exit(struct task_struct *tsk)
-{
-	write_seqcount_begin(&tsk->vtime_seqcount);
-	if (vtime_delta(tsk))
-		__vtime_account_system(tsk);
-	if (context_tracking_in_user())
-		tsk->vtime_snap_whence = VTIME_USER;
-	write_seqcount_end(&tsk->vtime_seqcount);
-}
-
 void vtime_account_user(struct task_struct *tsk)
 {
 	cputime_t delta_cpu;