Diffstat:
-rw-r--r--  arch/s390/kernel/vtime.c   |   6
-rw-r--r--  include/linux/hardirq.h    |   4
-rw-r--r--  include/linux/init_task.h  |  11
-rw-r--r--  include/linux/kvm_host.h   |  20
-rw-r--r--  include/linux/sched.h      |  27
-rw-r--r--  include/linux/vtime.h      |  47
-rw-r--r--  kernel/context_tracking.c  |  21
-rw-r--r--  kernel/fork.c              |   6
-rw-r--r--  kernel/sched/core.c        |   1
-rw-r--r--  kernel/sched/cputime.c     | 193
-rw-r--r--  kernel/softirq.c           |   6

11 files changed, 290 insertions(+), 52 deletions(-)
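The change converts nohz cputime accounting from a single per-cpu snapshot (cputime_snap) to per-task state (vtime_snap, vtime_snap_whence) guarded by a new vtime_seqlock, so that a task's not-yet-accounted execution time can be read safely from other CPUs. The synchronization idiom underneath is a plain seqlock; a minimal sketch of that pattern, with hypothetical names, not code from the patch:

/*
 * Illustrative sketch only (hypothetical names): writers bump the
 * sequence around snapshot updates, readers retry until they observe
 * a stable snapshot.
 */
#include <linux/seqlock.h>

static seqlock_t demo_seqlock = __SEQLOCK_UNLOCKED(demo_seqlock);
static unsigned long long demo_snap;

/* Writer side: the accounting path updates the snapshot atomically. */
static void demo_update(unsigned long long now)
{
	write_seqlock(&demo_seqlock);
	demo_snap = now;
	write_sequnlock(&demo_seqlock);
}

/* Reader side: loop until a consistent value is observed. */
static unsigned long long demo_read(void)
{
	unsigned int seq;
	unsigned long long val;

	do {
		seq = read_seqbegin(&demo_seqlock);
		val = demo_snap;
	} while (read_seqretry(&demo_seqlock, seq));

	return val;
}

Accounting paths below take the write side around every snapshot update; readers such as task_gtime() and fetch_task_cputime() loop until they see a stable snapshot.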
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e84b8b68444a..ce9cc5aa2033 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -127,7 +127,7 @@ void vtime_account_user(struct task_struct *tsk)
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  */
-void vtime_account(struct task_struct *tsk)
+void vtime_account_irq_enter(struct task_struct *tsk)
 {
 	struct thread_info *ti = task_thread_info(tsk);
 	u64 timer, system;
@@ -145,10 +145,10 @@ void vtime_account(struct task_struct *tsk)
 
 	virt_timer_forward(system);
 }
-EXPORT_SYMBOL_GPL(vtime_account);
+EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
 
 void vtime_account_system(struct task_struct *tsk)
-__attribute__((alias("vtime_account")));
+__attribute__((alias("vtime_account_irq_enter")));
 EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void __kprobes vtime_stop_cpu(void)
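The s390 rename keeps vtime_account_system() as a GCC alias of the renamed vtime_account_irq_enter(), so both exported symbols bind to the same function body instead of one wrapping the other. A standalone illustration of the alias attribute, with hypothetical names:

/* Illustration only: the alias declaration emits a second symbol for
 * the same body; calling either name runs the same code. */
void real_impl(void)
{
	/* shared body */
}

void alias_name(void) __attribute__((alias("real_impl")));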
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 624ef3f45c8e..7105d5cbb762 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -153,7 +153,7 @@ extern void rcu_nmi_exit(void);
  */
 #define __irq_enter()					\
 	do {						\
-		vtime_account_irq_enter(current);	\
+		account_irq_enter_time(current);	\
 		add_preempt_count(HARDIRQ_OFFSET);	\
 		trace_hardirq_enter();			\
 	} while (0)
@@ -169,7 +169,7 @@ extern void irq_enter(void);
 #define __irq_exit()					\
 	do {						\
 		trace_hardirq_exit();			\
-		vtime_account_irq_exit(current);	\
+		account_irq_exit_time(current);		\
 		sub_preempt_count(HARDIRQ_OFFSET);	\
 	} while (0)
 
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 6d087c5f57f7..cc898b871cef 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -10,6 +10,7 @@
 #include <linux/pid_namespace.h>
 #include <linux/user_namespace.h>
 #include <linux/securebits.h>
+#include <linux/seqlock.h>
 #include <net/net_namespace.h>
 
 #ifdef CONFIG_SMP
@@ -141,6 +142,15 @@ extern struct task_group root_task_group;
 # define INIT_PERF_EVENTS(tsk)
 #endif
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+# define INIT_VTIME(tsk)						\
+	.vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock),	\
+	.vtime_snap = 0,						\
+	.vtime_snap_whence = VTIME_SYS,
+#else
+# define INIT_VTIME(tsk)
+#endif
+
 #define INIT_TASK_COMM "swapper"
 
 /*
@@ -210,6 +220,7 @@ extern struct task_group root_task_group;
 	INIT_TRACE_RECURSION						\
 	INIT_TASK_RCU_PREEMPT(tsk)					\
 	INIT_CPUSET_SEQ							\
+	INIT_VTIME(tsk)							\
 }
 
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4fe2396401da..b7996a768eb2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -741,7 +741,7 @@ static inline int kvm_deassign_device(struct kvm *kvm,
 }
 #endif /* CONFIG_IOMMU_API */
 
-static inline void guest_enter(void)
+static inline void __guest_enter(void)
 {
 	/*
 	 * This is running in ioctl context so we can avoid
@@ -751,7 +751,7 @@ static inline void guest_enter(void)
 	current->flags |= PF_VCPU;
 }
 
-static inline void guest_exit(void)
+static inline void __guest_exit(void)
 {
 	/*
 	 * This is running in ioctl context so we can avoid
@@ -761,6 +761,22 @@ static inline void guest_exit(void)
 	current->flags &= ~PF_VCPU;
 }
 
+#ifdef CONFIG_CONTEXT_TRACKING
+extern void guest_enter(void);
+extern void guest_exit(void);
+
+#else /* !CONFIG_CONTEXT_TRACKING */
+static inline void guest_enter(void)
+{
+	__guest_enter();
+}
+
+static inline void guest_exit(void)
+{
+	__guest_exit();
+}
+#endif /* !CONFIG_CONTEXT_TRACKING */
+
 static inline void kvm_guest_enter(void)
 {
 	unsigned long flags;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a9c608b6154e..a9fa5145e1a7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1368,6 +1368,15 @@ struct task_struct {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	struct cputime prev_cputime;
 #endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+	seqlock_t vtime_seqlock;
+	unsigned long long vtime_snap;
+	enum {
+		VTIME_SLEEPING = 0,
+		VTIME_USER,
+		VTIME_SYS,
+	} vtime_snap_whence;
+#endif
 	unsigned long nvcsw, nivcsw;	/* context switch counts */
 	struct timespec start_time;	/* monotonic time */
 	struct timespec real_start_time;	/* boot based time */
@@ -1792,11 +1801,13 @@ static inline void put_task_struct(struct task_struct *t)
 		__put_task_struct(t);
 }
 
-static inline cputime_t task_gtime(struct task_struct *t)
-{
-	return t->gtime;
-}
-
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void task_cputime(struct task_struct *t,
+			 cputime_t *utime, cputime_t *stime);
+extern void task_cputime_scaled(struct task_struct *t,
+				cputime_t *utimescaled, cputime_t *stimescaled);
+extern cputime_t task_gtime(struct task_struct *t);
+#else
 static inline void task_cputime(struct task_struct *t,
 				cputime_t *utime, cputime_t *stime)
 {
@@ -1815,6 +1826,12 @@ static inline void task_cputime_scaled(struct task_struct *t,
 	if (stimescaled)
 		*stimescaled = t->stimescaled;
 }
+
+static inline cputime_t task_gtime(struct task_struct *t)
+{
+	return t->gtime;
+}
+#endif
 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
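Under CONFIG_VIRT_CPU_ACCOUNTING_GEN the accessors can no longer be trivial field reads: they must add the execution time accumulated since the last snapshot, under vtime_seqlock, so they become extern functions (defined in kernel/sched/cputime.c below). Call sites look identical either way; a hedged consumer sketch, with a hypothetical function name:

/* Hypothetical consumer: by always going through task_cputime(), the
 * pending nohz delta is folded in transparently when GEN is enabled. */
#include <linux/sched.h>

static void demo_report_times(struct task_struct *t)
{
	cputime_t utime, stime;

	task_cputime(t, &utime, &stime);
	/* utime/stime now include any time since t->vtime_snap */
}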
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index bb50c3ca0d79..71a5782d8c59 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -8,35 +8,44 @@ extern void vtime_task_switch(struct task_struct *prev);
 extern void vtime_account_system(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
 extern void vtime_account_user(struct task_struct *tsk);
-extern void vtime_account(struct task_struct *tsk);
+extern void vtime_account_irq_enter(struct task_struct *tsk);
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-extern bool vtime_accounting_enabled(void);
-#else
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 static inline bool vtime_accounting_enabled(void) { return true; }
 #endif
 
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
 static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_system(struct task_struct *tsk) { }
 static inline void vtime_account_user(struct task_struct *tsk) { }
-static inline void vtime_account(struct task_struct *tsk) { }
+static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
 static inline bool vtime_accounting_enabled(void) { return false; }
 #endif
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
-static inline void vtime_user_enter(struct task_struct *tsk)
-{
-	vtime_account_system(tsk);
-}
+extern void arch_vtime_task_switch(struct task_struct *tsk);
+extern void vtime_account_irq_exit(struct task_struct *tsk);
+extern bool vtime_accounting_enabled(void);
+extern void vtime_user_enter(struct task_struct *tsk);
 static inline void vtime_user_exit(struct task_struct *tsk)
 {
 	vtime_account_user(tsk);
 }
+extern void vtime_guest_enter(struct task_struct *tsk);
+extern void vtime_guest_exit(struct task_struct *tsk);
+extern void vtime_init_idle(struct task_struct *tsk);
 #else
+static inline void vtime_account_irq_exit(struct task_struct *tsk)
+{
+	/* On hard|softirq exit we always account to hard|softirq cputime */
+	vtime_account_system(tsk);
+}
 static inline void vtime_user_enter(struct task_struct *tsk) { }
 static inline void vtime_user_exit(struct task_struct *tsk) { }
+static inline void vtime_guest_enter(struct task_struct *tsk) { }
+static inline void vtime_guest_exit(struct task_struct *tsk) { }
+static inline void vtime_init_idle(struct task_struct *tsk) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -45,25 +54,15 @@ extern void irqtime_account_irq(struct task_struct *tsk);
 static inline void irqtime_account_irq(struct task_struct *tsk) { }
 #endif
 
-static inline void vtime_account_irq_enter(struct task_struct *tsk)
+static inline void account_irq_enter_time(struct task_struct *tsk)
 {
-	/*
-	 * Hardirq can interrupt idle task anytime. So we need vtime_account()
-	 * that performs the idle check in CONFIG_VIRT_CPU_ACCOUNTING.
-	 * Softirq can also interrupt idle task directly if it calls
-	 * local_bh_enable(). Such case probably don't exist but we never know.
-	 * Ksoftirqd is not concerned because idle time is flushed on context
-	 * switch. Softirqs in the end of hardirqs are also not a problem because
-	 * the idle time is flushed on hardirq time already.
-	 */
-	vtime_account(tsk);
+	vtime_account_irq_enter(tsk);
 	irqtime_account_irq(tsk);
 }
 
-static inline void vtime_account_irq_exit(struct task_struct *tsk)
+static inline void account_irq_exit_time(struct task_struct *tsk)
 {
-	/* On hard|softirq exit we always account to hard|softirq cputime */
-	vtime_account_system(tsk);
+	vtime_account_irq_exit(tsk);
 	irqtime_account_irq(tsk);
 }
 
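The header keeps irq entry/exit paths free of #ifdefs through the usual stub idiom: account_irq_enter_time() and account_irq_exit_time() unconditionally call both the vtime hook and the irqtime hook, and whichever accounting flavor is compiled out collapses to an empty static inline that the compiler discards. The idiom, sketched with hypothetical names:

/* Sketch of the stub idiom (hypothetical config option and hook). */
#include <linux/sched.h>

#ifdef CONFIG_DEMO_ACCOUNTING
extern void demo_account_hook(struct task_struct *tsk);
#else
static inline void demo_account_hook(struct task_struct *tsk) { }
#endif

static inline void demo_irq_enter_time(struct task_struct *tsk)
{
	demo_account_hook(tsk);	/* no-op unless CONFIG_DEMO_ACCOUNTING */
}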
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 9002e92e6372..74f68f4dc6c2 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -1,8 +1,9 @@
 #include <linux/context_tracking.h>
+#include <linux/kvm_host.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/hardirq.h>
-
+#include <linux/export.h>
 
 DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
 #ifdef CONFIG_CONTEXT_TRACKING_FORCE
@@ -61,6 +62,24 @@ void user_exit(void)
 	local_irq_restore(flags);
 }
 
+void guest_enter(void)
+{
+	if (vtime_accounting_enabled())
+		vtime_guest_enter(current);
+	else
+		__guest_enter();
+}
+EXPORT_SYMBOL_GPL(guest_enter);
+
+void guest_exit(void)
+{
+	if (vtime_accounting_enabled())
+		vtime_guest_exit(current);
+	else
+		__guest_exit();
+}
+EXPORT_SYMBOL_GPL(guest_exit);
+
 void context_tracking_task_switch(struct task_struct *prev,
 				  struct task_struct *next)
 {
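When context tracking is built in, guest_enter()/guest_exit() move out of line so they can route through the vtime guest accounting; otherwise they fall back to the raw PF_VCPU flips in __guest_enter()/__guest_exit(). The expected caller shape, assuming kvm_guest_enter() (whose body is truncated in the kvm_host.h hunk above) disables interrupts around the call:

/* Assumed caller, illustration only (hypothetical name): KVM's
 * ioctl-context wrapper invokes the context-tracking-aware entry
 * point with interrupts disabled. */
#include <linux/kvm_host.h>
#include <linux/irqflags.h>

static inline void demo_kvm_guest_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_enter();		/* flushes pending system time, sets PF_VCPU */
	local_irq_restore(flags);
}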
diff --git a/kernel/fork.c b/kernel/fork.c
index 65ca6d27f24e..e68a95b4cf26 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1233,6 +1233,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	p->prev_cputime.utime = p->prev_cputime.stime = 0;
 #endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+	seqlock_init(&p->vtime_seqlock);
+	p->vtime_snap = 0;
+	p->vtime_snap_whence = VTIME_SLEEPING;
+#endif
+
 #if defined(SPLIT_RSS_COUNTING)
 	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 257002c13bb0..261022d7e79d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4666,6 +4666,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 */
 	idle->sched_class = &idle_sched_class;
 	ftrace_graph_init_idle_task(idle, cpu);
+	vtime_init_idle(idle);
 #if defined(CONFIG_SMP)
 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a44ecdf809a1..082e05d915b4 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -492,7 +492,7 @@ void vtime_task_switch(struct task_struct *prev)
  * vtime_account().
  */
 #ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_account(struct task_struct *tsk)
+void vtime_account_irq_enter(struct task_struct *tsk)
 {
 	if (!vtime_accounting_enabled())
 		return;
@@ -516,7 +516,7 @@ void vtime_account(struct task_struct *tsk)
 	}
 	vtime_account_system(tsk);
 }
-EXPORT_SYMBOL_GPL(vtime_account);
+EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
 
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
@@ -600,28 +600,55 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-static DEFINE_PER_CPU(unsigned long long, cputime_snap);
+static unsigned long long vtime_delta(struct task_struct *tsk)
+{
+	unsigned long long clock;
+
+	clock = sched_clock();
+	if (clock < tsk->vtime_snap)
+		return 0;
 
-static cputime_t get_vtime_delta(void)
+	return clock - tsk->vtime_snap;
+}
+
+static cputime_t get_vtime_delta(struct task_struct *tsk)
 {
-	unsigned long long delta;
+	unsigned long long delta = vtime_delta(tsk);
 
-	delta = sched_clock() - __this_cpu_read(cputime_snap);
-	__this_cpu_add(cputime_snap, delta);
+	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
+	tsk->vtime_snap += delta;
 
 	/* CHECKME: always safe to convert nsecs to cputime? */
 	return nsecs_to_cputime(delta);
 }
 
+static void __vtime_account_system(struct task_struct *tsk)
+{
+	cputime_t delta_cpu = get_vtime_delta(tsk);
+
+	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
+}
+
 void vtime_account_system(struct task_struct *tsk)
 {
-	cputime_t delta_cpu;
+	if (!vtime_accounting_enabled())
+		return;
+
+	write_seqlock(&tsk->vtime_seqlock);
+	__vtime_account_system(tsk);
+	write_sequnlock(&tsk->vtime_seqlock);
+}
 
+void vtime_account_irq_exit(struct task_struct *tsk)
+{
 	if (!vtime_accounting_enabled())
 		return;
 
-	delta_cpu = get_vtime_delta();
-	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
+	write_seqlock(&tsk->vtime_seqlock);
+	if (context_tracking_in_user())
+		tsk->vtime_snap_whence = VTIME_USER;
+	__vtime_account_system(tsk);
+	write_sequnlock(&tsk->vtime_seqlock);
 }
 
 void vtime_account_user(struct task_struct *tsk)
@@ -631,14 +658,44 @@ void vtime_account_user(struct task_struct *tsk)
 	if (!vtime_accounting_enabled())
 		return;
 
-	delta_cpu = get_vtime_delta();
+	delta_cpu = get_vtime_delta(tsk);
 
+	write_seqlock(&tsk->vtime_seqlock);
+	tsk->vtime_snap_whence = VTIME_SYS;
 	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_user_enter(struct task_struct *tsk)
+{
+	if (!vtime_accounting_enabled())
+		return;
+
+	write_seqlock(&tsk->vtime_seqlock);
+	tsk->vtime_snap_whence = VTIME_USER;
+	__vtime_account_system(tsk);
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_guest_enter(struct task_struct *tsk)
+{
+	write_seqlock(&tsk->vtime_seqlock);
+	__vtime_account_system(tsk);
+	current->flags |= PF_VCPU;
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_guest_exit(struct task_struct *tsk)
+{
+	write_seqlock(&tsk->vtime_seqlock);
+	__vtime_account_system(tsk);
+	current->flags &= ~PF_VCPU;
+	write_sequnlock(&tsk->vtime_seqlock);
 }
 
 void vtime_account_idle(struct task_struct *tsk)
 {
-	cputime_t delta_cpu = get_vtime_delta();
+	cputime_t delta_cpu = get_vtime_delta(tsk);
 
 	account_idle_time(delta_cpu);
 }
@@ -647,4 +704,116 @@ bool vtime_accounting_enabled(void)
 {
 	return context_tracking_active();
 }
+
+void arch_vtime_task_switch(struct task_struct *prev)
+{
+	write_seqlock(&prev->vtime_seqlock);
+	prev->vtime_snap_whence = VTIME_SLEEPING;
+	write_sequnlock(&prev->vtime_seqlock);
+
+	write_seqlock(&current->vtime_seqlock);
+	current->vtime_snap_whence = VTIME_SYS;
+	current->vtime_snap = sched_clock();
+	write_sequnlock(&current->vtime_seqlock);
+}
+
+void vtime_init_idle(struct task_struct *t)
+{
+	unsigned long flags;
+
+	write_seqlock_irqsave(&t->vtime_seqlock, flags);
+	t->vtime_snap_whence = VTIME_SYS;
+	t->vtime_snap = sched_clock();
+	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
+}
+
+cputime_t task_gtime(struct task_struct *t)
+{
+	unsigned long flags;
+	unsigned int seq;
+	cputime_t gtime;
+
+	do {
+		seq = read_seqbegin_irqsave(&t->vtime_seqlock, flags);
+
+		gtime = t->gtime;
+		if (t->flags & PF_VCPU)
+			gtime += vtime_delta(t);
+
+	} while (read_seqretry_irqrestore(&t->vtime_seqlock, seq, flags));
+
+	return gtime;
+}
+
+/*
+ * Fetch cputime raw values from fields of task_struct and
+ * add up the pending nohz execution time since the last
+ * cputime snapshot.
+ */
+static void
+fetch_task_cputime(struct task_struct *t,
+		   cputime_t *u_dst, cputime_t *s_dst,
+		   cputime_t *u_src, cputime_t *s_src,
+		   cputime_t *udelta, cputime_t *sdelta)
+{
+	unsigned long flags;
+	unsigned int seq;
+	unsigned long long delta;
+
+	do {
+		*udelta = 0;
+		*sdelta = 0;
+
+		seq = read_seqbegin_irqsave(&t->vtime_seqlock, flags);
+
+		if (u_dst)
+			*u_dst = *u_src;
+		if (s_dst)
+			*s_dst = *s_src;
+
+		/* Task is sleeping, nothing to add */
+		if (t->vtime_snap_whence == VTIME_SLEEPING ||
+		    is_idle_task(t))
+			continue;
+
+		delta = vtime_delta(t);
+
+		/*
+		 * Task runs either in user or kernel space, add pending nohz time to
+		 * the right place.
+		 */
+		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
+			*udelta = delta;
+		} else {
+			if (t->vtime_snap_whence == VTIME_SYS)
+				*sdelta = delta;
+		}
+	} while (read_seqretry_irqrestore(&t->vtime_seqlock, seq, flags));
+}
+
+
+void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
+{
+	cputime_t udelta, sdelta;
+
+	fetch_task_cputime(t, utime, stime, &t->utime,
+			   &t->stime, &udelta, &sdelta);
+	if (utime)
+		*utime += udelta;
+	if (stime)
+		*stime += sdelta;
+}
+
+void task_cputime_scaled(struct task_struct *t,
+			 cputime_t *utimescaled, cputime_t *stimescaled)
+{
+	cputime_t udelta, sdelta;
+
+	fetch_task_cputime(t, utimescaled, stimescaled,
+			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
+	if (utimescaled)
+		*utimescaled += cputime_to_scaled(udelta);
+	if (stimescaled)
+		*stimescaled += cputime_to_scaled(sdelta);
+}
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
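Note the retry semantics in fetch_task_cputime(): *udelta and *sdelta are re-zeroed at the top of every iteration, and the continue inside the do-while jumps straight to the read_seqretry_irqrestore() check, so a sleeping or idle task contributes no delta. From the consumer side, a hedged sketch with a hypothetical function name:

/* Hypothetical reader (e.g. a proc-style handler): each accessor
 * retries internally on vtime_seqlock until it sees a stable value. */
#include <linux/sched.h>

static void demo_dump_times(struct task_struct *t)
{
	cputime_t utime, stime, gtime;

	task_cputime(t, &utime, &stime);
	gtime = task_gtime(t);

	/* Pending delta went to user time for VTIME_USER or PF_VCPU,
	 * to system time for VTIME_SYS, and nowhere for VTIME_SLEEPING. */
}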
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ed567babe789..f5cc25f147a6 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -221,7 +221,7 @@ asmlinkage void __do_softirq(void)
 	current->flags &= ~PF_MEMALLOC;
 
 	pending = local_softirq_pending();
-	vtime_account_irq_enter(current);
+	account_irq_enter_time(current);
 
 	__local_bh_disable((unsigned long)__builtin_return_address(0),
 				SOFTIRQ_OFFSET);
@@ -272,7 +272,7 @@ restart:
 
 	lockdep_softirq_exit();
 
-	vtime_account_irq_exit(current);
+	account_irq_exit_time(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
@@ -341,7 +341,7 @@ static inline void invoke_softirq(void)
  */
 void irq_exit(void)
 {
-	vtime_account_irq_exit(current);
+	account_irq_exit_time(current);
 	trace_hardirq_exit();
 	sub_preempt_count(IRQ_EXIT_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())