author    Ingo Molnar <mingo@kernel.org>    2013-02-05 07:10:33 -0500
committer Ingo Molnar <mingo@kernel.org>    2013-02-05 07:10:33 -0500
commit    b2c77a57e4a0a7877e357dead7ee8acc19944f3e (patch)
tree      fa192b5a058711299c2a8ce2621df6c9bd8f3a99
parent    c3c186403c6abd32e719f005f0af950155a9e54d (diff)
parent    6a61671bb2f3a1bd12cd17b8fca811a624782632 (diff)
Merge tag 'full-dynticks-cputime-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into sched/core
Pull full-dynticks (user-space execution is undisturbed and receives no
timer IRQs) preparation changes that convert the cputime accounting code
to be full-dynticks ready, from Frederic Weisbecker:

 "This implements the cputime accounting on full dynticks CPUs.

  Typical cputime stats infrastructure relies on the timer tick and its
  periodic polling on the CPU to account the amount of time spent by the
  CPUs and the tasks per high level domains such as userspace,
  kernelspace, guest, ...

  Now we are preparing to implement full dynticks capability on Linux
  for Real Time and HPC users who want full CPU isolation. This feature
  requires a cputime accounting that doesn't depend on the timer tick.

  To implement it, this new cputime infrastructure plugs into
  kernel/user/guest boundaries to take snapshots of cputime and flush
  these to the stats when needed. This performs pretty much like
  CONFIG_VIRT_CPU_ACCOUNTING except that context location and cputime
  snapshots are synchronized between write and read side such that the
  latter can safely retrieve the pending tickless cputime of a task and
  add it to its latest cputime snapshot to return the correct result to
  the user."

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
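As a rough illustration of the write/read synchronization described in the
pull request text, here is a minimal, self-contained C sketch of the
snapshot-and-flush idea. This is not the actual kernel/sched/cputime.c
implementation: the names (vtime_sketch, flush_user_time, read_user_time)
are invented for this example, and the kernel uses a seqlock embedded in
struct task_struct rather than bare C11 atomics.

/*
 * Writer: runs at kernel/user boundaries and flushes the pending delta.
 * Reader: retries until it observes a consistent (utime, snap) pair,
 * then adds the still-pending tickless delta to the last snapshot.
 */
#include <stdatomic.h>

struct vtime_sketch {
	atomic_uint seq;		/* odd = write in progress */
	unsigned long long snap;	/* timestamp of last boundary (ns) */
	unsigned long long utime;	/* flushed user time (ns) */
};

static void flush_user_time(struct vtime_sketch *vt, unsigned long long now)
{
	atomic_fetch_add_explicit(&vt->seq, 1, memory_order_acq_rel);
	vt->utime += now - vt->snap;	/* flush pending cputime */
	vt->snap = now;			/* open a new snapshot window */
	atomic_fetch_add_explicit(&vt->seq, 1, memory_order_release);
}

static unsigned long long read_user_time(struct vtime_sketch *vt,
					 unsigned long long now)
{
	unsigned int seq;
	unsigned long long utime, snap;

	do {
		seq = atomic_load_explicit(&vt->seq, memory_order_acquire);
		utime = vt->utime;
		snap = vt->snap;
	} while ((seq & 1) ||
		 seq != atomic_load_explicit(&vt->seq, memory_order_acquire));

	return utime + (now - snap);	/* add pending tickless cputime */
}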
 arch/alpha/kernel/osf_sys.c                  |   6
 arch/ia64/include/asm/cputime.h              |  92
 arch/ia64/include/asm/thread_info.h          |   4
 arch/ia64/include/asm/xen/minstate.h         |   2
 arch/ia64/kernel/asm-offsets.c               |   2
 arch/ia64/kernel/entry.S                     |  16
 arch/ia64/kernel/fsys.S                      |   4
 arch/ia64/kernel/head.S                      |   4
 arch/ia64/kernel/ivt.S                       |   8
 arch/ia64/kernel/minstate.h                  |   2
 arch/ia64/kernel/time.c                      |   5
 arch/powerpc/configs/chroma_defconfig        |   2
 arch/powerpc/configs/corenet64_smp_defconfig |   2
 arch/powerpc/configs/pasemi_defconfig        |   2
 arch/powerpc/include/asm/cputime.h           |   6
 arch/powerpc/include/asm/lppaca.h            |   2
 arch/powerpc/include/asm/ppc_asm.h           |   4
 arch/powerpc/kernel/entry_64.S               |   4
 arch/powerpc/kernel/time.c                   |   5
 arch/powerpc/platforms/pseries/dtl.c         |   6
 arch/powerpc/platforms/pseries/setup.c       |   6
 arch/s390/kernel/vtime.c                     |   6
 arch/x86/kernel/apm_32.c                     |  11
 drivers/isdn/mISDN/stack.c                   |   7
 fs/binfmt_elf.c                              |   8
 fs/binfmt_elf_fdpic.c                        |   7
 fs/proc/array.c                              |   4
 include/asm-generic/cputime.h                |  66
 include/asm-generic/cputime_jiffies.h        |  72
 include/asm-generic/cputime_nsecs.h          | 104
 include/linux/context_tracking.h             |  28
 include/linux/hardirq.h                      |   4
 include/linux/init_task.h                    |  11
 include/linux/kernel_stat.h                  |   2
 include/linux/kvm_host.h                     |  55
 include/linux/sched.h                        |  40
 include/linux/tsacct_kern.h                  |   3
 include/linux/vtime.h                        |  59
 init/Kconfig                                 |  23
 kernel/acct.c                                |   6
 kernel/context_tracking.c                    |  43
 kernel/cpu.c                                 |   4
 kernel/delayacct.c                           |   7
 kernel/exit.c                                |  10
 kernel/fork.c                                |   6
 kernel/posix-cpu-timers.c                    |  28
 kernel/sched/core.c                          |   1
 kernel/sched/cputime.c                       | 298
 kernel/signal.c                              |  12
 kernel/softirq.c                             |   6
 kernel/time/tick-sched.c                     |   5
 kernel/tsacct.c                              |  44
 52 files changed, 842 insertions(+), 322 deletions(-)
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 14db93e4c8a8..dbc1760f418b 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1139,6 +1139,7 @@ struct rusage32 {
 SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 {
 	struct rusage32 r;
+	cputime_t utime, stime;
 
 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
 		return -EINVAL;
@@ -1146,8 +1147,9 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
 	memset(&r, 0, sizeof(r));
 	switch (who) {
 	case RUSAGE_SELF:
-		jiffies_to_timeval32(current->utime, &r.ru_utime);
-		jiffies_to_timeval32(current->stime, &r.ru_stime);
+		task_cputime(current, &utime, &stime);
+		jiffies_to_timeval32(utime, &r.ru_utime);
+		jiffies_to_timeval32(stime, &r.ru_stime);
 		r.ru_minflt = current->min_flt;
 		r.ru_majflt = current->maj_flt;
 		break;
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 7fcf7f08ab06..e2d3f5baf265 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -11,99 +11,19 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  *
- * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in nsec.
  * Otherwise we measure cpu time in jiffies using the generic definitions.
  */
 
 #ifndef __IA64_CPUTIME_H
 #define __IA64_CPUTIME_H
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-#include <asm-generic/cputime.h>
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+# include <asm-generic/cputime.h>
 #else
-
-#include <linux/time.h>
-#include <linux/jiffies.h>
-#include <asm/processor.h>
-
-typedef u64 __nocast cputime_t;
-typedef u64 __nocast cputime64_t;
-
-#define cputime_one_jiffy		jiffies_to_cputime(1)
-
-/*
- * Convert cputime <-> jiffies (HZ)
- */
-#define cputime_to_jiffies(__ct)	\
-	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies_to_cputime(__jif)	\
-	(__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
-#define cputime64_to_jiffies64(__ct)	\
-	((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies64_to_cputime64(__jif)	\
-	(__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
-
-/*
- * Convert cputime <-> microseconds
- */
-#define cputime_to_usecs(__ct)		\
-	((__force u64)(__ct) / NSEC_PER_USEC)
-#define usecs_to_cputime(__usecs)	\
-	(__force cputime_t)((__usecs) * NSEC_PER_USEC)
-#define usecs_to_cputime64(__usecs)	\
-	(__force cputime64_t)((__usecs) * NSEC_PER_USEC)
-
-/*
- * Convert cputime <-> seconds
- */
-#define cputime_to_secs(__ct)		\
-	((__force u64)(__ct) / NSEC_PER_SEC)
-#define secs_to_cputime(__secs)		\
-	(__force cputime_t)((__secs) * NSEC_PER_SEC)
-
-/*
- * Convert cputime <-> timespec (nsec)
- */
-static inline cputime_t timespec_to_cputime(const struct timespec *val)
-{
-	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
-	return (__force cputime_t) ret;
-}
-static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
-{
-	val->tv_sec  = (__force u64) ct / NSEC_PER_SEC;
-	val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
-}
-
-/*
- * Convert cputime <-> timeval (msec)
- */
-static inline cputime_t timeval_to_cputime(struct timeval *val)
-{
-	u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
-	return (__force cputime_t) ret;
-}
-static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
-{
-	val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
-	val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
-}
-
-/*
- * Convert cputime <-> clock (USER_HZ)
- */
-#define cputime_to_clock_t(__ct)	\
-	((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
-#define clock_t_to_cputime(__x)		\
-	(__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
-
-/*
- * Convert cputime64 to clock.
- */
-#define cputime64_to_clock_t(__ct)	\
-	cputime_to_clock_t((__force cputime_t)__ct)
-
+# include <asm/processor.h>
+# include <asm-generic/cputime_nsecs.h>
 extern void arch_vtime_task_switch(struct task_struct *tsk);
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
 #endif /* __IA64_CPUTIME_H */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ff2ae4136584..020d655ed082 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -31,7 +31,7 @@ struct thread_info {
 	mm_segment_t addr_limit;	/* user-level address space limit */
 	int preempt_count;		/* 0=premptable, <0=BUG; will also serve as bh-counter */
 	struct restart_block restart_block;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	__u64 ac_stamp;
 	__u64 ac_leave;
 	__u64 ac_stime;
@@ -69,7 +69,7 @@ struct thread_info {
 #define task_stack_page(tsk)	((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 #define setup_thread_stack(p, org)			\
 	*task_thread_info(p) = *task_thread_info(org);	\
 	task_thread_info(p)->ac_stime = 0;		\
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
index c57fa910f2c9..00cf03e0cb82 100644
--- a/arch/ia64/include/asm/xen/minstate.h
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -1,5 +1,5 @@
1 1
2#ifdef CONFIG_VIRT_CPU_ACCOUNTING 2#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
3/* read ar.itc in advance, and use it before leaving bank 0 */ 3/* read ar.itc in advance, and use it before leaving bank 0 */
4#define XEN_ACCOUNT_GET_STAMP \ 4#define XEN_ACCOUNT_GET_STAMP \
5 MOV_FROM_ITC(pUStk, p6, r20, r2); 5 MOV_FROM_ITC(pUStk, p6, r20, r2);
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index a48bd9a9927b..46c9e3007315 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -41,7 +41,7 @@ void foo(void)
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
 	DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
 	DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 6bfd8429ee0f..7a53530f22c2 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -724,7 +724,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
 #endif
 .global __paravirt_work_processed_syscall;
 __paravirt_work_processed_syscall:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	adds r2=PT(LOADRS)+16,r12
 	MOV_FROM_ITC(pUStk, p9, r22, r19)	// fetch time at leave
 	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
@@ -762,7 +762,7 @@ __paravirt_work_processed_syscall:
 
 	ld8 r29=[r2],16		// M0|1 load cr.ipsr
 	ld8 r28=[r3],16		// M0|1 load cr.iip
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 (pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
 	;;
 	ld8 r30=[r2],16		// M0|1 load cr.ifs
@@ -793,7 +793,7 @@ __paravirt_work_processed_syscall:
 	ld8.fill r1=[r3],16	// M0|1 load r1
 (pUStk) mov r17=1		// A
 	;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 (pUStk) st1 [r15]=r17		// M2|3
 #else
 (pUStk) st1 [r14]=r17		// M2|3
@@ -813,7 +813,7 @@ __paravirt_work_processed_syscall:
 	shr.u r18=r19,16	// I0|1 get byte size of existing "dirty" partition
 	COVER			// B add current frame into dirty partition & set cr.ifs
 	;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	mov r19=ar.bsp		// M2 get new backing store pointer
 	st8 [r14]=r22		// M save time at leave
 	mov f10=f0		// F clear f10
@@ -948,7 +948,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
 	adds r16=PT(CR_IPSR)+16,r12
 	adds r17=PT(CR_IIP)+16,r12
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	.pred.rel.mutex pUStk,pKStk
 	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled
 	MOV_FROM_ITC(pUStk, p9, r22, r29)	// M fetch time at leave
@@ -981,7 +981,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
 	;;
 	ld8.fill r12=[r16],16
 	ld8.fill r13=[r17],16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 (pUStk) adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
 #else
 (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
@@ -989,7 +989,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
 	;;
 	ld8 r20=[r16],16	// ar.fpsr
 	ld8.fill r15=[r17],16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 (pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18	// deferred
 #endif
 	;;
@@ -997,7 +997,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
 	ld8.fill r2=[r17]
 (pUStk) mov r17=1
 	;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	// mmi_ : ld8 st1 shr;;	mmi_ : st8 st1 shr;;
 	// mib  : mov add br ->	mib  : ld8 add br
 	// bbb_ : br nop cover;;	mbb_ : mov br cover;;
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index e662f178b990..c4cd45d97749 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -529,7 +529,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 	nop.i 0
 	;;
 	mov ar.rsc=0		// M2 set enforced lazy mode, pl 0, LE, loadrs=0
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	MOV_FROM_ITC(p0, p6, r30, r23)	// M get cycle for accounting
 #else
 	nop.m 0
@@ -555,7 +555,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
 	cmp.ne pKStk,pUStk=r0,r0	// A set pKStk <- 0, pUStk <- 1
 	br.call.sptk.many b7=ia64_syscall_setup	// B
 	;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	// mov.m r30=ar.itc is called in advance
 	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
 	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 4738ff7bd66a..9be4e497f3d3 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1073,7 +1073,7 @@ END(ia64_native_sched_clock)
 sched_clock = ia64_native_sched_clock
 #endif
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 GLOBAL_ENTRY(cycle_to_cputime)
 	alloc r16=ar.pfs,1,0,0,0
 	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
@@ -1091,7 +1091,7 @@ GLOBAL_ENTRY(cycle_to_cputime)
 	shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
 	br.ret.sptk.many rp
 END(cycle_to_cputime)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_IA64_BRL_EMU
 
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index fa25689fc453..689ffcaa284e 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -784,7 +784,7 @@ ENTRY(break_fault)
 
 (p8)	adds r28=16,r28			// A switch cr.iip to next bundle
 (p9)	adds r8=1,r8			// A increment ei to next slot
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	;;
 	mov b6=r30			// I0 setup syscall handler branch reg early
 #else
@@ -801,7 +801,7 @@ ENTRY(break_fault)
 	//
 ///////////////////////////////////////////////////////////////////////
 	st1 [r16]=r0			// M2|3 clear current->thread.on_ustack flag
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	MOV_FROM_ITC(p0, p14, r30, r18)	// M get cycle for accounting
 #else
 	mov b6=r30			// I0 setup syscall handler branch reg early
@@ -817,7 +817,7 @@ ENTRY(break_fault)
 	cmp.eq p14,p0=r9,r0		// A are syscalls being traced/audited?
 	br.call.sptk.many b7=ia64_syscall_setup	// B
 1:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	// mov.m r30=ar.itc is called in advance, and r13 is current
 	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13	// A
 	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13	// A
@@ -1043,7 +1043,7 @@ END(ia64_syscall_setup)
 	DBG_FAULT(16)
 	FAULT(16)
 
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
 	/*
 	 * There is no particular reason for this code to be here, other than
 	 * that there happens to be space here that would go unused otherwise.
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index d56753a11636..cc82a7d744c9 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -4,7 +4,7 @@
 #include "entry.h"
 #include "paravirt_inst.h"
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /* read ar.itc in advance, and use it before leaving bank 0 */
 #define ACCOUNT_GET_STAMP				\
 (pUStk) mov.m r20=ar.itc;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 88a794536bc0..fbaac1afb844 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -77,7 +77,7 @@ static struct clocksource clocksource_itc = {
 };
 static struct clocksource *itc_clocksource;
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 
 #include <linux/kernel_stat.h>
 
@@ -136,13 +136,14 @@ void vtime_account_system(struct task_struct *tsk)
 
 	account_system_time(tsk, 0, delta, delta);
 }
+EXPORT_SYMBOL_GPL(vtime_account_system);
 
 void vtime_account_idle(struct task_struct *tsk)
 {
 	account_idle_time(vtime_delta(tsk));
 }
 
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
index 29bb11ec6c64..4f35fc462385 100644
--- a/arch/powerpc/configs/chroma_defconfig
+++ b/arch/powerpc/configs/chroma_defconfig
@@ -1,6 +1,6 @@
1CONFIG_PPC64=y 1CONFIG_PPC64=y
2CONFIG_PPC_BOOK3E_64=y 2CONFIG_PPC_BOOK3E_64=y
3# CONFIG_VIRT_CPU_ACCOUNTING is not set 3# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
4CONFIG_SMP=y 4CONFIG_SMP=y
5CONFIG_NR_CPUS=256 5CONFIG_NR_CPUS=256
6CONFIG_EXPERIMENTAL=y 6CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 88fa5c46f66f..f7df8362911f 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -1,6 +1,6 @@
1CONFIG_PPC64=y 1CONFIG_PPC64=y
2CONFIG_PPC_BOOK3E_64=y 2CONFIG_PPC_BOOK3E_64=y
3# CONFIG_VIRT_CPU_ACCOUNTING is not set 3# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
4CONFIG_SMP=y 4CONFIG_SMP=y
5CONFIG_NR_CPUS=2 5CONFIG_NR_CPUS=2
6CONFIG_EXPERIMENTAL=y 6CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index 840a2c2d0430..bcedeea0df89 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -1,6 +1,6 @@
1CONFIG_PPC64=y 1CONFIG_PPC64=y
2CONFIG_ALTIVEC=y 2CONFIG_ALTIVEC=y
3# CONFIG_VIRT_CPU_ACCOUNTING is not set 3# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
4CONFIG_SMP=y 4CONFIG_SMP=y
5CONFIG_NR_CPUS=2 5CONFIG_NR_CPUS=2
6CONFIG_EXPERIMENTAL=y 6CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 483733bd06d4..607559ab271f 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -8,7 +8,7 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 * 10 *
11 * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in 11 * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in
12 * the same units as the timebase. Otherwise we measure cpu time 12 * the same units as the timebase. Otherwise we measure cpu time
13 * in jiffies using the generic definitions. 13 * in jiffies using the generic definitions.
14 */ 14 */
@@ -16,7 +16,7 @@
16#ifndef __POWERPC_CPUTIME_H 16#ifndef __POWERPC_CPUTIME_H
17#define __POWERPC_CPUTIME_H 17#define __POWERPC_CPUTIME_H
18 18
19#ifndef CONFIG_VIRT_CPU_ACCOUNTING 19#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
20#include <asm-generic/cputime.h> 20#include <asm-generic/cputime.h>
21#ifdef __KERNEL__ 21#ifdef __KERNEL__
22static inline void setup_cputime_one_jiffy(void) { } 22static inline void setup_cputime_one_jiffy(void) { }
@@ -231,5 +231,5 @@ static inline cputime_t clock_t_to_cputime(const unsigned long clk)
231static inline void arch_vtime_task_switch(struct task_struct *tsk) { } 231static inline void arch_vtime_task_switch(struct task_struct *tsk) { }
232 232
233#endif /* __KERNEL__ */ 233#endif /* __KERNEL__ */
234#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 234#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
235#endif /* __POWERPC_CPUTIME_H */ 235#endif /* __POWERPC_CPUTIME_H */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 531fe0c3108f..b1e7f2af1016 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -145,7 +145,7 @@ struct dtl_entry {
145extern struct kmem_cache *dtl_cache; 145extern struct kmem_cache *dtl_cache;
146 146
147/* 147/*
148 * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls 148 * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls
149 * reading from the dispatch trace log. If other code wants to consume 149 * reading from the dispatch trace log. If other code wants to consume
150 * DTL entries, it can set this pointer to a function that will get 150 * DTL entries, it can set this pointer to a function that will get
151 * called once for each DTL entry that gets processed. 151 * called once for each DTL entry that gets processed.
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index ea2a86e8ff95..2d0e1f5d8339 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -24,7 +24,7 @@
24 * user_time and system_time fields in the paca. 24 * user_time and system_time fields in the paca.
25 */ 25 */
26 26
27#ifndef CONFIG_VIRT_CPU_ACCOUNTING 27#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
28#define ACCOUNT_CPU_USER_ENTRY(ra, rb) 28#define ACCOUNT_CPU_USER_ENTRY(ra, rb)
29#define ACCOUNT_CPU_USER_EXIT(ra, rb) 29#define ACCOUNT_CPU_USER_EXIT(ra, rb)
30#define ACCOUNT_STOLEN_TIME 30#define ACCOUNT_STOLEN_TIME
@@ -70,7 +70,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
70 70
71#endif /* CONFIG_PPC_SPLPAR */ 71#endif /* CONFIG_PPC_SPLPAR */
72 72
73#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 73#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
74 74
75/* 75/*
76 * Macros for storing registers into and loading registers from 76 * Macros for storing registers into and loading registers from
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index b310a0573625..a0ca42fb1541 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -94,7 +94,7 @@ system_call_common:
94 addi r9,r1,STACK_FRAME_OVERHEAD 94 addi r9,r1,STACK_FRAME_OVERHEAD
95 ld r11,exception_marker@toc(r2) 95 ld r11,exception_marker@toc(r2)
96 std r11,-16(r9) /* "regshere" marker */ 96 std r11,-16(r9) /* "regshere" marker */
97#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR) 97#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
98BEGIN_FW_FTR_SECTION 98BEGIN_FW_FTR_SECTION
99 beq 33f 99 beq 33f
100 /* if from user, see if there are any DTL entries to process */ 100 /* if from user, see if there are any DTL entries to process */
@@ -110,7 +110,7 @@ BEGIN_FW_FTR_SECTION
110 addi r9,r1,STACK_FRAME_OVERHEAD 110 addi r9,r1,STACK_FRAME_OVERHEAD
11133: 11133:
112END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) 112END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
113#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */ 113#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
114 114
115 /* 115 /*
116 * A syscall should always be called with interrupts enabled 116 * A syscall should always be called with interrupts enabled
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 6f6b1cccc916..2e04b37f67f9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -143,7 +143,7 @@ EXPORT_SYMBOL_GPL(ppc_proc_freq);
143unsigned long ppc_tb_freq; 143unsigned long ppc_tb_freq;
144EXPORT_SYMBOL_GPL(ppc_tb_freq); 144EXPORT_SYMBOL_GPL(ppc_tb_freq);
145 145
146#ifdef CONFIG_VIRT_CPU_ACCOUNTING 146#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
147/* 147/*
148 * Factors for converting from cputime_t (timebase ticks) to 148 * Factors for converting from cputime_t (timebase ticks) to
149 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds). 149 * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
@@ -347,6 +347,7 @@ void vtime_account_system(struct task_struct *tsk)
347 if (stolen) 347 if (stolen)
348 account_steal_time(stolen); 348 account_steal_time(stolen);
349} 349}
350EXPORT_SYMBOL_GPL(vtime_account_system);
350 351
351void vtime_account_idle(struct task_struct *tsk) 352void vtime_account_idle(struct task_struct *tsk)
352{ 353{
@@ -377,7 +378,7 @@ void vtime_account_user(struct task_struct *tsk)
377 account_user_time(tsk, utime, utimescaled); 378 account_user_time(tsk, utime, utimescaled);
378} 379}
379 380
380#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ 381#else /* ! CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
381#define calc_cputime_factors() 382#define calc_cputime_factors()
382#endif 383#endif
383 384
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index a7648543c59e..0cc0ac07a55d 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -57,7 +57,7 @@ static u8 dtl_event_mask = 0x7;
57 */ 57 */
58static int dtl_buf_entries = N_DISPATCH_LOG; 58static int dtl_buf_entries = N_DISPATCH_LOG;
59 59
60#ifdef CONFIG_VIRT_CPU_ACCOUNTING 60#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
61struct dtl_ring { 61struct dtl_ring {
62 u64 write_index; 62 u64 write_index;
63 struct dtl_entry *write_ptr; 63 struct dtl_entry *write_ptr;
@@ -142,7 +142,7 @@ static u64 dtl_current_index(struct dtl *dtl)
142 return per_cpu(dtl_rings, dtl->cpu).write_index; 142 return per_cpu(dtl_rings, dtl->cpu).write_index;
143} 143}
144 144
145#else /* CONFIG_VIRT_CPU_ACCOUNTING */ 145#else /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
146 146
147static int dtl_start(struct dtl *dtl) 147static int dtl_start(struct dtl *dtl)
148{ 148{
@@ -188,7 +188,7 @@ static u64 dtl_current_index(struct dtl *dtl)
188{ 188{
189 return lppaca_of(dtl->cpu).dtl_idx; 189 return lppaca_of(dtl->cpu).dtl_idx;
190} 190}
191#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 191#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
192 192
193static int dtl_enable(struct dtl *dtl) 193static int dtl_enable(struct dtl *dtl)
194{ 194{
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index ca55882465d6..527e12c9573b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -281,7 +281,7 @@ static struct notifier_block pci_dn_reconfig_nb = {
281 281
282struct kmem_cache *dtl_cache; 282struct kmem_cache *dtl_cache;
283 283
284#ifdef CONFIG_VIRT_CPU_ACCOUNTING 284#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
285/* 285/*
286 * Allocate space for the dispatch trace log for all possible cpus 286 * Allocate space for the dispatch trace log for all possible cpus
287 * and register the buffers with the hypervisor. This is used for 287 * and register the buffers with the hypervisor. This is used for
@@ -332,12 +332,12 @@ static int alloc_dispatch_logs(void)
332 332
333 return 0; 333 return 0;
334} 334}
335#else /* !CONFIG_VIRT_CPU_ACCOUNTING */ 335#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
336static inline int alloc_dispatch_logs(void) 336static inline int alloc_dispatch_logs(void)
337{ 337{
338 return 0; 338 return 0;
339} 339}
340#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ 340#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
341 341
342static int alloc_dispatch_log_kmem_cache(void) 342static int alloc_dispatch_log_kmem_cache(void)
343{ 343{
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index e84b8b68444a..ce9cc5aa2033 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -127,7 +127,7 @@ void vtime_account_user(struct task_struct *tsk)
127 * Update process times based on virtual cpu times stored by entry.S 127 * Update process times based on virtual cpu times stored by entry.S
128 * to the lowcore fields user_timer, system_timer & steal_clock. 128 * to the lowcore fields user_timer, system_timer & steal_clock.
129 */ 129 */
130void vtime_account(struct task_struct *tsk) 130void vtime_account_irq_enter(struct task_struct *tsk)
131{ 131{
132 struct thread_info *ti = task_thread_info(tsk); 132 struct thread_info *ti = task_thread_info(tsk);
133 u64 timer, system; 133 u64 timer, system;
@@ -145,10 +145,10 @@ void vtime_account(struct task_struct *tsk)
145 145
146 virt_timer_forward(system); 146 virt_timer_forward(system);
147} 147}
148EXPORT_SYMBOL_GPL(vtime_account); 148EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
149 149
150void vtime_account_system(struct task_struct *tsk) 150void vtime_account_system(struct task_struct *tsk)
151__attribute__((alias("vtime_account"))); 151__attribute__((alias("vtime_account_irq_enter")));
152EXPORT_SYMBOL_GPL(vtime_account_system); 152EXPORT_SYMBOL_GPL(vtime_account_system);
153 153
154void __kprobes vtime_stop_cpu(void) 154void __kprobes vtime_stop_cpu(void)
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index d65464e43503..8d7012b7f402 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -899,6 +899,7 @@ static void apm_cpu_idle(void)
899 static int use_apm_idle; /* = 0 */ 899 static int use_apm_idle; /* = 0 */
900 static unsigned int last_jiffies; /* = 0 */ 900 static unsigned int last_jiffies; /* = 0 */
901 static unsigned int last_stime; /* = 0 */ 901 static unsigned int last_stime; /* = 0 */
902 cputime_t stime;
902 903
903 int apm_idle_done = 0; 904 int apm_idle_done = 0;
904 unsigned int jiffies_since_last_check = jiffies - last_jiffies; 905 unsigned int jiffies_since_last_check = jiffies - last_jiffies;
@@ -906,23 +907,23 @@ static void apm_cpu_idle(void)
906 907
907 WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012"); 908 WARN_ONCE(1, "deprecated apm_cpu_idle will be deleted in 2012");
908recalc: 909recalc:
910 task_cputime(current, NULL, &stime);
909 if (jiffies_since_last_check > IDLE_CALC_LIMIT) { 911 if (jiffies_since_last_check > IDLE_CALC_LIMIT) {
910 use_apm_idle = 0; 912 use_apm_idle = 0;
911 last_jiffies = jiffies;
912 last_stime = current->stime;
913 } else if (jiffies_since_last_check > idle_period) { 913 } else if (jiffies_since_last_check > idle_period) {
914 unsigned int idle_percentage; 914 unsigned int idle_percentage;
915 915
916 idle_percentage = current->stime - last_stime; 916 idle_percentage = stime - last_stime;
917 idle_percentage *= 100; 917 idle_percentage *= 100;
918 idle_percentage /= jiffies_since_last_check; 918 idle_percentage /= jiffies_since_last_check;
919 use_apm_idle = (idle_percentage > idle_threshold); 919 use_apm_idle = (idle_percentage > idle_threshold);
920 if (apm_info.forbid_idle) 920 if (apm_info.forbid_idle)
921 use_apm_idle = 0; 921 use_apm_idle = 0;
922 last_jiffies = jiffies;
923 last_stime = current->stime;
924 } 922 }
925 923
924 last_jiffies = jiffies;
925 last_stime = stime;
926
926 bucket = IDLE_LEAKY_MAX; 927 bucket = IDLE_LEAKY_MAX;
927 928
928 while (!need_resched()) { 929 while (!need_resched()) {
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 5f21f629b7ae..deda591f70b9 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -18,6 +18,7 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/mISDNif.h> 19#include <linux/mISDNif.h>
20#include <linux/kthread.h> 20#include <linux/kthread.h>
21#include <linux/sched.h>
21#include "core.h" 22#include "core.h"
22 23
23static u_int *debug; 24static u_int *debug;
@@ -202,6 +203,9 @@ static int
202mISDNStackd(void *data) 203mISDNStackd(void *data)
203{ 204{
204 struct mISDNstack *st = data; 205 struct mISDNstack *st = data;
206#ifdef MISDN_MSG_STATS
207 cputime_t utime, stime;
208#endif
205 int err = 0; 209 int err = 0;
206 210
207 sigfillset(&current->blocked); 211 sigfillset(&current->blocked);
@@ -303,9 +307,10 @@ mISDNStackd(void *data)
303 "msg %d sleep %d stopped\n", 307 "msg %d sleep %d stopped\n",
304 dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt, 308 dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
305 st->stopped_cnt); 309 st->stopped_cnt);
310 task_cputime(st->thread, &utime, &stime);
306 printk(KERN_DEBUG 311 printk(KERN_DEBUG
307 "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n", 312 "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
308 dev_name(&st->dev->dev), st->thread->utime, st->thread->stime); 313 dev_name(&st->dev->dev), utime, stime);
309 printk(KERN_DEBUG 314 printk(KERN_DEBUG
310 "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n", 315 "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
311 dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw); 316 dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 0c42cdbabecf..49d0b43458b7 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -33,6 +33,7 @@
33#include <linux/elf.h> 33#include <linux/elf.h>
34#include <linux/utsname.h> 34#include <linux/utsname.h>
35#include <linux/coredump.h> 35#include <linux/coredump.h>
36#include <linux/sched.h>
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37#include <asm/param.h> 38#include <asm/param.h>
38#include <asm/page.h> 39#include <asm/page.h>
@@ -1320,8 +1321,11 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
1320 cputime_to_timeval(cputime.utime, &prstatus->pr_utime); 1321 cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
1321 cputime_to_timeval(cputime.stime, &prstatus->pr_stime); 1322 cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
1322 } else { 1323 } else {
1323 cputime_to_timeval(p->utime, &prstatus->pr_utime); 1324 cputime_t utime, stime;
1324 cputime_to_timeval(p->stime, &prstatus->pr_stime); 1325
1326 task_cputime(p, &utime, &stime);
1327 cputime_to_timeval(utime, &prstatus->pr_utime);
1328 cputime_to_timeval(stime, &prstatus->pr_stime);
1325 } 1329 }
1326 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime); 1330 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1327 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime); 1331 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index dc84732e554f..cb240dd3b402 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -1375,8 +1375,11 @@ static void fill_prstatus(struct elf_prstatus *prstatus,
1375 cputime_to_timeval(cputime.utime, &prstatus->pr_utime); 1375 cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
1376 cputime_to_timeval(cputime.stime, &prstatus->pr_stime); 1376 cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
1377 } else { 1377 } else {
1378 cputime_to_timeval(p->utime, &prstatus->pr_utime); 1378 cputime_t utime, stime;
1379 cputime_to_timeval(p->stime, &prstatus->pr_stime); 1379
1380 task_cputime(p, &utime, &stime);
1381 cputime_to_timeval(utime, &prstatus->pr_utime);
1382 cputime_to_timeval(stime, &prstatus->pr_stime);
1380 } 1383 }
1381 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime); 1384 cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
1382 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime); 1385 cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 6a91e6ffbcbd..f7ed9ee46eb9 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -449,7 +449,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
449 do { 449 do {
450 min_flt += t->min_flt; 450 min_flt += t->min_flt;
451 maj_flt += t->maj_flt; 451 maj_flt += t->maj_flt;
452 gtime += t->gtime; 452 gtime += task_gtime(t);
453 t = next_thread(t); 453 t = next_thread(t);
454 } while (t != task); 454 } while (t != task);
455 455
@@ -472,7 +472,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
472 min_flt = task->min_flt; 472 min_flt = task->min_flt;
473 maj_flt = task->maj_flt; 473 maj_flt = task->maj_flt;
474 task_cputime_adjusted(task, &utime, &stime); 474 task_cputime_adjusted(task, &utime, &stime);
475 gtime = task->gtime; 475 gtime = task_gtime(task);
476 } 476 }
477 477
478 /* scale priority and nice values from timeslices to -20..20 */ 478 /* scale priority and nice values from timeslices to -20..20 */
diff --git a/include/asm-generic/cputime.h b/include/asm-generic/cputime.h
index 9a62937c56ca..51969436b8b8 100644
--- a/include/asm-generic/cputime.h
+++ b/include/asm-generic/cputime.h
@@ -4,66 +4,12 @@
4#include <linux/time.h> 4#include <linux/time.h>
5#include <linux/jiffies.h> 5#include <linux/jiffies.h>
6 6
7typedef unsigned long __nocast cputime_t; 7#ifndef CONFIG_VIRT_CPU_ACCOUNTING
8 8# include <asm-generic/cputime_jiffies.h>
9#define cputime_one_jiffy jiffies_to_cputime(1) 9#endif
10#define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
11#define cputime_to_scaled(__ct) (__ct)
12#define jiffies_to_cputime(__hz) (__force cputime_t)(__hz)
13
14typedef u64 __nocast cputime64_t;
15
16#define cputime64_to_jiffies64(__ct) (__force u64)(__ct)
17#define jiffies64_to_cputime64(__jif) (__force cputime64_t)(__jif)
18
19#define nsecs_to_cputime64(__ct) \
20 jiffies64_to_cputime64(nsecs_to_jiffies64(__ct))
21
22
23/*
24 * Convert cputime to microseconds and back.
25 */
26#define cputime_to_usecs(__ct) \
27 jiffies_to_usecs(cputime_to_jiffies(__ct))
28#define usecs_to_cputime(__usec) \
29 jiffies_to_cputime(usecs_to_jiffies(__usec))
30#define usecs_to_cputime64(__usec) \
31 jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
32
33/*
34 * Convert cputime to seconds and back.
35 */
36#define cputime_to_secs(jif) (cputime_to_jiffies(jif) / HZ)
37#define secs_to_cputime(sec) jiffies_to_cputime((sec) * HZ)
38
39/*
40 * Convert cputime to timespec and back.
41 */
42#define timespec_to_cputime(__val) \
43 jiffies_to_cputime(timespec_to_jiffies(__val))
44#define cputime_to_timespec(__ct,__val) \
45 jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
46
47/*
48 * Convert cputime to timeval and back.
49 */
50#define timeval_to_cputime(__val) \
51 jiffies_to_cputime(timeval_to_jiffies(__val))
52#define cputime_to_timeval(__ct,__val) \
53 jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
54
55/*
56 * Convert cputime to clock and back.
57 */
58#define cputime_to_clock_t(__ct) \
59 jiffies_to_clock_t(cputime_to_jiffies(__ct))
60#define clock_t_to_cputime(__x) \
61 jiffies_to_cputime(clock_t_to_jiffies(__x))
62 10
63/* 11#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
64 * Convert cputime64 to clock. 12# include <asm-generic/cputime_nsecs.h>
65 */ 13#endif
66#define cputime64_to_clock_t(__ct) \
67 jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
68 14
69#endif 15#endif
diff --git a/include/asm-generic/cputime_jiffies.h b/include/asm-generic/cputime_jiffies.h
new file mode 100644
index 000000000000..272ecba9f588
--- /dev/null
+++ b/include/asm-generic/cputime_jiffies.h
@@ -0,0 +1,72 @@
+#ifndef _ASM_GENERIC_CPUTIME_JIFFIES_H
+#define _ASM_GENERIC_CPUTIME_JIFFIES_H
+
+typedef unsigned long __nocast cputime_t;
+
+#define cputime_one_jiffy		jiffies_to_cputime(1)
+#define cputime_to_jiffies(__ct)	(__force unsigned long)(__ct)
+#define cputime_to_scaled(__ct)	(__ct)
+#define jiffies_to_cputime(__hz)	(__force cputime_t)(__hz)
+
+typedef u64 __nocast cputime64_t;
+
+#define cputime64_to_jiffies64(__ct)	(__force u64)(__ct)
+#define jiffies64_to_cputime64(__jif)	(__force cputime64_t)(__jif)
+
+
+/*
+ * Convert nanoseconds to cputime
+ */
+#define nsecs_to_cputime64(__nsec)	\
+	jiffies64_to_cputime64(nsecs_to_jiffies64(__nsec))
+#define nsecs_to_cputime(__nsec)	\
+	jiffies_to_cputime(nsecs_to_jiffies(__nsec))
+
+
+/*
+ * Convert cputime to microseconds and back.
+ */
+#define cputime_to_usecs(__ct)		\
+	jiffies_to_usecs(cputime_to_jiffies(__ct))
+#define usecs_to_cputime(__usec)	\
+	jiffies_to_cputime(usecs_to_jiffies(__usec))
+#define usecs_to_cputime64(__usec)	\
+	jiffies64_to_cputime64(nsecs_to_jiffies64((__usec) * 1000))
+
+/*
+ * Convert cputime to seconds and back.
+ */
+#define cputime_to_secs(jif)		(cputime_to_jiffies(jif) / HZ)
+#define secs_to_cputime(sec)		jiffies_to_cputime((sec) * HZ)
+
+/*
+ * Convert cputime to timespec and back.
+ */
+#define timespec_to_cputime(__val)	\
+	jiffies_to_cputime(timespec_to_jiffies(__val))
+#define cputime_to_timespec(__ct,__val)	\
+	jiffies_to_timespec(cputime_to_jiffies(__ct),__val)
+
+/*
+ * Convert cputime to timeval and back.
+ */
+#define timeval_to_cputime(__val)	\
+	jiffies_to_cputime(timeval_to_jiffies(__val))
+#define cputime_to_timeval(__ct,__val)	\
+	jiffies_to_timeval(cputime_to_jiffies(__ct),__val)
+
+/*
+ * Convert cputime to clock and back.
+ */
+#define cputime_to_clock_t(__ct)	\
+	jiffies_to_clock_t(cputime_to_jiffies(__ct))
+#define clock_t_to_cputime(__x)	\
+	jiffies_to_cputime(clock_t_to_jiffies(__x))
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct)	\
+	jiffies_64_to_clock_t(cputime64_to_jiffies64(__ct))
+
+#endif
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
new file mode 100644
index 000000000000..b6485cafb7bd
--- /dev/null
+++ b/include/asm-generic/cputime_nsecs.h
@@ -0,0 +1,104 @@
1/*
2 * Definitions for measuring cputime in nsecs resolution.
3 *
4 * Based on <arch/ia64/include/asm/cputime.h>
5 *
6 * Copyright (C) 2007 FUJITSU LIMITED
7 * Copyright (C) 2007 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 *
14 */
15
16#ifndef _ASM_GENERIC_CPUTIME_NSECS_H
17#define _ASM_GENERIC_CPUTIME_NSECS_H
18
19typedef u64 __nocast cputime_t;
20typedef u64 __nocast cputime64_t;
21
22#define cputime_one_jiffy jiffies_to_cputime(1)
23
24/*
25 * Convert cputime <-> jiffies (HZ)
26 */
27#define cputime_to_jiffies(__ct) \
28 ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
29#define cputime_to_scaled(__ct) (__ct)
30#define jiffies_to_cputime(__jif) \
31 (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
32#define cputime64_to_jiffies64(__ct) \
33 ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
34#define jiffies64_to_cputime64(__jif) \
35 (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
36
37
38/*
39 * Convert cputime <-> nanoseconds
40 */
41#define nsecs_to_cputime(__nsecs) ((__force u64)(__nsecs))
42
43
44/*
45 * Convert cputime <-> microseconds
46 */
47#define cputime_to_usecs(__ct) \
48 ((__force u64)(__ct) / NSEC_PER_USEC)
49#define usecs_to_cputime(__usecs) \
50 (__force cputime_t)((__usecs) * NSEC_PER_USEC)
51#define usecs_to_cputime64(__usecs) \
52 (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
53
54/*
55 * Convert cputime <-> seconds
56 */
57#define cputime_to_secs(__ct) \
58 ((__force u64)(__ct) / NSEC_PER_SEC)
59#define secs_to_cputime(__secs) \
60 (__force cputime_t)((__secs) * NSEC_PER_SEC)
61
62/*
63 * Convert cputime <-> timespec (nsec)
64 */
65static inline cputime_t timespec_to_cputime(const struct timespec *val)
66{
67 u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
68 return (__force cputime_t) ret;
69}
70static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
71{
72 val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
73 val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
74}
75
76/*
77 * Convert cputime <-> timeval (msec)
78 */
79static inline cputime_t timeval_to_cputime(struct timeval *val)
80{
81 u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
82 return (__force cputime_t) ret;
83}
84static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
85{
86 val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
87 val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
88}
89
90/*
91 * Convert cputime <-> clock (USER_HZ)
92 */
93#define cputime_to_clock_t(__ct) \
94 ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
95#define clock_t_to_cputime(__x) \
96 (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
97
98/*
99 * Convert cputime64 to clock.
100 */
101#define cputime64_to_clock_t(__ct) \
102 cputime_to_clock_t((__force cputime_t)__ct)
103
104#endif
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index e24339ccb7f0..b28d161c1091 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -3,12 +3,40 @@
3 3
4#ifdef CONFIG_CONTEXT_TRACKING 4#ifdef CONFIG_CONTEXT_TRACKING
5#include <linux/sched.h> 5#include <linux/sched.h>
6#include <linux/percpu.h>
7
8struct context_tracking {
9 /*
10 * When active is false, probes are unset in order
11 * to minimize overhead: TIF flags are cleared
12 * and calls to user_enter/exit are ignored. This
13 * may be further optimized using static keys.
14 */
15 bool active;
16 enum {
17 IN_KERNEL = 0,
18 IN_USER,
19 } state;
20};
21
22DECLARE_PER_CPU(struct context_tracking, context_tracking);
23
24static inline bool context_tracking_in_user(void)
25{
26 return __this_cpu_read(context_tracking.state) == IN_USER;
27}
28
29static inline bool context_tracking_active(void)
30{
31 return __this_cpu_read(context_tracking.active);
32}
6 33
7extern void user_enter(void); 34extern void user_enter(void);
8extern void user_exit(void); 35extern void user_exit(void);
9extern void context_tracking_task_switch(struct task_struct *prev, 36extern void context_tracking_task_switch(struct task_struct *prev,
10 struct task_struct *next); 37 struct task_struct *next);
11#else 38#else
39static inline bool context_tracking_in_user(void) { return false; }
12static inline void user_enter(void) { } 40static inline void user_enter(void) { }
13static inline void user_exit(void) { } 41static inline void user_exit(void) { }
14static inline void context_tracking_task_switch(struct task_struct *prev, 42static inline void context_tracking_task_switch(struct task_struct *prev,
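For orientation, context_tracking_in_user() above is what lets
tick-independent code decide whether the interrupted context should be
charged as user or system time. A hedged usage sketch follows
(sketch_account_one_tick is a hypothetical caller invented for this
example, not part of this patch; the accounting signatures match the
3.8-era API seen in include/linux/kernel_stat.h below):

/* Assumed usage only: charge one tick of cputime to the bucket that
 * the per-cpu context tracking state says we were interrupted in. */
static void sketch_account_one_tick(struct task_struct *p)
{
	if (context_tracking_in_user())
		account_user_time(p, cputime_one_jiffy, cputime_one_jiffy);
	else
		account_system_time(p, HARDIRQ_OFFSET,
				    cputime_one_jiffy, cputime_one_jiffy);
}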
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 624ef3f45c8e..7105d5cbb762 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -153,7 +153,7 @@ extern void rcu_nmi_exit(void);
153 */ 153 */
154#define __irq_enter() \ 154#define __irq_enter() \
155 do { \ 155 do { \
156 vtime_account_irq_enter(current); \ 156 account_irq_enter_time(current); \
157 add_preempt_count(HARDIRQ_OFFSET); \ 157 add_preempt_count(HARDIRQ_OFFSET); \
158 trace_hardirq_enter(); \ 158 trace_hardirq_enter(); \
159 } while (0) 159 } while (0)
@@ -169,7 +169,7 @@ extern void irq_enter(void);
169#define __irq_exit() \ 169#define __irq_exit() \
170 do { \ 170 do { \
171 trace_hardirq_exit(); \ 171 trace_hardirq_exit(); \
172 vtime_account_irq_exit(current); \ 172 account_irq_exit_time(current); \
173 sub_preempt_count(HARDIRQ_OFFSET); \ 173 sub_preempt_count(HARDIRQ_OFFSET); \
174 } while (0) 174 } while (0)
175 175
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 6d087c5f57f7..cc898b871cef 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -10,6 +10,7 @@
10#include <linux/pid_namespace.h> 10#include <linux/pid_namespace.h>
11#include <linux/user_namespace.h> 11#include <linux/user_namespace.h>
12#include <linux/securebits.h> 12#include <linux/securebits.h>
13#include <linux/seqlock.h>
13#include <net/net_namespace.h> 14#include <net/net_namespace.h>
14 15
15#ifdef CONFIG_SMP 16#ifdef CONFIG_SMP
@@ -141,6 +142,15 @@ extern struct task_group root_task_group;
141# define INIT_PERF_EVENTS(tsk) 142# define INIT_PERF_EVENTS(tsk)
142#endif 143#endif
143 144
145#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
146# define INIT_VTIME(tsk) \
147 .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \
148 .vtime_snap = 0, \
149 .vtime_snap_whence = VTIME_SYS,
150#else
151# define INIT_VTIME(tsk)
152#endif
153
144#define INIT_TASK_COMM "swapper" 154#define INIT_TASK_COMM "swapper"
145 155
146/* 156/*
@@ -210,6 +220,7 @@ extern struct task_group root_task_group;
210 INIT_TRACE_RECURSION \ 220 INIT_TRACE_RECURSION \
211 INIT_TASK_RCU_PREEMPT(tsk) \ 221 INIT_TASK_RCU_PREEMPT(tsk) \
212 INIT_CPUSET_SEQ \ 222 INIT_CPUSET_SEQ \
223 INIT_VTIME(tsk) \
213} 224}
214 225
215 226
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 66b70780e910..ed5f6ed6eb77 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -127,7 +127,7 @@ extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t)
127extern void account_steal_time(cputime_t); 127extern void account_steal_time(cputime_t);
128extern void account_idle_time(cputime_t); 128extern void account_idle_time(cputime_t);
129 129
130#ifdef CONFIG_VIRT_CPU_ACCOUNTING 130#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
131static inline void account_process_tick(struct task_struct *tsk, int user) 131static inline void account_process_tick(struct task_struct *tsk, int user)
132{ 132{
133 vtime_account_user(tsk); 133 vtime_account_user(tsk);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 2c497ab0d03d..b7996a768eb2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -22,6 +22,7 @@
22#include <linux/rcupdate.h> 22#include <linux/rcupdate.h>
23#include <linux/ratelimit.h> 23#include <linux/ratelimit.h>
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/irqflags.h>
25#include <asm/signal.h> 26#include <asm/signal.h>
26 27
27#include <linux/kvm.h> 28#include <linux/kvm.h>
@@ -740,15 +741,52 @@ static inline int kvm_deassign_device(struct kvm *kvm,
 }
 #endif /* CONFIG_IOMMU_API */
 
-static inline void kvm_guest_enter(void)
+static inline void __guest_enter(void)
 {
-	BUG_ON(preemptible());
 	/*
 	 * This is running in ioctl context so we can avoid
 	 * the call to vtime_account() with its unnecessary idle check.
 	 */
-	vtime_account_system_irqsafe(current);
+	vtime_account_system(current);
 	current->flags |= PF_VCPU;
+}
+
+static inline void __guest_exit(void)
+{
+	/*
+	 * This is running in ioctl context so we can avoid
+	 * the call to vtime_account() with its unnecessary idle check.
+	 */
+	vtime_account_system(current);
+	current->flags &= ~PF_VCPU;
+}
+
+#ifdef CONFIG_CONTEXT_TRACKING
+extern void guest_enter(void);
+extern void guest_exit(void);
+
+#else /* !CONFIG_CONTEXT_TRACKING */
+static inline void guest_enter(void)
+{
+	__guest_enter();
+}
+
+static inline void guest_exit(void)
+{
+	__guest_exit();
+}
+#endif /* !CONFIG_CONTEXT_TRACKING */
+
+static inline void kvm_guest_enter(void)
+{
+	unsigned long flags;
+
+	BUG_ON(preemptible());
+
+	local_irq_save(flags);
+	guest_enter();
+	local_irq_restore(flags);
+
 	/* KVM does not hold any references to rcu protected data when it
 	 * switches CPU into a guest mode. In fact switching to a guest mode
 	 * is very similar to exiting to userspase from rcu point of view. In
@@ -761,12 +799,11 @@ static inline void kvm_guest_enter(void)
 
 static inline void kvm_guest_exit(void)
 {
-	/*
-	 * This is running in ioctl context so we can avoid
-	 * the call to vtime_account() with its unnecessary idle check.
-	 */
-	vtime_account_system_irqsafe(current);
-	current->flags &= ~PF_VCPU;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	guest_exit();
+	local_irq_restore(flags);
 }
 
 /*
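
With the hunks above, the cputime flush around guest execution is split into a context-tracking-aware wrapper pair (guest_enter()/guest_exit()) and an irq-safe caller. Condensed, the call pattern that kvm_guest_enter()/kvm_guest_exit() now implement looks like this (a sketch of the code above, not an additional API):

	unsigned long flags;

	local_irq_save(flags);
	guest_enter();		/* flush pending system time, set PF_VCPU */
	local_irq_restore(flags);

	/* ... run the vcpu; elapsed time now counts as guest time ... */

	local_irq_save(flags);
	guest_exit();		/* flush the guest interval, clear PF_VCPU */
	local_irq_restore(flags);
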
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 924e42a8df58..719ee0815e3a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1369,6 +1369,15 @@ struct task_struct {
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	struct cputime prev_cputime;
 #endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+	seqlock_t vtime_seqlock;
+	unsigned long long vtime_snap;
+	enum {
+		VTIME_SLEEPING = 0,
+		VTIME_USER,
+		VTIME_SYS,
+	} vtime_snap_whence;
+#endif
 	unsigned long nvcsw, nivcsw; /* context switch counts */
 	struct timespec start_time;		/* monotonic time */
 	struct timespec real_start_time;	/* boot based time */
@@ -1793,6 +1802,37 @@ static inline void put_task_struct(struct task_struct *t)
 		__put_task_struct(t);
 }
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void task_cputime(struct task_struct *t,
+			 cputime_t *utime, cputime_t *stime);
+extern void task_cputime_scaled(struct task_struct *t,
+				cputime_t *utimescaled, cputime_t *stimescaled);
+extern cputime_t task_gtime(struct task_struct *t);
+#else
+static inline void task_cputime(struct task_struct *t,
+				cputime_t *utime, cputime_t *stime)
+{
+	if (utime)
+		*utime = t->utime;
+	if (stime)
+		*stime = t->stime;
+}
+
+static inline void task_cputime_scaled(struct task_struct *t,
+				       cputime_t *utimescaled,
+				       cputime_t *stimescaled)
+{
+	if (utimescaled)
+		*utimescaled = t->utimescaled;
+	if (stimescaled)
+		*stimescaled = t->stimescaled;
+}
+
+static inline cputime_t task_gtime(struct task_struct *t)
+{
+	return t->gtime;
+}
+#endif
 extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
 
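
These accessors define the conversion pattern applied at every read site in the rest of this series: go through task_cputime()/task_cputime_scaled() instead of dereferencing t->utime and friends, so the VIRT_CPU_ACCOUNTING_GEN build can fold in the pending tickless delta under the seqlock. A minimal before/after sketch (hypothetical caller; either output pointer may be NULL when only one value is needed, as virt_ticks() does further down):

	cputime_t utime, stime, total;

	task_cputime(tsk, &utime, &stime);	/* was: tsk->utime, tsk->stime */
	total = utime + stime;

	task_cputime(tsk, &utime, NULL);	/* stime not wanted */
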
diff --git a/include/linux/tsacct_kern.h b/include/linux/tsacct_kern.h
index 44893e5ec8f7..3251965bf4cc 100644
--- a/include/linux/tsacct_kern.h
+++ b/include/linux/tsacct_kern.h
@@ -23,12 +23,15 @@ static inline void bacct_add_tsk(struct user_namespace *user_ns,
 #ifdef CONFIG_TASK_XACCT
 extern void xacct_add_tsk(struct taskstats *stats, struct task_struct *p);
 extern void acct_update_integrals(struct task_struct *tsk);
+extern void acct_account_cputime(struct task_struct *tsk);
 extern void acct_clear_integrals(struct task_struct *tsk);
 #else
 static inline void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 {}
 static inline void acct_update_integrals(struct task_struct *tsk)
 {}
+static inline void acct_account_cputime(struct task_struct *tsk)
+{}
 static inline void acct_clear_integrals(struct task_struct *tsk)
 {}
 #endif /* CONFIG_TASK_XACCT */
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index ae30ab58431a..71a5782d8c59 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -6,15 +6,46 @@ struct task_struct;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void vtime_task_switch(struct task_struct *prev);
 extern void vtime_account_system(struct task_struct *tsk);
-extern void vtime_account_system_irqsafe(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
 extern void vtime_account_user(struct task_struct *tsk);
-extern void vtime_account(struct task_struct *tsk);
-#else
+extern void vtime_account_irq_enter(struct task_struct *tsk);
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+static inline bool vtime_accounting_enabled(void) { return true; }
+#endif
+
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
 static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_system(struct task_struct *tsk) { }
-static inline void vtime_account_system_irqsafe(struct task_struct *tsk) { }
-static inline void vtime_account(struct task_struct *tsk) { }
+static inline void vtime_account_user(struct task_struct *tsk) { }
+static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
+static inline bool vtime_accounting_enabled(void) { return false; }
+#endif
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern void arch_vtime_task_switch(struct task_struct *tsk);
+extern void vtime_account_irq_exit(struct task_struct *tsk);
+extern bool vtime_accounting_enabled(void);
+extern void vtime_user_enter(struct task_struct *tsk);
+static inline void vtime_user_exit(struct task_struct *tsk)
+{
+	vtime_account_user(tsk);
+}
+extern void vtime_guest_enter(struct task_struct *tsk);
+extern void vtime_guest_exit(struct task_struct *tsk);
+extern void vtime_init_idle(struct task_struct *tsk);
+#else
+static inline void vtime_account_irq_exit(struct task_struct *tsk)
+{
+	/* On hard|softirq exit we always account to hard|softirq cputime */
+	vtime_account_system(tsk);
+}
+static inline void vtime_user_enter(struct task_struct *tsk) { }
+static inline void vtime_user_exit(struct task_struct *tsk) { }
+static inline void vtime_guest_enter(struct task_struct *tsk) { }
+static inline void vtime_guest_exit(struct task_struct *tsk) { }
+static inline void vtime_init_idle(struct task_struct *tsk) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -23,25 +54,15 @@ extern void irqtime_account_irq(struct task_struct *tsk);
 static inline void irqtime_account_irq(struct task_struct *tsk) { }
 #endif
 
-static inline void vtime_account_irq_enter(struct task_struct *tsk)
+static inline void account_irq_enter_time(struct task_struct *tsk)
 {
-	/*
-	 * Hardirq can interrupt idle task anytime. So we need vtime_account()
-	 * that performs the idle check in CONFIG_VIRT_CPU_ACCOUNTING.
-	 * Softirq can also interrupt idle task directly if it calls
-	 * local_bh_enable(). Such case probably don't exist but we never know.
-	 * Ksoftirqd is not concerned because idle time is flushed on context
-	 * switch. Softirqs in the end of hardirqs are also not a problem because
-	 * the idle time is flushed on hardirq time already.
-	 */
-	vtime_account(tsk);
+	vtime_account_irq_enter(tsk);
 	irqtime_account_irq(tsk);
 }
 
-static inline void vtime_account_irq_exit(struct task_struct *tsk)
+static inline void account_irq_exit_time(struct task_struct *tsk)
 {
-	/* On hard|softirq exit we always account to hard|softirq cputime */
-	vtime_account_system(tsk);
+	vtime_account_irq_exit(tsk);
 	irqtime_account_irq(tsk);
 }
 
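
The header now funnels the irq path through a single pair of entry points, account_irq_enter_time() and account_irq_exit_time(), which fan out to whichever of the vtime and irqtime hooks is compiled in. Schematically, an interrupt path pairs them like this (sketch only; the real call sites are the kernel/softirq.c hunks later in this patch):

	account_irq_enter_time(current);	/* vtime_account_irq_enter() + irqtime_account_irq() */
	/* ... service the hardirq or softirq ... */
	account_irq_exit_time(current);		/* vtime_account_irq_exit() + irqtime_account_irq() */
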
diff --git a/init/Kconfig b/init/Kconfig
index be8b7f55312d..a05f843e7e52 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -326,6 +326,9 @@ source "kernel/time/Kconfig"
 
 menu "CPU/Task time and stats accounting"
 
+config VIRT_CPU_ACCOUNTING
+	bool
+
 choice
 	prompt "Cputime accounting"
 	default TICK_CPU_ACCOUNTING if !PPC64
@@ -342,9 +345,10 @@ config TICK_CPU_ACCOUNTING
 
 	  If unsure, say Y.
 
-config VIRT_CPU_ACCOUNTING
+config VIRT_CPU_ACCOUNTING_NATIVE
 	bool "Deterministic task and CPU time accounting"
 	depends on HAVE_VIRT_CPU_ACCOUNTING
+	select VIRT_CPU_ACCOUNTING
 	help
 	  Select this option to enable more accurate task and CPU time
 	  accounting.  This is done by reading a CPU counter on each
@@ -354,6 +358,23 @@ config VIRT_CPU_ACCOUNTING
 	  this also enables accounting of stolen time on logically-partitioned
 	  systems.
 
+config VIRT_CPU_ACCOUNTING_GEN
+	bool "Full dynticks CPU time accounting"
+	depends on HAVE_CONTEXT_TRACKING && 64BIT
+	select VIRT_CPU_ACCOUNTING
+	select CONTEXT_TRACKING
+	help
+	  Select this option to enable task and CPU time accounting on full
+	  dynticks systems. This accounting is implemented by watching every
+	  kernel-user boundary using the context tracking subsystem.
+	  The accounting is thus performed at the expense of some significant
+	  overhead.
+
+	  For now this is only useful if you are working on full dynticks
+	  subsystem development.
+
+	  If unsure, say N.
+
 config IRQ_TIME_ACCOUNTING
 	bool "Fine granularity task level IRQ time accounting"
 	depends on HAVE_IRQ_TIME_ACCOUNTING
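
Given the select statements above, picking the new choice entry pulls in the rest of the stack automatically; a kernel configured with it ends up with a fragment like the following (illustrative, assuming a 64-bit architecture that provides HAVE_CONTEXT_TRACKING):

	CONFIG_CONTEXT_TRACKING=y
	CONFIG_VIRT_CPU_ACCOUNTING=y
	CONFIG_VIRT_CPU_ACCOUNTING_GEN=y
	# CONFIG_TICK_CPU_ACCOUNTING is not set
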
diff --git a/kernel/acct.c b/kernel/acct.c
index 051e071a06e7..e8b1627ab9c7 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -566,6 +566,7 @@ out:
 void acct_collect(long exitcode, int group_dead)
 {
 	struct pacct_struct *pacct = &current->signal->pacct;
+	cputime_t utime, stime;
 	unsigned long vsize = 0;
 
 	if (group_dead && current->mm) {
@@ -593,8 +594,9 @@ void acct_collect(long exitcode, int group_dead)
 		pacct->ac_flag |= ACORE;
 	if (current->flags & PF_SIGNALED)
 		pacct->ac_flag |= AXSIG;
-	pacct->ac_utime += current->utime;
-	pacct->ac_stime += current->stime;
+	task_cputime(current, &utime, &stime);
+	pacct->ac_utime += utime;
+	pacct->ac_stime += stime;
 	pacct->ac_minflt += current->min_flt;
 	pacct->ac_majflt += current->maj_flt;
 	spin_unlock_irq(&current->sighand->siglock);
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index e0e07fd55508..74f68f4dc6c2 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -1,24 +1,11 @@
 #include <linux/context_tracking.h>
+#include <linux/kvm_host.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
-#include <linux/percpu.h>
 #include <linux/hardirq.h>
+#include <linux/export.h>
 
-struct context_tracking {
-	/*
-	 * When active is false, hooks are not set to
-	 * minimize overhead: TIF flags are cleared
-	 * and calls to user_enter/exit are ignored. This
-	 * may be further optimized using static keys.
-	 */
-	bool active;
-	enum {
-		IN_KERNEL = 0,
-		IN_USER,
-	} state;
-};
-
-static DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
+DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
 #ifdef CONFIG_CONTEXT_TRACKING_FORCE
 	.active = true,
 #endif
@@ -44,8 +31,9 @@ void user_enter(void)
 	local_irq_save(flags);
 	if (__this_cpu_read(context_tracking.active) &&
 	    __this_cpu_read(context_tracking.state) != IN_USER) {
-		__this_cpu_write(context_tracking.state, IN_USER);
+		vtime_user_enter(current);
 		rcu_user_enter();
+		__this_cpu_write(context_tracking.state, IN_USER);
 	}
 	local_irq_restore(flags);
 }
@@ -67,12 +55,31 @@ void user_exit(void)
 
 	local_irq_save(flags);
 	if (__this_cpu_read(context_tracking.state) == IN_USER) {
-		__this_cpu_write(context_tracking.state, IN_KERNEL);
 		rcu_user_exit();
+		vtime_user_exit(current);
+		__this_cpu_write(context_tracking.state, IN_KERNEL);
 	}
 	local_irq_restore(flags);
 }
 
+void guest_enter(void)
+{
+	if (vtime_accounting_enabled())
+		vtime_guest_enter(current);
+	else
+		__guest_enter();
+}
+EXPORT_SYMBOL_GPL(guest_enter);
+
+void guest_exit(void)
+{
+	if (vtime_accounting_enabled())
+		vtime_guest_exit(current);
+	else
+		__guest_exit();
+}
+EXPORT_SYMBOL_GPL(guest_exit);
+
 void context_tracking_task_switch(struct task_struct *prev,
 				  struct task_struct *next)
 {
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 3046a503242c..e5d5e8e1e030 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -224,11 +224,13 @@ void clear_tasks_mm_cpumask(int cpu)
 static inline void check_for_tasks(int cpu)
 {
 	struct task_struct *p;
+	cputime_t utime, stime;
 
 	write_lock_irq(&tasklist_lock);
 	for_each_process(p) {
+		task_cputime(p, &utime, &stime);
 		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
-		    (p->utime || p->stime))
+		    (utime || stime))
 			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
 				"(state = %ld, flags = %x)\n",
 				p->comm, task_pid_nr(p), cpu,
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 418b3f7053aa..d473988c1d0b 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -106,6 +106,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 	unsigned long long t2, t3;
 	unsigned long flags;
 	struct timespec ts;
+	cputime_t utime, stime, stimescaled, utimescaled;
 
 	/* Though tsk->delays accessed later, early exit avoids
 	 * unnecessary returning of other data
@@ -114,12 +115,14 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 		goto done;
 
 	tmp = (s64)d->cpu_run_real_total;
-	cputime_to_timespec(tsk->utime + tsk->stime, &ts);
+	task_cputime(tsk, &utime, &stime);
+	cputime_to_timespec(utime + stime, &ts);
 	tmp += timespec_to_ns(&ts);
 	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
 
 	tmp = (s64)d->cpu_scaled_run_real_total;
-	cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts);
+	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
+	cputime_to_timespec(utimescaled + stimescaled, &ts);
 	tmp += timespec_to_ns(&ts);
 	d->cpu_scaled_run_real_total =
 		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
diff --git a/kernel/exit.c b/kernel/exit.c
index b4df21937216..7dd20408707c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -85,6 +85,7 @@ static void __exit_signal(struct task_struct *tsk)
 	bool group_dead = thread_group_leader(tsk);
 	struct sighand_struct *sighand;
 	struct tty_struct *uninitialized_var(tty);
+	cputime_t utime, stime;
 
 	sighand = rcu_dereference_check(tsk->sighand,
 					lockdep_tasklist_lock_is_held());
@@ -123,9 +124,10 @@ static void __exit_signal(struct task_struct *tsk)
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
-		sig->utime += tsk->utime;
-		sig->stime += tsk->stime;
-		sig->gtime += tsk->gtime;
+		task_cputime(tsk, &utime, &stime);
+		sig->utime += utime;
+		sig->stime += stime;
+		sig->gtime += task_gtime(tsk);
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
 		sig->nvcsw += tsk->nvcsw;
@@ -1092,7 +1094,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 		sig = p->signal;
 		psig->cutime += tgutime + sig->cutime;
 		psig->cstime += tgstime + sig->cstime;
-		psig->cgtime += p->gtime + sig->gtime + sig->cgtime;
+		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
 		psig->cmin_flt +=
 			p->min_flt + sig->min_flt + sig->cmin_flt;
 		psig->cmaj_flt +=
diff --git a/kernel/fork.c b/kernel/fork.c
index c535f33bbb9c..4133876d8cd2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1233,6 +1233,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 	p->prev_cputime.utime = p->prev_cputime.stime = 0;
 #endif
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+	seqlock_init(&p->vtime_seqlock);
+	p->vtime_snap = 0;
+	p->vtime_snap_whence = VTIME_SLEEPING;
+#endif
+
 #if defined(SPLIT_RSS_COUNTING)
 	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
 #endif
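
Together with arch_vtime_task_switch() and the user_enter()/user_exit() hooks added elsewhere in this patch, a task's snapshot state then follows a simple lifecycle (a schematic summary of the code in this series, not additional code):

	copy_process():	vtime_snap_whence = VTIME_SLEEPING
	switch-in:	vtime_snap = sched_clock(), whence = VTIME_SYS
	user_enter():	flush system time, whence = VTIME_USER
	user_exit():	flush user time, whence = VTIME_SYS
	switch-out:	flush pending time, whence = VTIME_SLEEPING
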
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index a278cad1d5d6..165d47698477 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -155,11 +155,19 @@ static void bump_cpu_timer(struct k_itimer *timer,
 
 static inline cputime_t prof_ticks(struct task_struct *p)
 {
-	return p->utime + p->stime;
+	cputime_t utime, stime;
+
+	task_cputime(p, &utime, &stime);
+
+	return utime + stime;
 }
 static inline cputime_t virt_ticks(struct task_struct *p)
 {
-	return p->utime;
+	cputime_t utime;
+
+	task_cputime(p, &utime, NULL);
+
+	return utime;
 }
 
 static int
@@ -471,18 +479,23 @@ static void cleanup_timers(struct list_head *head,
  */
 void posix_cpu_timers_exit(struct task_struct *tsk)
 {
+	cputime_t utime, stime;
+
 	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
 						sizeof(unsigned long long));
+	task_cputime(tsk, &utime, &stime);
 	cleanup_timers(tsk->cpu_timers,
-		       tsk->utime, tsk->stime, tsk->se.sum_exec_runtime);
+		       utime, stime, tsk->se.sum_exec_runtime);
 
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
 	struct signal_struct *const sig = tsk->signal;
+	cputime_t utime, stime;
 
+	task_cputime(tsk, &utime, &stime);
 	cleanup_timers(tsk->signal->cpu_timers,
-		       tsk->utime + sig->utime, tsk->stime + sig->stime,
+		       utime + sig->utime, stime + sig->stime,
 		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
 }
 
@@ -1226,11 +1239,14 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
 static inline int fastpath_timer_check(struct task_struct *tsk)
 {
 	struct signal_struct *sig;
+	cputime_t utime, stime;
+
+	task_cputime(tsk, &utime, &stime);
 
 	if (!task_cputime_zero(&tsk->cputime_expires)) {
 		struct task_cputime task_sample = {
-			.utime = tsk->utime,
-			.stime = tsk->stime,
+			.utime = utime,
+			.stime = stime,
 			.sum_exec_runtime = tsk->se.sum_exec_runtime
 		};
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c5b089df7ea8..1dff78a9e2ab 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4667,6 +4667,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 */
 	idle->sched_class = &idle_sched_class;
 	ftrace_graph_init_idle_task(idle, cpu);
+	vtime_init_idle(idle);
 #if defined(CONFIG_SMP)
 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 825a956ccdb6..ccff2752725a 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -3,6 +3,7 @@
 #include <linux/tsacct_kern.h>
 #include <linux/kernel_stat.h>
 #include <linux/static_key.h>
+#include <linux/context_tracking.h>
 #include "sched.h"
 
 
@@ -163,7 +164,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
 	task_group_account_field(p, index, (__force u64) cputime);
 
 	/* Account for user time used */
-	acct_update_integrals(p);
+	acct_account_cputime(p);
 }
 
 /*
@@ -213,7 +214,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
 	task_group_account_field(p, index, (__force u64) cputime);
 
 	/* Account for system time used */
-	acct_update_integrals(p);
+	acct_account_cputime(p);
 }
 
 /*
@@ -295,6 +296,7 @@ static __always_inline bool steal_account_process_tick(void)
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 {
 	struct signal_struct *sig = tsk->signal;
+	cputime_t utime, stime;
 	struct task_struct *t;
 
 	times->utime = sig->utime;
@@ -308,16 +310,15 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 
 	t = tsk;
 	do {
-		times->utime += t->utime;
-		times->stime += t->stime;
+		task_cputime(t, &utime, &stime);
+		times->utime += utime;
+		times->stime += stime;
 		times->sum_exec_runtime += task_sched_runtime(t);
 	} while_each_thread(tsk, t);
 out:
 	rcu_read_unlock();
 }
 
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 /*
  * Account a tick to a process and cpustat
@@ -382,11 +383,12 @@ static void irqtime_account_idle_ticks(int ticks)
 		irqtime_account_process_tick(current, 0, rq);
 }
 #else /* CONFIG_IRQ_TIME_ACCOUNTING */
-static void irqtime_account_idle_ticks(int ticks) {}
-static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
-						struct rq *rq) {}
+static inline void irqtime_account_idle_ticks(int ticks) {}
+static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
+						struct rq *rq) {}
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
 
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
  * Account a single tick of cpu time.
  * @p: the process that the cpu time gets accounted to
@@ -397,6 +399,9 @@ void account_process_tick(struct task_struct *p, int user_tick)
 	cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
 	struct rq *rq = this_rq();
 
+	if (vtime_accounting_enabled())
+		return;
+
 	if (sched_clock_irqtime) {
 		irqtime_account_process_tick(p, user_tick, rq);
 		return;
@@ -438,8 +443,7 @@ void account_idle_ticks(unsigned long ticks)
 
 	account_idle_time(jiffies_to_cputime(ticks));
 }
-
-#endif
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 /*
  * Use precise platform statistics if available:
@@ -461,25 +465,20 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
 	*st = cputime.stime;
 }
 
-void vtime_account_system_irqsafe(struct task_struct *tsk)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	vtime_account_system(tsk);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(vtime_account_system_irqsafe);
-
 #ifndef __ARCH_HAS_VTIME_TASK_SWITCH
 void vtime_task_switch(struct task_struct *prev)
 {
+	if (!vtime_accounting_enabled())
+		return;
+
 	if (is_idle_task(prev))
 		vtime_account_idle(prev);
 	else
 		vtime_account_system(prev);
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	vtime_account_user(prev);
+#endif
 	arch_vtime_task_switch(prev);
 }
 #endif
@@ -493,21 +492,34 @@ void vtime_task_switch(struct task_struct *prev)
  * vtime_account().
  */
 #ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_account(struct task_struct *tsk)
+void vtime_account_irq_enter(struct task_struct *tsk)
 {
-	if (in_interrupt() || !is_idle_task(tsk))
-		vtime_account_system(tsk);
-	else
-		vtime_account_idle(tsk);
+	if (!vtime_accounting_enabled())
+		return;
+
+	if (!in_interrupt()) {
+		/*
+		 * If we interrupted user, context_tracking_in_user()
+		 * is 1 because context tracking doesn't hook
+		 * on irq entry/exit. This way we know if
+		 * we need to flush user time on kernel entry.
+		 */
+		if (context_tracking_in_user()) {
+			vtime_account_user(tsk);
+			return;
+		}
+
+		if (is_idle_task(tsk)) {
+			vtime_account_idle(tsk);
+			return;
+		}
+	}
+	vtime_account_system(tsk);
 }
-EXPORT_SYMBOL_GPL(vtime_account);
+EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
 #endif /* __ARCH_HAS_VTIME_ACCOUNT */
 
-#else
-
-#ifndef nsecs_to_cputime
-# define nsecs_to_cputime(__nsecs)	nsecs_to_jiffies(__nsecs)
-#endif
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
 
 static cputime_t scale_stime(cputime_t stime, cputime_t rtime, cputime_t total)
 {
@@ -568,11 +580,10 @@ static void cputime_adjust(struct task_cputime *curr,
 void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
 {
 	struct task_cputime cputime = {
-		.utime = p->utime,
-		.stime = p->stime,
 		.sum_exec_runtime = p->se.sum_exec_runtime,
 	};
 
+	task_cputime(p, &cputime.utime, &cputime.stime);
 	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
 }
 
@@ -586,4 +597,223 @@ void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime
 	thread_group_cputime(p, &cputime);
 	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
 }
-#endif
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+static unsigned long long vtime_delta(struct task_struct *tsk)
+{
+	unsigned long long clock;
+
+	clock = sched_clock();
+	if (clock < tsk->vtime_snap)
+		return 0;
+
+	return clock - tsk->vtime_snap;
+}
+
+static cputime_t get_vtime_delta(struct task_struct *tsk)
+{
+	unsigned long long delta = vtime_delta(tsk);
+
+	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING);
+	tsk->vtime_snap += delta;
+
+	/* CHECKME: always safe to convert nsecs to cputime? */
+	return nsecs_to_cputime(delta);
+}
+
+static void __vtime_account_system(struct task_struct *tsk)
+{
+	cputime_t delta_cpu = get_vtime_delta(tsk);
+
+	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
+}
+
+void vtime_account_system(struct task_struct *tsk)
+{
+	if (!vtime_accounting_enabled())
+		return;
+
+	write_seqlock(&tsk->vtime_seqlock);
+	__vtime_account_system(tsk);
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_account_irq_exit(struct task_struct *tsk)
+{
+	if (!vtime_accounting_enabled())
+		return;
+
+	write_seqlock(&tsk->vtime_seqlock);
+	if (context_tracking_in_user())
+		tsk->vtime_snap_whence = VTIME_USER;
+	__vtime_account_system(tsk);
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_account_user(struct task_struct *tsk)
+{
+	cputime_t delta_cpu;
+
+	if (!vtime_accounting_enabled())
+		return;
+
+	delta_cpu = get_vtime_delta(tsk);
+
+	write_seqlock(&tsk->vtime_seqlock);
+	tsk->vtime_snap_whence = VTIME_SYS;
+	account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_user_enter(struct task_struct *tsk)
+{
+	if (!vtime_accounting_enabled())
+		return;
+
+	write_seqlock(&tsk->vtime_seqlock);
+	tsk->vtime_snap_whence = VTIME_USER;
+	__vtime_account_system(tsk);
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_guest_enter(struct task_struct *tsk)
+{
+	write_seqlock(&tsk->vtime_seqlock);
+	__vtime_account_system(tsk);
+	current->flags |= PF_VCPU;
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_guest_exit(struct task_struct *tsk)
+{
+	write_seqlock(&tsk->vtime_seqlock);
+	__vtime_account_system(tsk);
+	current->flags &= ~PF_VCPU;
+	write_sequnlock(&tsk->vtime_seqlock);
+}
+
+void vtime_account_idle(struct task_struct *tsk)
+{
+	cputime_t delta_cpu = get_vtime_delta(tsk);
+
+	account_idle_time(delta_cpu);
+}
+
+bool vtime_accounting_enabled(void)
+{
+	return context_tracking_active();
+}
+
+void arch_vtime_task_switch(struct task_struct *prev)
+{
+	write_seqlock(&prev->vtime_seqlock);
+	prev->vtime_snap_whence = VTIME_SLEEPING;
+	write_sequnlock(&prev->vtime_seqlock);
+
+	write_seqlock(&current->vtime_seqlock);
+	current->vtime_snap_whence = VTIME_SYS;
+	current->vtime_snap = sched_clock();
+	write_sequnlock(&current->vtime_seqlock);
+}
+
+void vtime_init_idle(struct task_struct *t)
+{
+	unsigned long flags;
+
+	write_seqlock_irqsave(&t->vtime_seqlock, flags);
+	t->vtime_snap_whence = VTIME_SYS;
+	t->vtime_snap = sched_clock();
+	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
+}
+
+cputime_t task_gtime(struct task_struct *t)
+{
+	unsigned long flags;
+	unsigned int seq;
+	cputime_t gtime;
+
+	do {
+		seq = read_seqbegin_irqsave(&t->vtime_seqlock, flags);
+
+		gtime = t->gtime;
+		if (t->flags & PF_VCPU)
+			gtime += vtime_delta(t);
+
+	} while (read_seqretry_irqrestore(&t->vtime_seqlock, seq, flags));
+
+	return gtime;
+}
+
+/*
+ * Fetch cputime raw values from fields of task_struct and
+ * add up the pending nohz execution time since the last
+ * cputime snapshot.
+ */
+static void
+fetch_task_cputime(struct task_struct *t,
+		   cputime_t *u_dst, cputime_t *s_dst,
+		   cputime_t *u_src, cputime_t *s_src,
+		   cputime_t *udelta, cputime_t *sdelta)
+{
+	unsigned long flags;
+	unsigned int seq;
+	unsigned long long delta;
+
+	do {
+		*udelta = 0;
+		*sdelta = 0;
+
+		seq = read_seqbegin_irqsave(&t->vtime_seqlock, flags);
+
+		if (u_dst)
+			*u_dst = *u_src;
+		if (s_dst)
+			*s_dst = *s_src;
+
+		/* Task is sleeping, nothing to add */
+		if (t->vtime_snap_whence == VTIME_SLEEPING ||
+		    is_idle_task(t))
+			continue;
+
+		delta = vtime_delta(t);
+
+		/*
+		 * Task runs either in user or kernel space, add pending nohz time to
+		 * the right place.
+		 */
+		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
+			*udelta = delta;
+		} else {
+			if (t->vtime_snap_whence == VTIME_SYS)
+				*sdelta = delta;
+		}
+	} while (read_seqretry_irqrestore(&t->vtime_seqlock, seq, flags));
+}
+
+
+void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
+{
+	cputime_t udelta, sdelta;
+
+	fetch_task_cputime(t, utime, stime, &t->utime,
+			   &t->stime, &udelta, &sdelta);
+	if (utime)
+		*utime += udelta;
+	if (stime)
+		*stime += sdelta;
+}
+
+void task_cputime_scaled(struct task_struct *t,
+			 cputime_t *utimescaled, cputime_t *stimescaled)
+{
+	cputime_t udelta, sdelta;
+
+	fetch_task_cputime(t, utimescaled, stimescaled,
+			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
+	if (utimescaled)
+		*utimescaled += cputime_to_scaled(udelta);
+	if (stimescaled)
+		*stimescaled += cputime_to_scaled(sdelta);
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
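
The seqlock discipline above is what lets task_cputime() run safely against a remote task: writers advance vtime_snap and vtime_snap_whence under write_seqlock(), and readers retry until they observe a consistent {snapshot, whence} pair before adding the pending delta. Reduced to its skeleton (a condensed restatement of vtime_account_user() and task_gtime() above, not extra code):

	/* writer, at a context boundary */
	write_seqlock(&tsk->vtime_seqlock);
	tsk->vtime_snap_whence = VTIME_SYS;	/* where time accrues from now on */
	account_user_time(tsk, delta, cputime_to_scaled(delta));
	write_sequnlock(&tsk->vtime_seqlock);

	/* reader, possibly on another CPU */
	do {
		seq = read_seqbegin_irqsave(&t->vtime_seqlock, flags);
		utime = t->utime;
		if (t->vtime_snap_whence == VTIME_USER)
			utime += nsecs_to_cputime(vtime_delta(t));
	} while (read_seqretry_irqrestore(&t->vtime_seqlock, seq, flags));
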
diff --git a/kernel/signal.c b/kernel/signal.c
index 3d09cf6cde75..7f82adbad480 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1632,6 +1632,7 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
 	unsigned long flags;
 	struct sighand_struct *psig;
 	bool autoreap = false;
+	cputime_t utime, stime;
 
 	BUG_ON(sig == -1);
 
@@ -1669,8 +1670,9 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
 				       task_uid(tsk));
 	rcu_read_unlock();
 
-	info.si_utime = cputime_to_clock_t(tsk->utime + tsk->signal->utime);
-	info.si_stime = cputime_to_clock_t(tsk->stime + tsk->signal->stime);
+	task_cputime(tsk, &utime, &stime);
+	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
+	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
 
 	info.si_status = tsk->exit_code & 0x7f;
 	if (tsk->exit_code & 0x80)
@@ -1734,6 +1736,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
 	unsigned long flags;
 	struct task_struct *parent;
 	struct sighand_struct *sighand;
+	cputime_t utime, stime;
 
 	if (for_ptracer) {
 		parent = tsk->parent;
@@ -1752,8 +1755,9 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
 	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
 	rcu_read_unlock();
 
-	info.si_utime = cputime_to_clock_t(tsk->utime);
-	info.si_stime = cputime_to_clock_t(tsk->stime);
+	task_cputime(tsk, &utime, &stime);
+	info.si_utime = cputime_to_clock_t(utime);
+	info.si_stime = cputime_to_clock_t(stime);
 
 	info.si_code = why;
 	switch (why) {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ed567babe789..f5cc25f147a6 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -221,7 +221,7 @@ asmlinkage void __do_softirq(void)
 	current->flags &= ~PF_MEMALLOC;
 
 	pending = local_softirq_pending();
-	vtime_account_irq_enter(current);
+	account_irq_enter_time(current);
 
 	__local_bh_disable((unsigned long)__builtin_return_address(0),
 				SOFTIRQ_OFFSET);
@@ -272,7 +272,7 @@ restart:
 
 	lockdep_softirq_exit();
 
-	vtime_account_irq_exit(current);
+	account_irq_exit_time(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
@@ -341,7 +341,7 @@ static inline void invoke_softirq(void)
  */
 void irq_exit(void)
 {
-	vtime_account_irq_exit(current);
+	account_irq_exit_time(current);
 	trace_hardirq_exit();
 	sub_preempt_count(IRQ_EXIT_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d58e552d9fd1..46dfb6d94b1c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -631,8 +631,11 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 
 static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
 {
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	unsigned long ticks;
+
+	if (vtime_accounting_enabled())
+		return;
 	/*
 	 * We stopped the tick in idle. Update process times would miss the
 	 * time we slept as update_process_times does only a 1 tick
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 625df0b44690..a1dd9a1b1327 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -32,6 +32,7 @@ void bacct_add_tsk(struct user_namespace *user_ns,
 {
 	const struct cred *tcred;
 	struct timespec uptime, ts;
+	cputime_t utime, stime, utimescaled, stimescaled;
 	u64 ac_etime;
 
 	BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
@@ -65,10 +66,15 @@ void bacct_add_tsk(struct user_namespace *user_ns,
 	stats->ac_ppid = pid_alive(tsk) ?
 		task_tgid_nr_ns(rcu_dereference(tsk->real_parent), pid_ns) : 0;
 	rcu_read_unlock();
-	stats->ac_utime = cputime_to_usecs(tsk->utime);
-	stats->ac_stime = cputime_to_usecs(tsk->stime);
-	stats->ac_utimescaled = cputime_to_usecs(tsk->utimescaled);
-	stats->ac_stimescaled = cputime_to_usecs(tsk->stimescaled);
+
+	task_cputime(tsk, &utime, &stime);
+	stats->ac_utime = cputime_to_usecs(utime);
+	stats->ac_stime = cputime_to_usecs(stime);
+
+	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
+	stats->ac_utimescaled = cputime_to_usecs(utimescaled);
+	stats->ac_stimescaled = cputime_to_usecs(stimescaled);
+
 	stats->ac_minflt = tsk->min_flt;
 	stats->ac_majflt = tsk->maj_flt;
 
@@ -115,11 +121,8 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 #undef KB
 #undef MB
 
-/**
- * acct_update_integrals - update mm integral fields in task_struct
- * @tsk: task_struct for accounting
- */
-void acct_update_integrals(struct task_struct *tsk)
+static void __acct_update_integrals(struct task_struct *tsk,
+				    cputime_t utime, cputime_t stime)
 {
 	if (likely(tsk->mm)) {
 		cputime_t time, dtime;
@@ -128,7 +131,7 @@ void acct_update_integrals(struct task_struct *tsk)
 		u64 delta;
 
 		local_irq_save(flags);
-		time = tsk->stime + tsk->utime;
+		time = stime + utime;
 		dtime = time - tsk->acct_timexpd;
 		jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
 		delta = value.tv_sec;
@@ -145,6 +148,27 @@ void acct_update_integrals(struct task_struct *tsk)
 }
 
 /**
+ * acct_update_integrals - update mm integral fields in task_struct
+ * @tsk: task_struct for accounting
+ */
+void acct_update_integrals(struct task_struct *tsk)
+{
+	cputime_t utime, stime;
+
+	task_cputime(tsk, &utime, &stime);
+	__acct_update_integrals(tsk, utime, stime);
+}
+
+/**
+ * acct_account_cputime - update mm integral after cputime update
+ * @tsk: task_struct for accounting
+ */
+void acct_account_cputime(struct task_struct *tsk)
+{
+	__acct_update_integrals(tsk, tsk->utime, tsk->stime);
+}
+
+/**
  * acct_clear_integrals - clear the mm integral fields in task_struct
  * @tsk: task_struct whose accounting fields are cleared
  */