author     Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>	2008-01-29 00:27:30 -0500
committer  Tony Luck <tony.luck@intel.com>	2008-02-20 15:55:37 -0500
commit     b64f34cdfe5bef9dfed1304c513220b0f2862eca (patch)
tree       04cb9216a9de18afcb27f9bac3fda1f3c7bacbbd /arch/ia64/kernel
parent     5d9c4a7de64d398604a978d267a6987f1f4025b7 (diff)
[IA64] VIRT_CPU_ACCOUNTING (accurate cpu time accounting)
This patch implements VIRT_CPU_ACCOUNTING for ia64, which enables more
accurate cpu time accounting. VIRT_CPU_ACCOUNTING is a kernel config
option that the s390 and powerpc architectures already have. Turning it
on switches the cpu time accounting mechanism from a tick-sampling
based one to a state-transition based one.

State-transition based accounting reads the time (the cycle counter in
the processor) at every state-transition point, such as entry to and
exit from the kernel, interrupts, softirqs, etc. The difference between
two such points is the actual time spent in that state, which is
clearly more accurate than what tick-sampling based accounting can
deliver.

Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
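[Editor's note] As a rough illustration of the mechanism (this is not code from
the patch; read_counter() and struct acct are hypothetical stand-ins),
state-transition accounting amounts to stamping a counter at every transition
and charging the elapsed interval to the state just left:

#include <stdint.h>
#include <time.h>

/* Hypothetical stand-in for the processor cycle counter (ar.itc on ia64). */
static uint64_t read_counter(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

struct acct {
	uint64_t stamp;		/* counter value at the last transition */
	uint64_t utime;		/* accumulated user time */
	uint64_t stime;		/* accumulated system time */
};

/* Charge the interval since the last transition to the state just left. */
static void account_transition(struct acct *a, int was_user)
{
	uint64_t now = read_counter();

	if (was_user)
		a->utime += now - a->stamp;
	else
		a->stime += now - a->stamp;
	a->stamp = now;		/* the next interval starts here */
}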
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/asm-offsets.c |  6
-rw-r--r--  arch/ia64/kernel/entry.S       | 65
-rw-r--r--  arch/ia64/kernel/fsys.S        | 26
-rw-r--r--  arch/ia64/kernel/head.S        | 20
-rw-r--r--  arch/ia64/kernel/ivt.S         | 69
-rw-r--r--  arch/ia64/kernel/minstate.h    | 14
-rw-r--r--  arch/ia64/kernel/time.c        | 78
7 files changed, 278 insertions(+), 0 deletions(-)
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 0aebc6f79e95..5865130b0a92 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -39,6 +39,12 @@ void foo(void)
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
+	DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
+	DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
+	DEFINE(TI_AC_UTIME, offsetof(struct thread_info, ac_utime));
+#endif
 
 	BLANK();
 
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 3c331c464b40..b0be4a280174 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -710,6 +710,16 @@ ENTRY(ia64_leave_syscall)
 (pUStk)	cmp.eq.unc p6,p0=r0,r0		// p6 <- pUStk
 #endif
 .work_processed_syscall:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	adds r2=PT(LOADRS)+16,r12
+(pUStk)	mov.m r22=ar.itc			// fetch time at leave
+	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
+	;;
+(p6)	ld4 r31=[r18]				// load current_thread_info()->flags
+	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
+	adds r3=PT(AR_BSPSTORE)+16,r12		// deferred
+	;;
+#else
 	adds r2=PT(LOADRS)+16,r12
 	adds r3=PT(AR_BSPSTORE)+16,r12
 	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
@@ -718,6 +728,7 @@ ENTRY(ia64_leave_syscall)
 	ld8 r19=[r2],PT(B6)-PT(LOADRS)		// load ar.rsc value for "loadrs"
 	nop.i 0
 	;;
+#endif
 	mov r16=ar.bsp				// M2  get existing backing store pointer
 	ld8 r18=[r2],PT(R9)-PT(B6)		// load b6
 (p6)	and r15=TIF_WORK_MASK,r31		// any work other than TIF_SYSCALL_TRACE?
@@ -737,12 +748,21 @@ ENTRY(ia64_leave_syscall)
 
 	ld8 r29=[r2],16				// M0|1 load cr.ipsr
 	ld8 r28=[r3],16				// M0|1 load cr.iip
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk)	add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
+	;;
+	ld8 r30=[r2],16				// M0|1 load cr.ifs
+	ld8 r25=[r3],16				// M0|1 load ar.unat
+(pUStk)	add r15=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
+	;;
+#else
 	mov r22=r0				// A    clear r22
 	;;
 	ld8 r30=[r2],16				// M0|1 load cr.ifs
 	ld8 r25=[r3],16				// M0|1 load ar.unat
 (pUStk)	add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
 	;;
+#endif
 	ld8 r26=[r2],PT(B0)-PT(AR_PFS)		// M0|1 load ar.pfs
 (pKStk)	mov r22=psr				// M2   read PSR now that interrupts are disabled
 	nop 0
@@ -759,7 +779,11 @@ ENTRY(ia64_leave_syscall)
 	ld8.fill r1=[r3],16			// M0|1 load r1
 (pUStk)	mov r17=1				// A
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk)	st1 [r15]=r17				// M2|3
+#else
 (pUStk)	st1 [r14]=r17				// M2|3
+#endif
 	ld8.fill r13=[r3],16			// M0|1
 	mov f8=f0				// F    clear f8
 	;;
@@ -775,12 +799,22 @@ ENTRY(ia64_leave_syscall)
 	shr.u r18=r19,16			// I0|1 get byte size of existing "dirty" partition
 	cover					// B    add current frame into dirty partition & set cr.ifs
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mov r19=ar.bsp				// M2   get new backing store pointer
+	st8 [r14]=r22				// M    save time at leave
+	mov f10=f0				// F    clear f10
+
+	mov r22=r0				// A    clear r22
+	movl r14=__kernel_syscall_via_epc	// X
+	;;
+#else
 	mov r19=ar.bsp				// M2   get new backing store pointer
 	mov f10=f0				// F    clear f10
 
 	nop.m 0
 	movl r14=__kernel_syscall_via_epc	// X
 	;;
+#endif
 	mov.m ar.csd=r0				// M2   clear ar.csd
 	mov.m ar.ccv=r0				// M2   clear ar.ccv
 	mov b7=r14				// I0   clear b7 (hint with __kernel_syscall_via_epc)
@@ -913,10 +947,18 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	adds r16=PT(CR_IPSR)+16,r12
 	adds r17=PT(CR_IIP)+16,r12
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	.pred.rel.mutex pUStk,pKStk
+(pKStk)	mov r22=psr				// M2   read PSR now that interrupts are disabled
+(pUStk)	mov.m r22=ar.itc			// M    fetch time at leave
+	nop.i 0
+	;;
+#else
 (pKStk)	mov r22=psr				// M2   read PSR now that interrupts are disabled
 	nop.i 0
 	nop.i 0
 	;;
+#endif
 	ld8 r29=[r16],16			// load cr.ipsr
 	ld8 r28=[r17],16			// load cr.iip
 	;;
@@ -938,15 +980,37 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	;;
 	ld8.fill r12=[r16],16
 	ld8.fill r13=[r17],16
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk)	adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
+#else
 (pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
+#endif
 	;;
 	ld8 r20=[r16],16			// ar.fpsr
 	ld8.fill r15=[r17],16
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+(pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18	// deferred
+#endif
 	;;
 	ld8.fill r14=[r16],16
 	ld8.fill r2=[r17]
 (pUStk)	mov r17=1
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	// mmi_ :  ld8 st1 shr;;		mmi_ : st8 st1 shr;;
+	// mib  :  mov add br		->	mib  : ld8 add br
+	// bbb_ :  br  nop cover;;		mbb_ : mov br cover;;
+	//
+	// no one require bsp in r16 if (pKStk) branch is selected.
+(pUStk)	st8 [r3]=r22				// save time at leave
+(pUStk)	st1 [r18]=r17				// restore current->thread.on_ustack
+	shr.u r18=r19,16			// get byte size of existing "dirty" partition
+	;;
+	ld8.fill r3=[r16]			// deferred
+	LOAD_PHYS_STACK_REG_SIZE(r17)
+(pKStk)	br.cond.dpnt skip_rbs_switch
+	mov r16=ar.bsp				// get existing backing store pointer
+#else
 	ld8.fill r3=[r16]
 (pUStk)	st1 [r18]=r17				// restore current->thread.on_ustack
 	shr.u r18=r19,16			// get byte size of existing "dirty" partition
@@ -954,6 +1018,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	mov r16=ar.bsp				// get existing backing store pointer
 	LOAD_PHYS_STACK_REG_SIZE(r17)
 (pKStk)	br.cond.dpnt skip_rbs_switch
+#endif
 
 	/*
 	 * Restore user backing store.
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 44841971f077..c932d86e2d81 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -660,7 +660,11 @@ GLOBAL_ENTRY(fsys_bubble_down)
 	nop.i 0
 	;;
 	mov ar.rsc=0				// M2   set enforced lazy mode, pl 0, LE, loadrs=0
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mov.m r30=ar.itc			// M    get cycle for accounting
+#else
 	nop.m 0
+#endif
 	nop.i 0
 	;;
 	mov r23=ar.bspstore			// M2 (12 cyc) save ar.bspstore
@@ -682,6 +686,28 @@ GLOBAL_ENTRY(fsys_bubble_down)
 	cmp.ne pKStk,pUStk=r0,r0		// A    set pKStk <- 0, pUStk <- 1
 	br.call.sptk.many b7=ia64_syscall_setup	// B
 	;;
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	// mov.m r30=ar.itc is called in advance
+	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
+	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2
+	;;
+	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// time at last check in kernel
+	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// time at leave kernel
+	;;
+	ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME	// cumulated stime
+	ld8 r21=[r17]				// cumulated utime
+	sub r22=r19,r18				// stime before leave kernel
+	;;
+	st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP	// update stamp
+	sub r18=r30,r19				// elapsed time in user mode
+	;;
+	add r20=r20,r22				// sum stime
+	add r21=r21,r18				// sum utime
+	;;
+	st8 [r16]=r20				// update stime
+	st8 [r17]=r21				// update utime
+	;;
+#endif
 	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
 	mov rp=r14				// I0   set the real return addr
 	and r3=_TIF_SYSCALL_TRACEAUDIT,r3	// A
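[Editor's note] In C terms, the accounting sequence above (and the near-identical
ones in ivt.S below) performs roughly the following. This is our paraphrase using
the thread_info fields the patch adds; it is not code from the kernel:

#include <stdint.h>

/* Minimal mirror of the accounting fields the patch adds to thread_info. */
struct ti_acct {
	uint64_t ac_stamp;	/* time at the last check in the kernel */
	uint64_t ac_leave;	/* time at the last leave to user mode */
	uint64_t ac_stime;	/* cumulated system time, in cycles */
	uint64_t ac_utime;	/* cumulated user time, in cycles */
};

/* On kernel entry from user mode; 'now' is the ar.itc value read early. */
static void account_sys_enter_c(struct ti_acct *ti, uint64_t now)
{
	ti->ac_stime += ti->ac_leave - ti->ac_stamp;	/* stime before leave */
	ti->ac_utime += now - ti->ac_leave;		/* elapsed time in user */
	ti->ac_stamp  = now;				/* a new interval starts */
}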
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index d3a41d5f8d12..ddeab4e36fd5 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1002,6 +1002,26 @@ GLOBAL_ENTRY(sched_clock)
 	br.ret.sptk.many rp
 END(sched_clock)
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+GLOBAL_ENTRY(cycle_to_cputime)
+	alloc r16=ar.pfs,1,0,0,0
+	addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+	;;
+	ldf8 f8=[r8]
+	;;
+	setf.sig f9=r32
+	;;
+	xmpy.lu f10=f9,f8		// calculate low 64 bits of 128-bit product (4 cyc)
+	xmpy.hu f11=f9,f8		// calculate high 64 bits of 128-bit product
+	;;
+	getf.sig r8=f10			// (5 cyc)
+	getf.sig r9=f11
+	;;
+	shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
+	br.ret.sptk.many rp
+END(cycle_to_cputime)
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
 GLOBAL_ENTRY(start_kernel_thread)
 	.prologue
 	.save rp, r0			// this is the end of the call-chain
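[Editor's note] cycle_to_cputime above multiplies a cycle count by the per-CPU
nsec-per-cycle factor as a 64x64->128-bit product (via xmpy on the FP unit,
where ia64 does its integer multiplies) and shifts the result down. A C sketch
of the same fixed-point arithmetic, with an illustrative shift value standing
in for IA64_NSEC_PER_CYC_SHIFT:

#include <stdint.h>

#define NSEC_PER_CYC_SHIFT 30	/* illustrative; the kernel's value is IA64_NSEC_PER_CYC_SHIFT */

/* cycles -> nanoseconds: full 128-bit product, then a right shift. */
static uint64_t cycle_to_nsec(uint64_t cyc, uint64_t nsec_per_cyc_fixed)
{
	unsigned __int128 prod = (unsigned __int128)cyc * nsec_per_cyc_fixed;

	return (uint64_t)(prod >> NSEC_PER_CYC_SHIFT);
}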
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 34f44d8be00d..6678c49daba3 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -805,8 +805,13 @@ ENTRY(break_fault)
 
 (p8)	adds r28=16,r28			// A    switch cr.iip to next bundle
 (p9)	adds r8=1,r8			// A    increment ei to next slot
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	;;
+	mov b6=r30			// I0   setup syscall handler branch reg early
+#else
 	nop.i 0
 	;;
+#endif
 
 	mov.m r25=ar.unat		// M2 (5 cyc)
 	dep r29=r8,r29,41,2		// I0   insert new ei into cr.ipsr
@@ -817,7 +822,11 @@ ENTRY(break_fault)
 	//
 ///////////////////////////////////////////////////////////////////////
 	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	mov.m r30=ar.itc			// M    get cycle for accounting
+#else
 	mov b6=r30				// I0   setup syscall handler branch reg early
+#endif
 	cmp.eq pKStk,pUStk=r0,r17		// A    were we on kernel stacks already?
 
 	and r9=_TIF_SYSCALL_TRACEAUDIT,r9	// A    mask trace or audit
@@ -829,6 +838,30 @@ ENTRY(break_fault)
 	cmp.eq p14,p0=r9,r0			// A    are syscalls being traced/audited?
 	br.call.sptk.many b7=ia64_syscall_setup	// B
 1:
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	// mov.m r30=ar.itc is called in advance, and r13 is current
+	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13	// A
+	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13	// A
+(pKStk)	br.cond.spnt .skip_accounting		// B    unlikely skip
+	;;
+	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// M    get last stamp
+	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// M    time at leave
+	;;
+	ld8 r20=[r16],TI_AC_STAMP-TI_AC_STIME	// M    cumulated stime
+	ld8 r21=[r17]				// M    cumulated utime
+	sub r22=r19,r18				// A    stime before leave
+	;;
+	st8 [r16]=r30,TI_AC_STIME-TI_AC_STAMP	// M    update stamp
+	sub r18=r30,r19				// A    elapsed time in user
+	;;
+	add r20=r20,r22				// A    sum stime
+	add r21=r21,r18				// A    sum utime
+	;;
+	st8 [r16]=r20				// M    update stime
+	st8 [r17]=r21				// M    update utime
+	;;
+.skip_accounting:
+#endif
 	mov ar.rsc=0x3				// M2   set eager mode, pl 0, LE, loadrs=0
 	nop 0
 	bsw.1					// B (6 cyc) regs are saved, switch to bank 1
@@ -928,6 +961,7 @@ END(interrupt)
  *	- r27: saved ar.rsc
  *	- r28: saved cr.iip
  *	- r29: saved cr.ipsr
+ *	- r30: ar.itc for accounting (don't touch)
  *	- r31: saved pr
  *	- b0: original contents (to be saved)
  * On exit:
@@ -1090,6 +1124,41 @@ END(dispatch_illegal_op_fault)
 	DBG_FAULT(16)
 	FAULT(16)
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+	/*
+	 * There is no particular reason for this code to be here, other than
+	 * that there happens to be space here that would go unused otherwise.
+	 * If this fault ever gets "unreserved", simply move the following
+	 * code to a more suitable spot...
+	 *
+	 * account_sys_enter is called from SAVE_MIN* macros if accounting is
+	 * enabled and if the macro is entered from user mode.
+	 */
+ENTRY(account_sys_enter)
+	// mov.m r20=ar.itc is called in advance, and r13 is current
+	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13
+	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13
+	;;
+	ld8 r18=[r16],TI_AC_STIME-TI_AC_STAMP	// time at last check in kernel
+	ld8 r19=[r17],TI_AC_UTIME-TI_AC_LEAVE	// time at left from kernel
+	;;
+	ld8 r23=[r16],TI_AC_STAMP-TI_AC_STIME	// cumulated stime
+	ld8 r21=[r17]				// cumulated utime
+	sub r22=r19,r18				// stime before leave kernel
+	;;
+	st8 [r16]=r20,TI_AC_STIME-TI_AC_STAMP	// update stamp
+	sub r18=r20,r19				// elapsed time in user mode
+	;;
+	add r23=r23,r22				// sum stime
+	add r21=r21,r18				// sum utime
+	;;
+	st8 [r16]=r23				// update stime
+	st8 [r17]=r21				// update utime
+	;;
+	br.ret.sptk.many rp
+END(account_sys_enter)
+#endif
+
 	.org ia64_ivt+0x4400
/////////////////////////////////////////////////////////////////////////////////////////
// 0x4400 Entry 17 (size 64 bundles)	Reserved
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index c9ac8bada786..7c548ac52bbc 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -3,6 +3,18 @@
 
 #include "entry.h"
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/* read ar.itc in advance, and use it before leaving bank 0 */
+#define ACCOUNT_GET_STAMP				\
+(pUStk) mov.m r20=ar.itc;
+#define ACCOUNT_SYS_ENTER				\
+(pUStk) br.call.spnt rp=account_sys_enter		\
+	;;
+#else
+#define ACCOUNT_GET_STAMP
+#define ACCOUNT_SYS_ENTER
+#endif
+
 /*
  * DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
  * the minimum state necessary that allows us to turn psr.ic back
@@ -122,11 +134,13 @@
 	;;							\
 .mem.offset 0,0; st8.spill [r16]=r2,16;				\
 .mem.offset 8,0; st8.spill [r17]=r3,16;				\
+	ACCOUNT_GET_STAMP					\
 	adds r2=IA64_PT_REGS_R16_OFFSET,r1;			\
 	;;							\
 	EXTRA;							\
 	movl r1=__gp;		/* establish kernel global pointer */ \
 	;;							\
+	ACCOUNT_SYS_ENTER					\
 	bsw.1;			/* switch back to bank 1 (must be last in insn group) */ \
 	;;
 
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 17fda5293c67..48e15a51782f 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -59,6 +59,84 @@ static struct clocksource clocksource_itc = {
 };
 static struct clocksource *itc_clocksource;
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+
+#include <linux/kernel_stat.h>
+
+extern cputime_t cycle_to_cputime(u64 cyc);
+
+/*
+ * Called from the context switch with interrupts disabled, to charge all
+ * accumulated times to the current process, and to prepare accounting on
+ * the next process.
+ */
+void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next)
+{
+	struct thread_info *pi = task_thread_info(prev);
+	struct thread_info *ni = task_thread_info(next);
+	cputime_t delta_stime, delta_utime;
+	__u64 now;
+
+	now = ia64_get_itc();
+
+	delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp));
+	account_system_time(prev, 0, delta_stime);
+	account_system_time_scaled(prev, delta_stime);
+
+	if (pi->ac_utime) {
+		delta_utime = cycle_to_cputime(pi->ac_utime);
+		account_user_time(prev, delta_utime);
+		account_user_time_scaled(prev, delta_utime);
+	}
+
+	pi->ac_stamp = ni->ac_stamp = now;
+	ni->ac_stime = ni->ac_utime = 0;
+}
+
+/*
+ * Account time for a transition between system, hard irq or soft irq state.
+ * Note that this function is called with interrupts enabled.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+	struct thread_info *ti = task_thread_info(tsk);
+	unsigned long flags;
+	cputime_t delta_stime;
+	__u64 now;
+
+	local_irq_save(flags);
+
+	now = ia64_get_itc();
+
+	delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp));
+	account_system_time(tsk, 0, delta_stime);
+	account_system_time_scaled(tsk, delta_stime);
+	ti->ac_stime = 0;
+
+	ti->ac_stamp = now;
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Called from the timer interrupt handler to charge accumulated user time
+ * to the current process.  Must be called with interrupts disabled.
+ */
+void account_process_tick(struct task_struct *p, int user_tick)
+{
+	struct thread_info *ti = task_thread_info(p);
+	cputime_t delta_utime;
+
+	if (ti->ac_utime) {
+		delta_utime = cycle_to_cputime(ti->ac_utime);
+		account_user_time(p, delta_utime);
+		account_user_time_scaled(p, delta_utime);
+		ti->ac_utime = 0;
+	}
+}
+
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+
 static irqreturn_t
 timer_interrupt (int irq, void *dev_id)
 {