diff options
| author | Frederic Weisbecker <fweisbec@gmail.com> | 2012-09-08 09:23:11 -0400 |
|---|---|---|
| committer | Frederic Weisbecker <fweisbec@gmail.com> | 2012-09-25 09:31:31 -0400 |
| commit | bf9fae9f5e4ca8dce4708812f9ad6281e61df109 (patch) | |
| tree | 02318ac3db48dd993a4a430de5de66a337895d16 | |
| parent | bc2a27cd27271c5257989a57f511be86b26f5e54 (diff) | |
cputime: Use a proper subsystem naming for vtime related APIs
Use a naming based on vtime as a prefix for virtual based
cputime accounting APIs:
- account_system_vtime() -> vtime_account()
- account_switch_vtime() -> vtime_task_switch()
It makes it easier to allow for further derived variants such
as vtime_account_system(), vtime_account_idle(), ... if we
want to find out the context we account to from generic code.
This also makes it clearer which subsystem these APIs
refer to.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
| -rw-r--r-- | arch/ia64/kernel/time.c | 6 | ||||
| -rw-r--r-- | arch/powerpc/kernel/time.c | 10 | ||||
| -rw-r--r-- | arch/s390/kernel/vtime.c | 6 | ||||
| -rw-r--r-- | include/linux/hardirq.h | 8 | ||||
| -rw-r--r-- | include/linux/kernel_stat.h | 4 | ||||
| -rw-r--r-- | include/linux/kvm_host.h | 4 | ||||
| -rw-r--r-- | kernel/sched/core.c | 2 | ||||
| -rw-r--r-- | kernel/sched/cputime.c | 8 | ||||
| -rw-r--r-- | kernel/softirq.c | 6 |
9 files changed, 27 insertions, 27 deletions
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 6247197b9877..16bb6eda879d 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
| @@ -88,7 +88,7 @@ extern cputime_t cycle_to_cputime(u64 cyc); | |||
| 88 | * accumulated times to the current process, and to prepare accounting on | 88 | * accumulated times to the current process, and to prepare accounting on |
| 89 | * the next process. | 89 | * the next process. |
| 90 | */ | 90 | */ |
| 91 | void account_switch_vtime(struct task_struct *prev) | 91 | void vtime_task_switch(struct task_struct *prev) |
| 92 | { | 92 | { |
| 93 | struct thread_info *pi = task_thread_info(prev); | 93 | struct thread_info *pi = task_thread_info(prev); |
| 94 | struct thread_info *ni = task_thread_info(current); | 94 | struct thread_info *ni = task_thread_info(current); |
| @@ -116,7 +116,7 @@ void account_switch_vtime(struct task_struct *prev) | |||
| 116 | * Account time for a transition between system, hard irq or soft irq state. | 116 | * Account time for a transition between system, hard irq or soft irq state. |
| 117 | * Note that this function is called with interrupts enabled. | 117 | * Note that this function is called with interrupts enabled. |
| 118 | */ | 118 | */ |
| 119 | void account_system_vtime(struct task_struct *tsk) | 119 | void vtime_account(struct task_struct *tsk) |
| 120 | { | 120 | { |
| 121 | struct thread_info *ti = task_thread_info(tsk); | 121 | struct thread_info *ti = task_thread_info(tsk); |
| 122 | unsigned long flags; | 122 | unsigned long flags; |
| @@ -138,7 +138,7 @@ void account_system_vtime(struct task_struct *tsk) | |||
| 138 | 138 | ||
| 139 | local_irq_restore(flags); | 139 | local_irq_restore(flags); |
| 140 | } | 140 | } |
| 141 | EXPORT_SYMBOL_GPL(account_system_vtime); | 141 | EXPORT_SYMBOL_GPL(vtime_account); |
| 142 | 142 | ||
| 143 | /* | 143 | /* |
| 144 | * Called from the timer interrupt handler to charge accumulated user time | 144 | * Called from the timer interrupt handler to charge accumulated user time |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 49da7f06e643..39899d7ebda0 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
| @@ -291,7 +291,7 @@ static inline u64 calculate_stolen_time(u64 stop_tb) | |||
| 291 | * Account time for a transition between system, hard irq | 291 | * Account time for a transition between system, hard irq |
| 292 | * or soft irq state. | 292 | * or soft irq state. |
| 293 | */ | 293 | */ |
| 294 | void account_system_vtime(struct task_struct *tsk) | 294 | void vtime_account(struct task_struct *tsk) |
| 295 | { | 295 | { |
| 296 | u64 now, nowscaled, delta, deltascaled; | 296 | u64 now, nowscaled, delta, deltascaled; |
| 297 | unsigned long flags; | 297 | unsigned long flags; |
| @@ -343,14 +343,14 @@ void account_system_vtime(struct task_struct *tsk) | |||
| 343 | } | 343 | } |
| 344 | local_irq_restore(flags); | 344 | local_irq_restore(flags); |
| 345 | } | 345 | } |
| 346 | EXPORT_SYMBOL_GPL(account_system_vtime); | 346 | EXPORT_SYMBOL_GPL(vtime_account); |
| 347 | 347 | ||
| 348 | /* | 348 | /* |
| 349 | * Transfer the user and system times accumulated in the paca | 349 | * Transfer the user and system times accumulated in the paca |
| 350 | * by the exception entry and exit code to the generic process | 350 | * by the exception entry and exit code to the generic process |
| 351 | * user and system time records. | 351 | * user and system time records. |
| 352 | * Must be called with interrupts disabled. | 352 | * Must be called with interrupts disabled. |
| 353 | * Assumes that account_system_vtime() has been called recently | 353 | * Assumes that vtime_account() has been called recently |
| 354 | * (i.e. since the last entry from usermode) so that | 354 | * (i.e. since the last entry from usermode) so that |
| 355 | * get_paca()->user_time_scaled is up to date. | 355 | * get_paca()->user_time_scaled is up to date. |
| 356 | */ | 356 | */ |
| @@ -366,9 +366,9 @@ void account_process_tick(struct task_struct *tsk, int user_tick) | |||
| 366 | account_user_time(tsk, utime, utimescaled); | 366 | account_user_time(tsk, utime, utimescaled); |
| 367 | } | 367 | } |
| 368 | 368 | ||
| 369 | void account_switch_vtime(struct task_struct *prev) | 369 | void vtime_task_switch(struct task_struct *prev) |
| 370 | { | 370 | { |
| 371 | account_system_vtime(prev); | 371 | vtime_account(prev); |
| 372 | account_process_tick(prev, 0); | 372 | account_process_tick(prev, 0); |
| 373 | } | 373 | } |
| 374 | 374 | ||
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 449ac22cc71b..cb5093c26d16 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
| @@ -99,7 +99,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) | |||
| 99 | return virt_timer_forward(user + system); | 99 | return virt_timer_forward(user + system); |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | void account_switch_vtime(struct task_struct *prev) | 102 | void vtime_task_switch(struct task_struct *prev) |
| 103 | { | 103 | { |
| 104 | struct thread_info *ti; | 104 | struct thread_info *ti; |
| 105 | 105 | ||
| @@ -122,7 +122,7 @@ void account_process_tick(struct task_struct *tsk, int user_tick) | |||
| 122 | * Update process times based on virtual cpu times stored by entry.S | 122 | * Update process times based on virtual cpu times stored by entry.S |
| 123 | * to the lowcore fields user_timer, system_timer & steal_clock. | 123 | * to the lowcore fields user_timer, system_timer & steal_clock. |
| 124 | */ | 124 | */ |
| 125 | void account_system_vtime(struct task_struct *tsk) | 125 | void vtime_account(struct task_struct *tsk) |
| 126 | { | 126 | { |
| 127 | struct thread_info *ti = task_thread_info(tsk); | 127 | struct thread_info *ti = task_thread_info(tsk); |
| 128 | u64 timer, system; | 128 | u64 timer, system; |
| @@ -138,7 +138,7 @@ void account_system_vtime(struct task_struct *tsk) | |||
| 138 | 138 | ||
| 139 | virt_timer_forward(system); | 139 | virt_timer_forward(system); |
| 140 | } | 140 | } |
| 141 | EXPORT_SYMBOL_GPL(account_system_vtime); | 141 | EXPORT_SYMBOL_GPL(vtime_account); |
| 142 | 142 | ||
| 143 | void __kprobes vtime_stop_cpu(void) | 143 | void __kprobes vtime_stop_cpu(void) |
| 144 | { | 144 | { |
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 305f23cd7cff..cab3da3d0949 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h | |||
| @@ -132,11 +132,11 @@ extern void synchronize_irq(unsigned int irq); | |||
| 132 | struct task_struct; | 132 | struct task_struct; |
| 133 | 133 | ||
| 134 | #if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING) | 134 | #if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING) |
| 135 | static inline void account_system_vtime(struct task_struct *tsk) | 135 | static inline void vtime_account(struct task_struct *tsk) |
| 136 | { | 136 | { |
| 137 | } | 137 | } |
| 138 | #else | 138 | #else |
| 139 | extern void account_system_vtime(struct task_struct *tsk); | 139 | extern void vtime_account(struct task_struct *tsk); |
| 140 | #endif | 140 | #endif |
| 141 | 141 | ||
| 142 | #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) | 142 | #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) |
| @@ -162,7 +162,7 @@ extern void rcu_nmi_exit(void); | |||
| 162 | */ | 162 | */ |
| 163 | #define __irq_enter() \ | 163 | #define __irq_enter() \ |
| 164 | do { \ | 164 | do { \ |
| 165 | account_system_vtime(current); \ | 165 | vtime_account(current); \ |
| 166 | add_preempt_count(HARDIRQ_OFFSET); \ | 166 | add_preempt_count(HARDIRQ_OFFSET); \ |
| 167 | trace_hardirq_enter(); \ | 167 | trace_hardirq_enter(); \ |
| 168 | } while (0) | 168 | } while (0) |
| @@ -178,7 +178,7 @@ extern void irq_enter(void); | |||
| 178 | #define __irq_exit() \ | 178 | #define __irq_exit() \ |
| 179 | do { \ | 179 | do { \ |
| 180 | trace_hardirq_exit(); \ | 180 | trace_hardirq_exit(); \ |
| 181 | account_system_vtime(current); \ | 181 | vtime_account(current); \ |
| 182 | sub_preempt_count(HARDIRQ_OFFSET); \ | 182 | sub_preempt_count(HARDIRQ_OFFSET); \ |
| 183 | } while (0) | 183 | } while (0) |
| 184 | 184 | ||
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index bbe5d15d6597..ca0944b92f4a 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h | |||
| @@ -131,9 +131,9 @@ extern void account_steal_ticks(unsigned long ticks); | |||
| 131 | extern void account_idle_ticks(unsigned long ticks); | 131 | extern void account_idle_ticks(unsigned long ticks); |
| 132 | 132 | ||
| 133 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 133 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
| 134 | extern void account_switch_vtime(struct task_struct *prev); | 134 | extern void vtime_task_switch(struct task_struct *prev); |
| 135 | #else | 135 | #else |
| 136 | static inline void account_switch_vtime(struct task_struct *prev) { } | 136 | static inline void vtime_task_switch(struct task_struct *prev) { } |
| 137 | #endif | 137 | #endif |
| 138 | 138 | ||
| 139 | #endif /* _LINUX_KERNEL_STAT_H */ | 139 | #endif /* _LINUX_KERNEL_STAT_H */ |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index b70b48b01098..8a59e0abe5fa 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -685,7 +685,7 @@ static inline int kvm_deassign_device(struct kvm *kvm, | |||
| 685 | static inline void kvm_guest_enter(void) | 685 | static inline void kvm_guest_enter(void) |
| 686 | { | 686 | { |
| 687 | BUG_ON(preemptible()); | 687 | BUG_ON(preemptible()); |
| 688 | account_system_vtime(current); | 688 | vtime_account(current); |
| 689 | current->flags |= PF_VCPU; | 689 | current->flags |= PF_VCPU; |
| 690 | /* KVM does not hold any references to rcu protected data when it | 690 | /* KVM does not hold any references to rcu protected data when it |
| 691 | * switches CPU into a guest mode. In fact switching to a guest mode | 691 | * switches CPU into a guest mode. In fact switching to a guest mode |
| @@ -699,7 +699,7 @@ static inline void kvm_guest_enter(void) | |||
| 699 | 699 | ||
| 700 | static inline void kvm_guest_exit(void) | 700 | static inline void kvm_guest_exit(void) |
| 701 | { | 701 | { |
| 702 | account_system_vtime(current); | 702 | vtime_account(current); |
| 703 | current->flags &= ~PF_VCPU; | 703 | current->flags &= ~PF_VCPU; |
| 704 | } | 704 | } |
| 705 | 705 | ||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ba144b121f3d..21e4dcff18f3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -1764,7 +1764,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) | |||
| 1764 | * Manfred Spraul <manfred@colorfullife.com> | 1764 | * Manfred Spraul <manfred@colorfullife.com> |
| 1765 | */ | 1765 | */ |
| 1766 | prev_state = prev->state; | 1766 | prev_state = prev->state; |
| 1767 | account_switch_vtime(prev); | 1767 | vtime_task_switch(prev); |
| 1768 | finish_arch_switch(prev); | 1768 | finish_arch_switch(prev); |
| 1769 | perf_event_task_sched_in(prev, current); | 1769 | perf_event_task_sched_in(prev, current); |
| 1770 | finish_lock_switch(rq, prev); | 1770 | finish_lock_switch(rq, prev); |
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index 372692bd5376..53f5b12f2821 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c | |||
| @@ -10,11 +10,11 @@ | |||
| 10 | 10 | ||
| 11 | /* | 11 | /* |
| 12 | * There are no locks covering percpu hardirq/softirq time. | 12 | * There are no locks covering percpu hardirq/softirq time. |
| 13 | * They are only modified in account_system_vtime, on corresponding CPU | 13 | * They are only modified in vtime_account, on corresponding CPU |
| 14 | * with interrupts disabled. So, writes are safe. | 14 | * with interrupts disabled. So, writes are safe. |
| 15 | * They are read and saved off onto struct rq in update_rq_clock(). | 15 | * They are read and saved off onto struct rq in update_rq_clock(). |
| 16 | * This may result in other CPU reading this CPU's irq time and can | 16 | * This may result in other CPU reading this CPU's irq time and can |
| 17 | * race with irq/account_system_vtime on this CPU. We would either get old | 17 | * race with irq/vtime_account on this CPU. We would either get old |
| 18 | * or new value with a side effect of accounting a slice of irq time to wrong | 18 | * or new value with a side effect of accounting a slice of irq time to wrong |
| 19 | * task when irq is in progress while we read rq->clock. That is a worthy | 19 | * task when irq is in progress while we read rq->clock. That is a worthy |
| 20 | * compromise in place of having locks on each irq in account_system_time. | 20 | * compromise in place of having locks on each irq in account_system_time. |
| @@ -43,7 +43,7 @@ DEFINE_PER_CPU(seqcount_t, irq_time_seq); | |||
| 43 | * Called before incrementing preempt_count on {soft,}irq_enter | 43 | * Called before incrementing preempt_count on {soft,}irq_enter |
| 44 | * and before decrementing preempt_count on {soft,}irq_exit. | 44 | * and before decrementing preempt_count on {soft,}irq_exit. |
| 45 | */ | 45 | */ |
| 46 | void account_system_vtime(struct task_struct *curr) | 46 | void vtime_account(struct task_struct *curr) |
| 47 | { | 47 | { |
| 48 | unsigned long flags; | 48 | unsigned long flags; |
| 49 | s64 delta; | 49 | s64 delta; |
| @@ -73,7 +73,7 @@ void account_system_vtime(struct task_struct *curr) | |||
| 73 | irq_time_write_end(); | 73 | irq_time_write_end(); |
| 74 | local_irq_restore(flags); | 74 | local_irq_restore(flags); |
| 75 | } | 75 | } |
| 76 | EXPORT_SYMBOL_GPL(account_system_vtime); | 76 | EXPORT_SYMBOL_GPL(vtime_account); |
| 77 | 77 | ||
| 78 | static int irqtime_account_hi_update(void) | 78 | static int irqtime_account_hi_update(void) |
| 79 | { | 79 | { |
diff --git a/kernel/softirq.c b/kernel/softirq.c index b73e681df09e..d55e3159f928 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -220,7 +220,7 @@ asmlinkage void __do_softirq(void) | |||
| 220 | current->flags &= ~PF_MEMALLOC; | 220 | current->flags &= ~PF_MEMALLOC; |
| 221 | 221 | ||
| 222 | pending = local_softirq_pending(); | 222 | pending = local_softirq_pending(); |
| 223 | account_system_vtime(current); | 223 | vtime_account(current); |
| 224 | 224 | ||
| 225 | __local_bh_disable((unsigned long)__builtin_return_address(0), | 225 | __local_bh_disable((unsigned long)__builtin_return_address(0), |
| 226 | SOFTIRQ_OFFSET); | 226 | SOFTIRQ_OFFSET); |
| @@ -271,7 +271,7 @@ restart: | |||
| 271 | 271 | ||
| 272 | lockdep_softirq_exit(); | 272 | lockdep_softirq_exit(); |
| 273 | 273 | ||
| 274 | account_system_vtime(current); | 274 | vtime_account(current); |
| 275 | __local_bh_enable(SOFTIRQ_OFFSET); | 275 | __local_bh_enable(SOFTIRQ_OFFSET); |
| 276 | tsk_restore_flags(current, old_flags, PF_MEMALLOC); | 276 | tsk_restore_flags(current, old_flags, PF_MEMALLOC); |
| 277 | } | 277 | } |
| @@ -340,7 +340,7 @@ static inline void invoke_softirq(void) | |||
| 340 | */ | 340 | */ |
| 341 | void irq_exit(void) | 341 | void irq_exit(void) |
| 342 | { | 342 | { |
| 343 | account_system_vtime(current); | 343 | vtime_account(current); |
| 344 | trace_hardirq_exit(); | 344 | trace_hardirq_exit(); |
| 345 | sub_preempt_count(IRQ_EXIT_OFFSET); | 345 | sub_preempt_count(IRQ_EXIT_OFFSET); |
| 346 | if (!in_interrupt() && local_softirq_pending()) | 346 | if (!in_interrupt() && local_softirq_pending()) |
