author	Frederic Weisbecker <fweisbec@gmail.com>	2012-09-08 09:23:11 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2012-09-25 09:31:31 -0400
commit	bf9fae9f5e4ca8dce4708812f9ad6281e61df109 (patch)
tree	02318ac3db48dd993a4a430de5de66a337895d16 /kernel
parent	bc2a27cd27271c5257989a57f511be86b26f5e54 (diff)
cputime: Use a proper subsystem naming for vtime related APIs
Use vtime as a naming prefix for the virtual cputime accounting APIs:

- account_system_vtime() -> vtime_account()
- account_switch_vtime() -> vtime_task_switch()

This makes it easier to allow further declensions such as
vtime_account_system(), vtime_account_idle(), ... if we want to find
out, from generic code, the context we account to. It also makes it
clearer which subsystem these APIs belong to.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
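For illustration, a minimal sketch of the API surface after this rename (declarations only; the parameter names follow the call sites in the diff below, and vtime_account_system()/vtime_account_idle() are the hypothetical future declensions mentioned above, not part of this patch):

struct task_struct;

/* Renamed entry points introduced by this patch: */
void vtime_account(struct task_struct *curr);      /* was account_system_vtime() */
void vtime_task_switch(struct task_struct *prev);  /* was account_switch_vtime() */

/* Possible future context-specific variants hinted at in the message: */
void vtime_account_system(struct task_struct *tsk);
void vtime_account_idle(struct task_struct *tsk);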
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	2
-rw-r--r--	kernel/sched/cputime.c	8
-rw-r--r--	kernel/softirq.c	6
3 files changed, 8 insertions, 8 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ba144b121f3d..21e4dcff18f3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1764,7 +1764,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 * Manfred Spraul <manfred@colorfullife.com>
 	 */
 	prev_state = prev->state;
-	account_switch_vtime(prev);
+	vtime_task_switch(prev);
 	finish_arch_switch(prev);
 	perf_event_task_sched_in(prev, current);
 	finish_lock_switch(rq, prev);
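On configurations without virtual cputime accounting, vtime_task_switch() presumably compiles away so that finish_task_switch() pays no extra cost. A hedged sketch of what such a stub could look like; the CONFIG_VIRT_CPU_ACCOUNTING guard is the usual kernel pattern for this, but the exact header layout is not shown in this patch:

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/* Implemented by vtime-capable architectures. */
extern void vtime_task_switch(struct task_struct *prev);
#else
/* No virtual cputime accounting: the call vanishes at compile time. */
static inline void vtime_task_switch(struct task_struct *prev) { }
#endif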
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 372692bd5376..53f5b12f2821 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -10,11 +10,11 @@
 
 /*
  * There are no locks covering percpu hardirq/softirq time.
- * They are only modified in account_system_vtime, on corresponding CPU
+ * They are only modified in vtime_account, on corresponding CPU
  * with interrupts disabled. So, writes are safe.
  * They are read and saved off onto struct rq in update_rq_clock().
  * This may result in other CPU reading this CPU's irq time and can
- * race with irq/account_system_vtime on this CPU. We would either get old
+ * race with irq/vtime_account on this CPU. We would either get old
  * or new value with a side effect of accounting a slice of irq time to wrong
  * task when irq is in progress while we read rq->clock. That is a worthy
  * compromise in place of having locks on each irq in account_system_time.
@@ -43,7 +43,7 @@ DEFINE_PER_CPU(seqcount_t, irq_time_seq);
  * Called before incrementing preempt_count on {soft,}irq_enter
  * and before decrementing preempt_count on {soft,}irq_exit.
  */
-void account_system_vtime(struct task_struct *curr)
+void vtime_account(struct task_struct *curr)
 {
 	unsigned long flags;
 	s64 delta;
@@ -73,7 +73,7 @@ void account_system_vtime(struct task_struct *curr)
 	irq_time_write_end();
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(account_system_vtime);
+EXPORT_SYMBOL_GPL(vtime_account);
 
 static int irqtime_account_hi_update(void)
 {
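The comment in the first cputime.c hunk above describes a lock-free scheme: the writer updates per-CPU irq time with interrupts disabled on its own CPU, while remote readers retry via the per-CPU seqcount (irq_time_seq, visible in the second hunk's context). A sketch of that read/write pairing, assuming the kernel's seqlock API; the helper names here are illustrative rather than the exact ones in cputime.c:

#include <linux/seqlock.h>
#include <linux/percpu.h>

DEFINE_PER_CPU(seqcount_t, irq_time_seq);
DEFINE_PER_CPU(u64, cpu_hardirq_time);

/* Writer: runs on its own CPU with IRQs off, so no lock is needed. */
static void irq_time_write(u64 delta)
{
	write_seqcount_begin(this_cpu_ptr(&irq_time_seq));
	__this_cpu_add(cpu_hardirq_time, delta);
	write_seqcount_end(this_cpu_ptr(&irq_time_seq));
}

/* Remote reader: retries until it observes a consistent value. */
static u64 irq_time_read(int cpu)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
		val = per_cpu(cpu_hardirq_time, cpu);
	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));

	return val;
}

As the comment notes, a reader racing with the writer may charge a slice of irq time to the wrong task, which is accepted as a worthy trade-off against per-irq locking.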
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b73e681df09e..d55e3159f928 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -220,7 +220,7 @@ asmlinkage void __do_softirq(void)
 	current->flags &= ~PF_MEMALLOC;
 
 	pending = local_softirq_pending();
-	account_system_vtime(current);
+	vtime_account(current);
 
 	__local_bh_disable((unsigned long)__builtin_return_address(0),
 				SOFTIRQ_OFFSET);
@@ -271,7 +271,7 @@ restart:
 
 	lockdep_softirq_exit();
 
-	account_system_vtime(current);
+	vtime_account(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
 	tsk_restore_flags(current, old_flags, PF_MEMALLOC);
 }
@@ -340,7 +340,7 @@ static inline void invoke_softirq(void)
  */
 void irq_exit(void)
 {
-	account_system_vtime(current);
+	vtime_account(current);
 	trace_hardirq_exit();
 	sub_preempt_count(IRQ_EXIT_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
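The softirq.c call sites above bracket hardirq and softirq execution, so each vtime_account() call closes out the time slice since the previous accounting point. A hedged reconstruction of how the body (only partially visible in the cputime.c hunks) might bucket that delta by context; the per-CPU counter names follow the kernel's irqtime accounting, but this is a simplified sketch, not the verbatim function:

void vtime_account(struct task_struct *curr)
{
	unsigned long flags;
	s64 delta;
	int cpu;

	local_irq_save(flags);

	/* Time elapsed since the last accounting point on this CPU. */
	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/* Charge the slice to the context we are leaving; softirq time
	 * run by ksoftirqd stays on the ksoftirqd thread itself. */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);
	irq_time_write_end();

	local_irq_restore(flags);
}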