aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorGlauber Costa <glommer@redhat.com>2011-07-11 15:28:17 -0400
committerAvi Kivity <avi@redhat.com>2011-07-14 05:59:46 -0400
commite6e6685accfa81f509fadfc9624bc7c3862d75c4 (patch)
tree8d274b61c6011ad4d5844f2fdc15459b8216cc90 /kernel
parent747f2925836b678d2a0de980d70101fd35620f2a (diff)
KVM guest: Steal time accounting
This patch accounts steal time time in account_process_tick. If one or more tick is considered stolen in the current accounting cycle, user/system accounting is skipped. Idle is fine, since the hypervisor does not report steal time if the guest is halted. Accounting steal time from the core scheduler give us the advantage of direct acess to the runqueue data. In a later opportunity, it can be used to tweak cpu power and make the scheduler aware of the time it lost. [avi: <asm/paravirt.h> doesn't exist on many archs] Signed-off-by: Glauber Costa <glommer@redhat.com> Acked-by: Rik van Riel <riel@redhat.com> Acked-by: Peter Zijlstra <peterz@infradead.org> Tested-by: Eric B Munson <emunson@mgebm.net> CC: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> CC: Anthony Liguori <aliguori@us.ibm.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c43
1 files changed, 43 insertions, 0 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index 3f2e502d609b..f98a28b19b2a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -75,6 +75,9 @@
75#include <asm/tlb.h> 75#include <asm/tlb.h>
76#include <asm/irq_regs.h> 76#include <asm/irq_regs.h>
77#include <asm/mutex.h> 77#include <asm/mutex.h>
78#ifdef CONFIG_PARAVIRT
79#include <asm/paravirt.h>
80#endif
78 81
79#include "sched_cpupri.h" 82#include "sched_cpupri.h"
80#include "workqueue_sched.h" 83#include "workqueue_sched.h"
@@ -528,6 +531,9 @@ struct rq {
528#ifdef CONFIG_IRQ_TIME_ACCOUNTING 531#ifdef CONFIG_IRQ_TIME_ACCOUNTING
529 u64 prev_irq_time; 532 u64 prev_irq_time;
530#endif 533#endif
534#ifdef CONFIG_PARAVIRT
535 u64 prev_steal_time;
536#endif
531 537
532 /* calc_load related fields */ 538 /* calc_load related fields */
533 unsigned long calc_load_update; 539 unsigned long calc_load_update;
@@ -1953,6 +1959,18 @@ void account_system_vtime(struct task_struct *curr)
1953} 1959}
1954EXPORT_SYMBOL_GPL(account_system_vtime); 1960EXPORT_SYMBOL_GPL(account_system_vtime);
1955 1961
1962#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
1963
#ifdef CONFIG_PARAVIRT
/*
 * steal_ticks - convert a steal-time delta (nanoseconds) into whole ticks.
 * @steal: nanoseconds of steal time accumulated since the last accounting.
 *
 * Returns the number of whole TICK_NSEC periods contained in @steal.
 *
 * For the common case of a small delta (at most one second), the
 * iterative divide in __iter_div_u64_rem() avoids a full 64-bit
 * division, which is slow on 32-bit architectures.  Only when the
 * delta is unexpectedly large (> NSEC_PER_SEC) do we fall back to a
 * real 64-bit division via div_u64().
 */
static inline u64 steal_ticks(u64 steal)
{
	if (unlikely(steal > NSEC_PER_SEC))
		return div_u64(steal, TICK_NSEC);

	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
}
#endif
1973
1956static void update_rq_clock_task(struct rq *rq, s64 delta) 1974static void update_rq_clock_task(struct rq *rq, s64 delta)
1957{ 1975{
1958 s64 irq_delta; 1976 s64 irq_delta;
@@ -3845,6 +3863,25 @@ void account_idle_time(cputime_t cputime)
3845 cpustat->idle = cputime64_add(cpustat->idle, cputime64); 3863 cpustat->idle = cputime64_add(cpustat->idle, cputime64);
3846} 3864}
3847 3865
3866static __always_inline bool steal_account_process_tick(void)
3867{
3868#ifdef CONFIG_PARAVIRT
3869 if (static_branch(&paravirt_steal_enabled)) {
3870 u64 steal, st = 0;
3871
3872 steal = paravirt_steal_clock(smp_processor_id());
3873 steal -= this_rq()->prev_steal_time;
3874
3875 st = steal_ticks(steal);
3876 this_rq()->prev_steal_time += st * TICK_NSEC;
3877
3878 account_steal_time(st);
3879 return st;
3880 }
3881#endif
3882 return false;
3883}
3884
3848#ifndef CONFIG_VIRT_CPU_ACCOUNTING 3885#ifndef CONFIG_VIRT_CPU_ACCOUNTING
3849 3886
3850#ifdef CONFIG_IRQ_TIME_ACCOUNTING 3887#ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -3876,6 +3913,9 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
3876 cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy); 3913 cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
3877 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat; 3914 struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
3878 3915
3916 if (steal_account_process_tick())
3917 return;
3918
3879 if (irqtime_account_hi_update()) { 3919 if (irqtime_account_hi_update()) {
3880 cpustat->irq = cputime64_add(cpustat->irq, tmp); 3920 cpustat->irq = cputime64_add(cpustat->irq, tmp);
3881 } else if (irqtime_account_si_update()) { 3921 } else if (irqtime_account_si_update()) {
@@ -3929,6 +3969,9 @@ void account_process_tick(struct task_struct *p, int user_tick)
3929 return; 3969 return;
3930 } 3970 }
3931 3971
3972 if (steal_account_process_tick())
3973 return;
3974
3932 if (user_tick) 3975 if (user_tick)
3933 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); 3976 account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
3934 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) 3977 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))