author     Anton Blanchard <anton@samba.org>    2010-02-02 17:46:13 -0500
committer  Ingo Molnar <mingo@elte.hu>          2010-02-08 02:57:37 -0500
commit     fa535a77bd3fa32b9215ba375d6a202fe73e1dd6 (patch)
tree       a82c7c10a3a6eaf19e873863c98a0d5c83fd965e /kernel/sched.c
parent     0c9cf2efd74dbc90354e2ccc7dbd6bad68ec6c4d (diff)
sched: cpuacct: Use bigger percpu counter batch values for stats counters
When CONFIG_VIRT_CPU_ACCOUNTING and CONFIG_CGROUP_CPUACCT are
enabled, we can call cpuacct_update_stats() with values much larger
than percpu_counter_batch. This means the call to
percpu_counter_add() will always spill into the global count, which
is protected by a spinlock, and we end up with a global spinlock in
the scheduler.
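For context, here is a simplified sketch of __percpu_counter_add(),
modelled on lib/percpu_counter.c of this era (illustrative only, not
the exact source); it shows why a val that always reaches the batch
threshold takes the spinlocked slow path on every call:

	void __percpu_counter_add(struct percpu_counter *fbc, s64 amount,
				  s32 batch)
	{
		s64 count;
		s32 *pcount;
		int cpu = get_cpu();

		pcount = per_cpu_ptr(fbc->counters, cpu);
		count = *pcount + amount;
		if (count >= batch || count <= -batch) {
			/* Slow path: fold into the shared count under the lock. */
			spin_lock(&fbc->lock);
			fbc->count += count;
			*pcount = 0;
			spin_unlock(&fbc->lock);
		} else {
			/* Fast path: stays per cpu, no shared state touched. */
			*pcount = count;
		}
		put_cpu();
	}

With CONFIG_VIRT_CPU_ACCOUNTING enabled, val routinely exceeds batch,
so the fast path is never taken.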
Based on an idea by KOSAKI Motohiro, this patch scales the batch
value by cputime_one_jiffy so that we have the same batch limit as
we would if CONFIG_VIRT_CPU_ACCOUNTING were disabled. His patch did
this once at boot, but that initialisation happened too early on
PowerPC (before time_init()) and the value was never updated at
runtime when a cpu hotplug add/remove changed percpu_counter_batch.
This patch instead scales percpu_counter_batch by
cputime_one_jiffy at runtime, which keeps the batch correct even
after cpu hotplug operations. We cap it at INT_MAX in case of
overflow.
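As a rough illustration (all numbers hypothetical: 64 cpus online, a
512MHz timebase and HZ=250):

	/*
	 *   percpu_counter_batch = max(32, 64 * 2)   = 128
	 *   cputime_one_jiffy    = 512000000 / 250   = 2048000 ticks/jiffy
	 *   CPUACCT_BATCH        = min(128 * 2048000, INT_MAX)
	 *                        = 262144000  (well under INT_MAX, no capping)
	 */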
For architectures that do not support
CONFIG_VIRT_CPU_ACCOUNTING, cputime_one_jiffy is the constant 1,
and gcc is smart enough to optimise min_t(long,
percpu_counter_batch, INT_MAX) to just percpu_counter_batch, at
least on x86 and PowerPC, so there is no need to add an #ifdef.
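In that configuration the macro reduces as follows (a sketch of what
the compiler sees):

	/*
	 * cputime_one_jiffy == 1, so:
	 *
	 *   CPUACCT_BATCH = min_t(long, percpu_counter_batch * 1, INT_MAX)
	 *
	 * percpu_counter_batch is an s32 and therefore never exceeds
	 * INT_MAX, so the comparison is dead code and the whole
	 * expression folds to just percpu_counter_batch.
	 */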
On a 64 thread PowerPC box with CONFIG_VIRT_CPU_ACCOUNTING and
CONFIG_CGROUP_CPUACCT enabled, a context switch microbenchmark is
about 270x faster (16663217 vs 61720 ctx switches/sec) and almost
matches a kernel with CONFIG_CGROUP_CPUACCT disabled:

	CONFIG_CGROUP_CPUACCT disabled:	16906698 ctx switches/sec
	CONFIG_CGROUP_CPUACCT enabled:	   61720 ctx switches/sec
	CONFIG_CGROUP_CPUACCT + patch:	16663217 ctx switches/sec
Tested with:

	wget http://ozlabs.org/~anton/junkcode/context_switch.c
	make context_switch
	for i in `seq 0 63`; do taskset -c $i ./context_switch & done
	vmstat 1
Signed-off-by: Anton Blanchard <anton@samba.org>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Tested-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 20 +++++++++++++++++++-
 1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f96be9370b75..bae6fcfe6d75 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8998,12 +8998,30 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 }
 
 /*
+ * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
+ * in cputime_t units. As a result, cpuacct_update_stats calls
+ * percpu_counter_add with values large enough to always overflow the
+ * per cpu batch limit causing bad SMP scalability.
+ *
+ * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
+ * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
+ * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
+ */
+#ifdef CONFIG_SMP
+#define CPUACCT_BATCH \
+	min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
+#else
+#define CPUACCT_BATCH	0
+#endif
+
+/*
  * Charge the system/user time to the task's accounting group.
  */
 static void cpuacct_update_stats(struct task_struct *tsk,
 		enum cpuacct_stat_index idx, cputime_t val)
 {
 	struct cpuacct *ca;
+	int batch = CPUACCT_BATCH;
 
 	if (unlikely(!cpuacct_subsys.active))
 		return;
@@ -9012,7 +9030,7 @@ static void cpuacct_update_stats(struct task_struct *tsk,
 	ca = task_ca(tsk);
 
 	do {
-		percpu_counter_add(&ca->cpustat[idx], val);
+		__percpu_counter_add(&ca->cpustat[idx], val, batch);
 		ca = ca->parent;
 	} while (ca);
 	rcu_read_unlock();
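A note on the hotplug behaviour relied on above: percpu_counter_batch
itself is recomputed when cpus come and go, so evaluating
CPUACCT_BATCH at each call picks the new value up automatically.
Roughly (simplified from lib/percpu_counter.c; the hotplug notifier
wiring that invokes it is omitted):

	static void compute_batch_value(void)
	{
		int nr = num_online_cpus();

		percpu_counter_batch = max(32, nr * 2);
	}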