author    Linus Torvalds <torvalds@linux-foundation.org>  2011-01-07 20:02:58 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-01-07 20:02:58 -0500
commit 72eb6a791459c87a0340318840bb3bd9252b627b
tree   3bfb8ad99f9c7e511f37f72d57b56a2cea06d753
parent 23d69b09b78c4876e134f104a3814c30747c53f1
parent 55ee4ef30241a62b700f79517e6d5ef2ddbefa67
Merge branch 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (30 commits)
  gameport: use this_cpu_read instead of lookup
  x86: udelay: Use this_cpu_read to avoid address calculation
  x86: Use this_cpu_inc_return for nmi counter
  x86: Replace uses of current_cpu_data with this_cpu ops
  x86: Use this_cpu_ops to optimize code
  vmstat: User per cpu atomics to avoid interrupt disable / enable
  irq_work: Use per cpu atomics instead of regular atomics
  cpuops: Use cmpxchg for xchg to avoid lock semantics
  x86: this_cpu_cmpxchg and this_cpu_xchg operations
  percpu: Generic this_cpu_cmpxchg() and this_cpu_xchg support
  percpu,x86: relocate this_cpu_add_return() and friends
  connector: Use this_cpu operations
  xen: Use this_cpu_inc_return
  taskstats: Use this_cpu_ops
  random: Use this_cpu_inc_return
  fs: Use this_cpu_inc_return in buffer.c
  highmem: Use this_cpu_xx_return() operations
  vmstat: Use this_cpu_inc_return for vm statistics
  x86: Support for this_cpu_add, sub, dec, inc_return
  percpu: Generic support for this_cpu_add, sub, dec, inc_return
  ...

Fixed up conflicts in arch/x86/kernel/{apic/nmi.c, apic/x2apic_uv_x.c, process.c} as per Tejun.
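The common thread in these commits is replacing a two-step per-CPU access (compute the per-CPU address with this_cpu_ptr(), then load or store through the pointer) with a single this_cpu operation that the architecture can lower to one instruction. As a rough, runnable userspace illustration of the this_cpu_inc_return() idiom several of the commits adopt (thread-local storage stands in for per-CPU data, and model_this_cpu_inc_return is a made-up name; the real kernel op additionally avoids the address computation via a segment-prefixed instruction on x86):

	#include <stdio.h>

	/* Stands in for a DEFINE_PER_CPU counter in the kernel. */
	static _Thread_local long event_count;

	/* Increment and return the new value in one step, instead of a
	 * separate pointer lookup, load, add and store. */
	static long model_this_cpu_inc_return(long *v)
	{
		return ++*v;
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			printf("count is now %ld\n",
			       model_this_cpu_inc_return(&event_count));
		return 0;
	}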
Diffstat (limited to 'lib')
 lib/percpu_counter.c | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 604678d7d06d..28f2c33c6b53 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -72,18 +72,16 @@ EXPORT_SYMBOL(percpu_counter_set);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
-	s32 *pcount;
 
 	preempt_disable();
-	pcount = this_cpu_ptr(fbc->counters);
-	count = *pcount + amount;
+	count = __this_cpu_read(*fbc->counters) + amount;
 	if (count >= batch || count <= -batch) {
 		spin_lock(&fbc->lock);
 		fbc->count += count;
-		*pcount = 0;
+		__this_cpu_write(*fbc->counters, 0);
 		spin_unlock(&fbc->lock);
 	} else {
-		*pcount = count;
+		__this_cpu_write(*fbc->counters, count);
 	}
 	preempt_enable();
 }
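The logic this hunk rewrites is the classic batched per-CPU counter: updates land in a cheap CPU-local slot, and the shared lock is taken only once the local value crosses the batch threshold. Below is a minimal, runnable userspace sketch of that scheme for illustration, with pthreads and thread-local storage standing in for the kernel's spinlock and per-CPU data; the names (counter_add, global_count) are made up here, not kernel API.

	#include <pthread.h>
	#include <stdio.h>

	static long long global_count;		/* plays the role of fbc->count */
	static pthread_mutex_t lock =
		PTHREAD_MUTEX_INITIALIZER;	/* plays the role of fbc->lock */
	static _Thread_local long long local_count;	/* one per-CPU slot */

	/* Same shape as __percpu_counter_add(): accumulate locally, fold
	 * into the shared total only when the local value hits +/-batch. */
	static void counter_add(long long amount, long long batch)
	{
		long long count = local_count + amount;

		if (count >= batch || count <= -batch) {
			pthread_mutex_lock(&lock);
			global_count += count;	/* slow path: take the lock */
			local_count = 0;	/* like __this_cpu_write(..., 0) */
			pthread_mutex_unlock(&lock);
		} else {
			local_count = count;	/* fast path: purely local */
		}
	}

	static void *worker(void *unused)
	{
		(void)unused;
		for (int i = 0; i < 100000; i++)
			counter_add(1, 32);
		counter_add(0, 1);	/* flush the local remainder on exit */
		return NULL;
	}

	int main(void)
	{
		pthread_t t[4];

		for (int i = 0; i < 4; i++)
			pthread_create(&t[i], NULL, worker, NULL);
		for (int i = 0; i < 4; i++)
			pthread_join(t[i], NULL);
		printf("total: %lld (expected 400000)\n", global_count);
		return 0;
	}

The fast path never touches shared state, so lock traffic scales with 1/batch of the update rate; the price, as in the kernel version, is that a plain read of the shared total can lag the true value by up to (number of threads) * batch.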