author    Linus Torvalds <torvalds@linux-foundation.org>  2008-10-11 16:23:48 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-10-11 16:23:48 -0400
commit    fd048088306656824958e7783ffcee27e241b361 (patch)
tree      be11bebe3bbd2cac88ff27bd3c7450339d21bdc7 /lib/percpu_counter.c
parent    5c3c4d9b5810c9aabd8c05219c62ca088aa83eb0 (diff)
parent    03010a3350301baac2154fa66de925ae2981b7e3 (diff)
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (43 commits)
    ext4: Rename ext4dev to ext4
    ext4: Avoid double dirtying of super block in ext4_put_super()
    Update ext4 MAINTAINERS file
    Hook ext4 to the vfs fiemap interface.
    generic block based fiemap implementation
    ocfs2: fiemap support
    vfs: vfs-level fiemap interface
    ext4: fix xattr deadlock
    jbd2: Fix buffer head leak when writing the commit block
    ext4: Add debugging markers that can be used by systemtap
    jbd2: abort instead of waiting for nonexistent transaction
    ext4: fix initialization of UNINIT bitmap blocks
    ext4: Remove old legacy block allocator
    ext4: Use readahead when reading an inode from the inode table
    ext4: Improve the documentation for ext4's /proc tunables
    ext4: Combine proc file handling into a single set of functions
    ext4: move /proc setup and teardown out of mballoc.c
    ext4: Don't use 'struct dentry' for internal lookups
    ext4/jbd2: Avoid WARN() messages when failing to write to the superblock
    ext4: use percpu data structures for lg_prealloc_list
    ...
Diffstat (limited to 'lib/percpu_counter.c')
-rw-r--r--  lib/percpu_counter.c | 8
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 4a8ba4bf5f6f..a8663890a88c 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
+s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
 	s64 ret;
 	int cpu;
@@ -62,11 +62,9 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		if (set)
-			*pcount = 0;
+		*pcount = 0;
 	}
-	if (set)
-		fbc->count = ret;
+	fbc->count = ret;
 
 	spin_unlock(&fbc->lock);
 	return ret;
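
In short, the lib/percpu_counter.c change merged here drops the "set" flag from __percpu_counter_sum(): the slow, accurate sum now always folds the per-cpu deltas back into fbc->count and zeroes them, so every call also refreshes the cached global value (the old optional sum-and-set behavior becomes the only behavior). Below is a minimal userspace sketch of that fold-on-sum idea, using a pthread mutex in place of the kernel spinlock and a fixed CPU count; the names and layout are illustrative stand-ins, not the kernel's API.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4  /* fixed CPU count for this illustration */

/* Simplified stand-in for struct percpu_counter. */
struct pc {
	pthread_mutex_t lock;       /* stands in for fbc->lock (a spinlock in the kernel) */
	int64_t count;              /* cached global value, like fbc->count */
	int32_t counters[NR_CPUS];  /* per-cpu deltas not yet folded into count */
};

/*
 * Model of the post-merge __percpu_counter_sum(): add up all per-cpu
 * deltas, zero each one, and write the accurate total back to ->count.
 * There is no 'set' flag anymore -- the fold always happens.
 */
static int64_t pc_sum(struct pc *fbc)
{
	int64_t ret;
	int cpu;

	pthread_mutex_lock(&fbc->lock);
	ret = fbc->count;
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		ret += fbc->counters[cpu];
		fbc->counters[cpu] = 0;  /* delta consumed by the fold */
	}
	fbc->count = ret;                /* cache the accurate sum */
	pthread_mutex_unlock(&fbc->lock);
	return ret;
}

int main(void)
{
	struct pc c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.count = 100,
		.counters = { 3, -1, 7, 0 },
	};

	printf("sum = %lld\n", (long long)pc_sum(&c));        /* 109 */
	printf("cached count = %lld\n", (long long)c.count);  /* also 109 */
	return 0;
}

The design point the diff reflects: once the expensive walk over all CPUs has been paid for, writing the result back costs nothing extra, and it keeps the cheap cached read closer to the true value afterwards.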