author     Mingming Cao <cmm@us.ibm.com>      2008-07-11 19:27:31 -0400
committer  Theodore Ts'o <tytso@mit.edu>      2008-07-11 19:27:31 -0400
commit     e8ced39d5e8911c662d4d69a342b9d053eaaac4e
tree       cd4d643bfbd37c67ff9bd2feff2c0b477a56f117
parent     64769240bd07f446f83660bb143bb609d8ab4910
percpu_counter: new function percpu_counter_sum_and_set
Delayed allocation needs to check the number of free blocks on every write. percpu_counter_read_positive() is not accurate enough for this, but calling the accurate percpu_counter_sum_positive() that frequently is quite expensive.

This patch adds a new function that updates the central counter while summing the per-cpu counters, which makes subsequent percpu_counter_read() calls more accurate and reduces how often the expensive percpu_counter_sum() has to be called.

Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
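For illustration, the following is a minimal sketch of the usage pattern the message describes; it is not part of the commit, it assumes the patched percpu_counter.h, and the names demo_counter, demo_has_room() and nr_wanted are hypothetical. The idea is to keep the cheap approximate read on the fast path and fall back to percpu_counter_sum_and_set() only when the result is too close to the threshold, which also refreshes the central count for later reads.

#include <linux/percpu_counter.h>

/* Hypothetical counter; assumed to be percpu_counter_init()ed elsewhere. */
static struct percpu_counter demo_counter;

/* Return nonzero if at least nr_wanted units appear to be available. */
static int demo_has_room(s64 nr_wanted)
{
        /* Cheap read of the central count; may lag behind the per-cpu deltas. */
        s64 avail = percpu_counter_read_positive(&demo_counter);

#ifdef CONFIG_SMP
        /*
         * Too close to call: pay for the exact sum.  sum_and_set also folds
         * the per-cpu deltas into the central count, so the next cheap
         * percpu_counter_read() is accurate again.
         */
        if (avail < nr_wanted + FBC_BATCH)
                avail = percpu_counter_sum_and_set(&demo_counter);
#endif
        return avail >= nr_wanted;
}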
-rw-r--r--  fs/ext4/balloc.c                |  2
-rw-r--r--  include/linux/percpu_counter.h  | 12
-rw-r--r--  lib/percpu_counter.c            |  7
3 files changed, 16 insertions, 5 deletions
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 25f63d8c1b3d..6369bacf0dcb 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -1621,7 +1621,7 @@ ext4_fsblk_t ext4_has_free_blocks(struct ext4_sb_info *sbi,
 #ifdef CONFIG_SMP
        if (free_blocks - root_blocks < FBC_BATCH)
                free_blocks =
-                       percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
+                       percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
 #endif
        if (free_blocks - root_blocks < nblocks)
                return free_blocks - root_blocks;
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 9007ccdfc112..208388835357 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
 void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
-s64 __percpu_counter_sum(struct percpu_counter *fbc);
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);

 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -44,13 +44,19 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)

 static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
 {
-       s64 ret = __percpu_counter_sum(fbc);
+       s64 ret = __percpu_counter_sum(fbc, 0);
        return ret < 0 ? 0 : ret;
 }

+static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
+{
+       return __percpu_counter_sum(fbc, 1);
+}
+
+
 static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
 {
-       return __percpu_counter_sum(fbc);
+       return __percpu_counter_sum(fbc, 0);
 }

 static inline s64 percpu_counter_read(struct percpu_counter *fbc)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 119174494cb5..4a8ba4bf5f6f 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
  */
-s64 __percpu_counter_sum(struct percpu_counter *fbc)
+s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
 {
        s64 ret;
        int cpu;
@@ -62,7 +62,12 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
+               if (set)
+                       *pcount = 0;
        }
+       if (set)
+               fbc->count = ret;
+
        spin_unlock(&fbc->lock);
        return ret;
 }