aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/percpu_counter.h
diff options
context:
space:
mode:
authorMingming Cao <cmm@us.ibm.com>2006-06-23 05:05:41 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-06-23 10:43:06 -0400
commit0216bfcffe424a5473daa4da47440881b36c1f41 (patch)
tree80eaa49bfc644b070e57c251285048992ac6fafc /include/linux/percpu_counter.h
parent3cbc564024d8f174202f023e8a2991782f6a9431 (diff)
[PATCH] percpu counter data type changes to support more than 2**31 ext3 free blocks counter
The percpu counter data type is changed in this set of patches to support more users like ext3 who need more than 32 bits to store the free blocks total in the filesystem. - Generic percpu counters data type changes. The size of the global counter and local counter were explicitly specified using s64 and s32. The global counter is changed from long to s64, while the local counter is changed from long to s32, so we could avoid doing 64 bit update in most cases. - Users of the percpu counters are updated to make use of the new percpu_counter_init() routine now taking an additional parameter to allow users to pass the initial value of the global counter. Signed-off-by: Mingming Cao <cmm@us.ibm.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux/percpu_counter.h')
-rw-r--r--include/linux/percpu_counter.h38
1 files changed, 20 insertions, 18 deletions
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 66b5de404f22..f5aa593ccf32 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -10,13 +10,14 @@
10#include <linux/smp.h> 10#include <linux/smp.h>
11#include <linux/threads.h> 11#include <linux/threads.h>
12#include <linux/percpu.h> 12#include <linux/percpu.h>
13#include <linux/types.h>
13 14
14#ifdef CONFIG_SMP 15#ifdef CONFIG_SMP
15 16
16struct percpu_counter { 17struct percpu_counter {
17 spinlock_t lock; 18 spinlock_t lock;
18 long count; 19 s64 count;
19 long *counters; 20 s32 *counters;
20}; 21};
21 22
22#if NR_CPUS >= 16 23#if NR_CPUS >= 16
@@ -25,11 +26,11 @@ struct percpu_counter {
25#define FBC_BATCH (NR_CPUS*4) 26#define FBC_BATCH (NR_CPUS*4)
26#endif 27#endif
27 28
28static inline void percpu_counter_init(struct percpu_counter *fbc) 29static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
29{ 30{
30 spin_lock_init(&fbc->lock); 31 spin_lock_init(&fbc->lock);
31 fbc->count = 0; 32 fbc->count = amount;
32 fbc->counters = alloc_percpu(long); 33 fbc->counters = alloc_percpu(s32);
33} 34}
34 35
35static inline void percpu_counter_destroy(struct percpu_counter *fbc) 36static inline void percpu_counter_destroy(struct percpu_counter *fbc)
@@ -37,10 +38,10 @@ static inline void percpu_counter_destroy(struct percpu_counter *fbc)
37 free_percpu(fbc->counters); 38 free_percpu(fbc->counters);
38} 39}
39 40
40void percpu_counter_mod(struct percpu_counter *fbc, long amount); 41void percpu_counter_mod(struct percpu_counter *fbc, s32 amount);
41long percpu_counter_sum(struct percpu_counter *fbc); 42s64 percpu_counter_sum(struct percpu_counter *fbc);
42 43
43static inline long percpu_counter_read(struct percpu_counter *fbc) 44static inline s64 percpu_counter_read(struct percpu_counter *fbc)
44{ 45{
45 return fbc->count; 46 return fbc->count;
46} 47}
@@ -48,13 +49,14 @@ static inline long percpu_counter_read(struct percpu_counter *fbc)
48/* 49/*
49 * It is possible for the percpu_counter_read() to return a small negative 50 * It is possible for the percpu_counter_read() to return a small negative
50 * number for some counter which should never be negative. 51 * number for some counter which should never be negative.
52 *
51 */ 53 */
52static inline long percpu_counter_read_positive(struct percpu_counter *fbc) 54static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
53{ 55{
54 long ret = fbc->count; 56 s64 ret = fbc->count;
55 57
56 barrier(); /* Prevent reloads of fbc->count */ 58 barrier(); /* Prevent reloads of fbc->count */
57 if (ret > 0) 59 if (ret >= 0)
58 return ret; 60 return ret;
59 return 1; 61 return 1;
60} 62}
@@ -62,12 +64,12 @@ static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
62#else 64#else
63 65
64struct percpu_counter { 66struct percpu_counter {
65 long count; 67 s64 count;
66}; 68};
67 69
68static inline void percpu_counter_init(struct percpu_counter *fbc) 70static inline void percpu_counter_init(struct percpu_counter *fbc, s64 amount)
69{ 71{
70 fbc->count = 0; 72 fbc->count = amount;
71} 73}
72 74
73static inline void percpu_counter_destroy(struct percpu_counter *fbc) 75static inline void percpu_counter_destroy(struct percpu_counter *fbc)
@@ -75,24 +77,24 @@ static inline void percpu_counter_destroy(struct percpu_counter *fbc)
75} 77}
76 78
77static inline void 79static inline void
78percpu_counter_mod(struct percpu_counter *fbc, long amount) 80percpu_counter_mod(struct percpu_counter *fbc, s32 amount)
79{ 81{
80 preempt_disable(); 82 preempt_disable();
81 fbc->count += amount; 83 fbc->count += amount;
82 preempt_enable(); 84 preempt_enable();
83} 85}
84 86
85static inline long percpu_counter_read(struct percpu_counter *fbc) 87static inline s64 percpu_counter_read(struct percpu_counter *fbc)
86{ 88{
87 return fbc->count; 89 return fbc->count;
88} 90}
89 91
90static inline long percpu_counter_read_positive(struct percpu_counter *fbc) 92static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
91{ 93{
92 return fbc->count; 94 return fbc->count;
93} 95}
94 96
95static inline long percpu_counter_sum(struct percpu_counter *fbc) 97static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
96{ 98{
97 return percpu_counter_read_positive(fbc); 99 return percpu_counter_read_positive(fbc);
98} 100}