Diffstat (limited to 'include/linux/percpu_counter.h')
 -rw-r--r--  include/linux/percpu_counter.h | 107
1 file changed, 107 insertions, 0 deletions
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
new file mode 100644
index 000000000000..bd6708e2c027
--- /dev/null
+++ b/include/linux/percpu_counter.h
@@ -0,0 +1,107 @@
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/config.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/percpu.h>

#ifdef CONFIG_SMP

struct percpu_counter {
        spinlock_t lock;
        long count;
        long *counters;
};

#if NR_CPUS >= 16
#define FBC_BATCH (NR_CPUS*2)
#else
#define FBC_BATCH (NR_CPUS*4)
#endif

static inline void percpu_counter_init(struct percpu_counter *fbc)
{
        spin_lock_init(&fbc->lock);
        fbc->count = 0;
        fbc->counters = alloc_percpu(long);
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
        free_percpu(fbc->counters);
}

void percpu_counter_mod(struct percpu_counter *fbc, long amount);

static inline long percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * It is possible for the percpu_counter_read() to return a small negative
 * number for some counter which should never be negative.
 */
static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
{
        long ret = fbc->count;

        barrier();              /* Prevent reloads of fbc->count */
        if (ret > 0)
                return ret;
        return 1;
}

#else

struct percpu_counter {
        long count;
};

static inline void percpu_counter_init(struct percpu_counter *fbc)
{
        fbc->count = 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void
percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
        preempt_disable();
        fbc->count += amount;
        preempt_enable();
}

static inline long percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
{
        return fbc->count;
}

#endif  /* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
        percpu_counter_mod(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
        percpu_counter_mod(fbc, -1);
}

#endif /* _LINUX_PERCPU_COUNTER_H */
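
In the CONFIG_SMP case percpu_counter_mod() is only declared in this header; its out-of-line definition is not part of this patch. As a rough illustration of the batching scheme the FBC_BATCH threshold implies (each CPU accumulates a private delta and folds it into fbc->count under the spinlock once the delta passes the batch size), a sketch of such a definition might look like the following; the actual body lives elsewhere in the tree and may differ:

void percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
        long count;
        long *pcount;
        int cpu = get_cpu();            /* pin to this CPU, disables preemption */

        pcount = per_cpu_ptr(fbc->counters, cpu);
        count = *pcount + amount;
        if (count >= FBC_BATCH || count <= -FBC_BATCH) {
                /* local delta got large: fold it into the global count */
                spin_lock(&fbc->lock);
                fbc->count += count;
                *pcount = 0;
                spin_unlock(&fbc->lock);
        } else {
                /* stay CPU-local, no shared-cacheline traffic */
                *pcount = count;
        }
        put_cpu();
}

This keeps percpu_counter_read() a single load of fbc->count, at the cost of the value being off by roughly NR_CPUS * FBC_BATCH in the worst case, which is also why percpu_counter_read_positive() may observe a small negative count.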
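
A hypothetical caller might be a filesystem keeping an approximate free-block count in its in-core superblock info; the example_* names and the s_freeblocks field below are purely illustrative and not part of this header:

/* Illustrative only: all names here are made up for the example. */
struct example_sb_info {
        struct percpu_counter s_freeblocks;
};

static void example_fill_super(struct example_sb_info *sbi, long free_blocks)
{
        percpu_counter_init(&sbi->s_freeblocks);
        percpu_counter_mod(&sbi->s_freeblocks, free_blocks);
}

static void example_alloc_block(struct example_sb_info *sbi)
{
        percpu_counter_dec(&sbi->s_freeblocks);  /* cheap, usually CPU-local */
}

static long example_statfs_free(struct example_sb_info *sbi)
{
        /* approximate, but never negative for reporting purposes */
        return percpu_counter_read_positive(&sbi->s_freeblocks);
}

static void example_put_super(struct example_sb_info *sbi)
{
        percpu_counter_destroy(&sbi->s_freeblocks);
}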