about summary refs log tree commit diff stats
path: root/include/linux/percpu_counter.h
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/linux/percpu_counter.h
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'include/linux/percpu_counter.h')
-rw-r--r--include/linux/percpu_counter.h107
1 files changed, 107 insertions, 0 deletions
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
new file mode 100644
index 000000000000..bd6708e2c027
--- /dev/null
+++ b/include/linux/percpu_counter.h
@@ -0,0 +1,107 @@
1#ifndef _LINUX_PERCPU_COUNTER_H
2#define _LINUX_PERCPU_COUNTER_H
3/*
4 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
5 *
6 * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
7 */
8
9#include <linux/config.h>
10#include <linux/spinlock.h>
11#include <linux/smp.h>
12#include <linux/threads.h>
13#include <linux/percpu.h>
14
15#ifdef CONFIG_SMP
16
/* SMP flavour: a global value plus unfolded per-cpu deltas. */
struct percpu_counter {
	spinlock_t lock;	/* NOTE(review): presumably serializes updates of
				 * 'count' in percpu_counter_mod(), which is out
				 * of line — confirm against its definition. */
	long count;		/* approximate global value */
	long *counters;		/* per-cpu deltas from alloc_percpu(long) */
};
22
/*
 * Batch size scaled by CPU count.  NOTE(review): presumably the threshold
 * at which percpu_counter_mod() folds a cpu-local delta into fbc->count;
 * mod() is out of line, so confirm against its definition.  Larger machines
 * get a proportionally smaller per-cpu multiplier (x2 vs x4).
 */
#if NR_CPUS >= 16
#define FBC_BATCH	(NR_CPUS*2)
#else
#define FBC_BATCH	(NR_CPUS*4)
#endif
28
/*
 * Initialise the counter to zero and allocate its per-cpu delta array.
 * NOTE(review): the alloc_percpu() result is not checked — on failure
 * fbc->counters is NULL; confirm how percpu_counter_mod() copes with that.
 */
static inline void percpu_counter_init(struct percpu_counter *fbc)
{
	spin_lock_init(&fbc->lock);
	fbc->count = 0;
	fbc->counters = alloc_percpu(long);
}
35
/* Release the per-cpu delta array allocated by percpu_counter_init(). */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
	free_percpu(fbc->counters);
}
40
/* Add @amount to the counter; defined out of line elsewhere in the tree. */
void percpu_counter_mod(struct percpu_counter *fbc, long amount);
42
/*
 * Return the approximate value: only the folded global count is read,
 * so unfolded per-cpu deltas are not included.
 */
static inline long percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
47
48/*
49 * It is possible for the percpu_counter_read() to return a small negative
50 * number for some counter which should never be negative.
51 */
52static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
53{
54 long ret = fbc->count;
55
56 barrier(); /* Prevent reloads of fbc->count */
57 if (ret > 0)
58 return ret;
59 return 1;
60}
61
62#else
63
/* UP flavour: a single exact value, no per-cpu state needed. */
struct percpu_counter {
	long count;
};
67
/* UP: nothing to allocate; just zero the value. */
static inline void percpu_counter_init(struct percpu_counter *fbc)
{
	fbc->count = 0;
}
72
/* UP: no per-cpu storage was allocated, so nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
76
/*
 * UP: the count is exact.  Disabling preemption makes the
 * read-modify-write of fbc->count atomic with respect to preemption.
 */
static inline void
percpu_counter_mod(struct percpu_counter *fbc, long amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}
84
/* UP: the value is exact, not approximate. */
static inline long percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
89
/*
 * UP: the count is exact, so no clamping is applied.  NOTE(review):
 * unlike the SMP variant this can return 0 or a negative value —
 * confirm callers tolerate that asymmetry.
 */
static inline long percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
94
95#endif /* CONFIG_SMP */
96
/* Convenience wrapper: add one to the counter. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_mod(fbc, 1);
}
101
/* Convenience wrapper: subtract one from the counter. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_mod(fbc, -1);
}
106
107#endif /* _LINUX_PERCPU_COUNTER_H */