| author | Ravikiran G Thirumalai <kiran@scalex86.org> | 2006-06-23 05:05:40 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-23 10:43:06 -0400 |
| commit | 3cbc564024d8f174202f023e8a2991782f6a9431 | |
| tree | 5d523ef9694b7a0bf07d4db58718f4654afa3f04 /lib | |
| parent | d09042da7284a86ffbdd18695f517a71514ed598 | |
[PATCH] percpu_counters: create lib/percpu_counter.c
- Move percpu_counter routines from mm/swap.c to lib/percpu_counter.c
Signed-off-by: Ravikiran Thirumalai <kiran@scalex86.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
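
For context (not part of the patch): a minimal sketch of how a caller might use the two routines being moved. The "widget" counter, the helper names, and the open-coded field setup are illustrative assumptions; in-tree callers would go through the wrappers in include/linux/percpu_counter.h rather than initializing the fields by hand.

```c
/*
 * Hypothetical caller sketch -- not part of this patch.  The names and the
 * open-coded setup are assumptions for illustration; real callers use the
 * helpers provided by include/linux/percpu_counter.h.
 */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

static struct percpu_counter nr_widgets;

static int widgets_setup(void)
{
	/* Fields as used by lib/percpu_counter.c: lock, count, counters. */
	spin_lock_init(&nr_widgets.lock);
	nr_widgets.count = 0;
	nr_widgets.counters = alloc_percpu(long);
	return nr_widgets.counters ? 0 : -ENOMEM;
}

static void widgets_teardown(void)
{
	free_percpu(nr_widgets.counters);
}

static void widget_created(void)
{
	/* Hot path: usually touches only this CPU's counter slot. */
	percpu_counter_mod(&nr_widgets, 1);
}

static long widgets_exact_count(void)
{
	/* Slow but accurate: folds every CPU's slot into the total. */
	return percpu_counter_sum(&nr_widgets);
}
```

percpu_counter_mod() is the cheap hot-path operation; percpu_counter_sum() is reserved for the rare callers that need an exact value, per the comment in the new file.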
Diffstat (limited to 'lib')

| -rw-r--r-- | lib/Makefile | 1 |
| -rw-r--r-- | lib/percpu_counter.c | 46 |

2 files changed, 47 insertions, 0 deletions
```diff
diff --git a/lib/Makefile b/lib/Makefile
index b830c9a15541..79358ad1f113 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_TEXTSEARCH) += textsearch.o
 obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
 obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
 obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
+obj-$(CONFIG_SMP) += percpu_counter.o
 
 obj-$(CONFIG_SWIOTLB) += swiotlb.o
 
```
```diff
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
new file mode 100644
index 000000000000..7a87003f8e8f
--- /dev/null
+++ b/lib/percpu_counter.c
@@ -0,0 +1,46 @@
+/*
+ * Fast batching percpu counters.
+ */
+
+#include <linux/percpu_counter.h>
+#include <linux/module.h>
+
+void percpu_counter_mod(struct percpu_counter *fbc, long amount)
+{
+	long count;
+	long *pcount;
+	int cpu = get_cpu();
+
+	pcount = per_cpu_ptr(fbc->counters, cpu);
+	count = *pcount + amount;
+	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
+		spin_lock(&fbc->lock);
+		fbc->count += count;
+		*pcount = 0;
+		spin_unlock(&fbc->lock);
+	} else {
+		*pcount = count;
+	}
+	put_cpu();
+}
+EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result.  This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+	long ret;
+	int cpu;
+
+	spin_lock(&fbc->lock);
+	ret = fbc->count;
+	for_each_possible_cpu(cpu) {
+		long *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	spin_unlock(&fbc->lock);
+	return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
```
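
The batching idea itself is not kernel-specific. Below is a self-contained userspace analogue (hypothetical names; pthreads; a __thread slot standing in for the per-CPU slot) showing the same technique: updates accumulate in a private slot, and the shared total is touched, under a lock, only when the slot's magnitude reaches the batch size. This is an illustration of the technique, not the kernel implementation; unlike the kernel code, which can walk every CPU's slot via per_cpu_ptr() in percpu_counter_sum(), this sketch has each thread flush its own remainder before exiting.

```c
/* Userspace analogue of the batching technique -- illustration only. */
#include <pthread.h>
#include <stdio.h>

#define BATCH 32			/* plays the role of FBC_BATCH */

struct batch_counter {
	pthread_mutex_t lock;
	long count;			/* shared total, approximate between folds */
};

static struct batch_counter bc = { PTHREAD_MUTEX_INITIALIZER, 0 };
static __thread long local_delta;	/* per-thread, like the per-CPU slot */

static void counter_mod(struct batch_counter *c, long amount)
{
	local_delta += amount;
	if (local_delta >= BATCH || local_delta <= -BATCH) {
		/* Fold the local batch into the shared count under the lock. */
		pthread_mutex_lock(&c->lock);
		c->count += local_delta;
		pthread_mutex_unlock(&c->lock);
		local_delta = 0;
	}
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		counter_mod(&bc, 1);

	/* Flush whatever is left in this thread's local batch. */
	pthread_mutex_lock(&bc.lock);
	bc.count += local_delta;
	local_delta = 0;
	pthread_mutex_unlock(&bc.lock);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);

	printf("count = %ld\n", bc.count);	/* expect 400000 */
	return 0;
}
```

The trade-off mirrors the patch: writers take the shared lock only about once per BATCH updates, while the shared count may lag each writer by a little under BATCH between folds, which is why percpu_counter_sum() exists as the slow-but-accurate read.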
