Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  25
1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index cce3dda59c59..e9ec06d845e8 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -489,13 +489,34 @@ void percpu_counter_mod(struct percpu_counter *fbc, long amount)
 	if (count >= FBC_BATCH || count <= -FBC_BATCH) {
 		spin_lock(&fbc->lock);
 		fbc->count += count;
+		*pcount = 0;
 		spin_unlock(&fbc->lock);
-		count = 0;
+	} else {
+		*pcount = count;
 	}
-	*pcount = count;
 	put_cpu();
 }
 EXPORT_SYMBOL(percpu_counter_mod);
+
+/*
+ * Add up all the per-cpu counts, return the result.  This is a more accurate
+ * but much slower version of percpu_counter_read_positive()
+ */
+long percpu_counter_sum(struct percpu_counter *fbc)
+{
+	long ret;
+	int cpu;
+
+	spin_lock(&fbc->lock);
+	ret = fbc->count;
+	for_each_cpu(cpu) {
+		long *pcount = per_cpu_ptr(fbc->counters, cpu);
+		ret += *pcount;
+	}
+	spin_unlock(&fbc->lock);
+	return ret < 0 ? 0 : ret;
+}
+EXPORT_SYMBOL(percpu_counter_sum);
 #endif
 
 /*