author     Shaohua Li <shli@fusionio.com>    2013-10-24 04:06:45 -0400
committer  Jens Axboe <axboe@kernel.dk>      2013-10-25 06:55:59 -0400
commit     098faf5805c80f951ce5e8b4a6842382ad793c38 (patch)
tree       7479b126910fc14f0b1690b9414c8bf9033dce70
parent     71fe07d040626de7b72244bf6de889c2e0f5aea3 (diff)
percpu_counter: make APIs irq safe
In my usage, sometimes the percpu APIs are called with irqs disabled, sometimes not. lockdep complains there is a potential deadlock. Let's always take the percpu_counter lock in an irq-safe way. There should be no performance penalty, as all of those are slow code paths.

Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
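For illustration, a minimal sketch of the mixed-context call pattern described above (the counter and both callers are hypothetical, not part of this patch; initialization is omitted). If the process-context update happens to hold fbc->lock when the interrupt fires on the same CPU, the handler spins on that lock forever; this is the deadlock lockdep warns about, and taking the lock with raw_spin_lock_irqsave() closes that window.

#include <linux/percpu_counter.h>

/* Illustrative only: hypothetical callers, not from this patch. */
static struct percpu_counter nr_pending;

/* Process context, interrupts enabled. */
static void hypothetical_submit(void)
{
        percpu_counter_add(&nr_pending, 1);     /* slow path may take fbc->lock */
}

/* Interrupt context, updating the same counter. */
static void hypothetical_complete_irq(void)
{
        percpu_counter_sub(&nr_pending, 1);     /* may contend on fbc->lock */
}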
-rw-r--r--  lib/percpu_counter.c | 15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 93c5d5ecff4e..7473ee3b4ee7 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -60,14 +60,15 @@ static inline void debug_percpu_counter_deactivate(struct percpu_counter *fbc)
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
 {
        int cpu;
+       unsigned long flags;
 
-       raw_spin_lock(&fbc->lock);
+       raw_spin_lock_irqsave(&fbc->lock, flags);
        for_each_possible_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                *pcount = 0;
        }
        fbc->count = amount;
-       raw_spin_unlock(&fbc->lock);
+       raw_spin_unlock_irqrestore(&fbc->lock, flags);
 }
 EXPORT_SYMBOL(percpu_counter_set);
 
@@ -78,9 +79,10 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
        preempt_disable();
        count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
-               raw_spin_lock(&fbc->lock);
+               unsigned long flags;
+               raw_spin_lock_irqsave(&fbc->lock, flags);
                fbc->count += count;
-               raw_spin_unlock(&fbc->lock);
+               raw_spin_unlock_irqrestore(&fbc->lock, flags);
                __this_cpu_write(*fbc->counters, 0);
        } else {
                __this_cpu_write(*fbc->counters, count);
@@ -97,14 +99,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
        s64 ret;
        int cpu;
+       unsigned long flags;
 
-       raw_spin_lock(&fbc->lock);
+       raw_spin_lock_irqsave(&fbc->lock, flags);
        ret = fbc->count;
        for_each_online_cpu(cpu) {
                s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
                ret += *pcount;
        }
-       raw_spin_unlock(&fbc->lock);
+       raw_spin_unlock_irqrestore(&fbc->lock, flags);
        return ret;
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
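For reference, a generic sketch of the locking idiom the patch switches to (not kernel code from this patch; the lock, counter, and function are made up): raw_spin_lock_irqsave() saves the caller's interrupt state in a local flags variable and disables interrupts, and raw_spin_unlock_irqrestore() restores that saved state rather than unconditionally re-enabling interrupts, so the critical section nests correctly whether the caller runs with interrupts enabled or already disabled.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Generic sketch of the irqsave idiom; names are illustrative only. */
static DEFINE_RAW_SPINLOCK(example_lock);
static s64 example_count;

static void example_update(s64 delta)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&example_lock, flags);    /* irqs off, prior state saved */
        example_count += delta;
        raw_spin_unlock_irqrestore(&example_lock, flags); /* prior irq state restored */
}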