author     Eric Dumazet <edumazet@google.com>    2017-01-20 09:34:22 -0500
committer  Tejun Heo <tj@kernel.org>             2017-01-20 10:06:56 -0500
commit     aaf0f2fa682861e47a4f6a8762d2b8a9a4a51077
tree       bc57d4a655a39f065e6a606913be6a25de9ab55a
parent     44b4b461a0fb407507b46ea76a71376d74de7058
percpu_counter: percpu_counter_hotcpu_callback() cleanup
In commit ebd8fef304f9 ("percpu_counter: make percpu_counters_lock
irq-safe") we disabled irqs in percpu_counter_hotcpu_callback(),
so we can now grab every counter spinlock without having to disable
irqs again.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
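
To make the reasoning concrete, here is a minimal sketch (hypothetical lock names, not code from the tree) of the nesting pattern the patch relies on: once spin_lock_irq() has disabled local interrupts, any lock taken inside that critical section can use the plain variants, since no interrupt can preempt us on this CPU and the saved flags would be a known constant anyway.

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);		/* hypothetical */
static DEFINE_RAW_SPINLOCK(inner_lock);		/* hypothetical */

static void nested_lock_sketch(void)
{
	spin_lock_irq(&outer_lock);	/* local irqs are off from here */

	/*
	 * No irqsave/irqrestore needed: irqs are already disabled by
	 * the outer lock, so the plain variant is both sufficient and
	 * cheaper (no flags save/restore).
	 */
	raw_spin_lock(&inner_lock);
	/* ... update data also touched from other contexts ... */
	raw_spin_unlock(&inner_lock);

	spin_unlock_irq(&outer_lock);	/* irqs back on */
}
```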
Diffstat (limited to 'lib')
 lib/percpu_counter.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
```diff
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index c8cebb137076..9c21000df0b5 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -176,13 +176,12 @@ static int percpu_counter_cpu_dead(unsigned int cpu)
 	spin_lock_irq(&percpu_counters_lock);
 	list_for_each_entry(fbc, &percpu_counters, list) {
 		s32 *pcount;
-		unsigned long flags;
 
-		raw_spin_lock_irqsave(&fbc->lock, flags);
+		raw_spin_lock(&fbc->lock);
 		pcount = per_cpu_ptr(fbc->counters, cpu);
 		fbc->count += *pcount;
 		*pcount = 0;
-		raw_spin_unlock_irqrestore(&fbc->lock, flags);
+		raw_spin_unlock(&fbc->lock);
 	}
 	spin_unlock_irq(&percpu_counters_lock);
 #endif
```
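
For readability, this is how the affected loop in percpu_counter_cpu_dead() reads with the patch applied (reconstructed from the hunk above; the function body outside the hunk is elided):

```c
static int percpu_counter_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	struct percpu_counter *fbc;

	/* ... lines before the hunk elided ... */

	spin_lock_irq(&percpu_counters_lock);	/* also disables local irqs */
	list_for_each_entry(fbc, &percpu_counters, list) {
		s32 *pcount;

		/* irqs already off: the plain lock is enough */
		raw_spin_lock(&fbc->lock);
		pcount = per_cpu_ptr(fbc->counters, cpu);
		fbc->count += *pcount;	/* fold the dead CPU's delta in */
		*pcount = 0;
		raw_spin_unlock(&fbc->lock);
	}
	spin_unlock_irq(&percpu_counters_lock);
#endif
	/* ... lines after the hunk elided ... */
}
```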
