author		Tejun Heo <tj@kernel.org>	2014-09-07 20:51:29 -0400
committer	Tejun Heo <tj@kernel.org>	2014-09-07 20:51:29 -0400
commit		ebd8fef304f99da84d4a52ad056f6137ac9652d4 (patch)
tree		8fc0366c43264d684b0ade18f82c00182db26040 /lib/percpu_counter.c
parent		1a4d76076cda69b0abf15463a8cebc172406da25 (diff)
percpu_counter: make percpu_counters_lock irq-safe
percpu_counter is scheduled to grow @gfp support to allow atomic
initialization. This patch makes percpu_counters_lock irq-safe so that it
can be safely used from atomic contexts.

Signed-off-by: Tejun Heo <tj@kernel.org>
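As background on the locking pattern this patch adopts, here is a minimal sketch (not code from this commit; the lock, list, and function names are hypothetical stand-ins): spin_lock_irqsave() saves the caller's interrupt state and spin_unlock_irqrestore() restores it, so the critical section is safe regardless of whether the caller already has interrupts disabled.

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(example_lock);	/* stand-in for percpu_counters_lock */
static LIST_HEAD(example_list);		/* stand-in for the percpu_counters list */

/*
 * Callable from any context: irqsave/irqrestore preserve the caller's
 * interrupt state instead of unconditionally re-enabling interrupts
 * on unlock.
 */
static void example_add(struct list_head *node)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	list_add(node, &example_list);
	spin_unlock_irqrestore(&example_lock, flags);
}

The hotplug callback changed below, by contrast, always runs in process context with interrupts enabled, so it can use the cheaper unconditional spin_lock_irq()/spin_unlock_irq() pair.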
Diffstat (limited to 'lib/percpu_counter.c')
-rw-r--r--	lib/percpu_counter.c	16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 7dd33577b905..3fde78275cd1 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -115,6 +115,8 @@ EXPORT_SYMBOL(__percpu_counter_sum);
 int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 			  struct lock_class_key *key)
 {
+	unsigned long flags __maybe_unused;
+
 	raw_spin_lock_init(&fbc->lock);
 	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
@@ -126,9 +128,9 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 
 #ifdef CONFIG_HOTPLUG_CPU
 	INIT_LIST_HEAD(&fbc->list);
-	spin_lock(&percpu_counters_lock);
+	spin_lock_irqsave(&percpu_counters_lock, flags);
 	list_add(&fbc->list, &percpu_counters);
-	spin_unlock(&percpu_counters_lock);
+	spin_unlock_irqrestore(&percpu_counters_lock, flags);
 #endif
 	return 0;
 }
@@ -136,15 +138,17 @@ EXPORT_SYMBOL(__percpu_counter_init);
136 138
137void percpu_counter_destroy(struct percpu_counter *fbc) 139void percpu_counter_destroy(struct percpu_counter *fbc)
138{ 140{
141 unsigned long flags __maybe_unused;
142
139 if (!fbc->counters) 143 if (!fbc->counters)
140 return; 144 return;
141 145
142 debug_percpu_counter_deactivate(fbc); 146 debug_percpu_counter_deactivate(fbc);
143 147
144#ifdef CONFIG_HOTPLUG_CPU 148#ifdef CONFIG_HOTPLUG_CPU
145 spin_lock(&percpu_counters_lock); 149 spin_lock_irqsave(&percpu_counters_lock, flags);
146 list_del(&fbc->list); 150 list_del(&fbc->list);
147 spin_unlock(&percpu_counters_lock); 151 spin_unlock_irqrestore(&percpu_counters_lock, flags);
148#endif 152#endif
149 free_percpu(fbc->counters); 153 free_percpu(fbc->counters);
150 fbc->counters = NULL; 154 fbc->counters = NULL;
@@ -173,7 +177,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		return NOTIFY_OK;
 
 	cpu = (unsigned long)hcpu;
-	spin_lock(&percpu_counters_lock);
+	spin_lock_irq(&percpu_counters_lock);
 	list_for_each_entry(fbc, &percpu_counters, list) {
 		s32 *pcount;
 		unsigned long flags;
@@ -184,7 +188,7 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
 		*pcount = 0;
 		raw_spin_unlock_irqrestore(&fbc->lock, flags);
 	}
-	spin_unlock(&percpu_counters_lock);
+	spin_unlock_irq(&percpu_counters_lock);
 #endif
 	return NOTIFY_OK;
 }
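For context, once the planned @gfp support lands in a follow-up patch, atomic initialization would look roughly like the sketch below; the three-argument percpu_counter_init() is the anticipated later interface, not something this commit adds, and setup_counter_atomic() is a hypothetical caller.

#include <linux/percpu_counter.h>
#include <linux/gfp.h>

/*
 * Hypothetical caller that cannot sleep. GFP_ATOMIC initialization is
 * only safe once percpu_counters_lock is irq-safe, which is what this
 * patch provides.
 */
static int setup_counter_atomic(struct percpu_counter *counter)
{
	return percpu_counter_init(counter, 0, GFP_ATOMIC);
}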