aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorClark Williams <williams@redhat.com>2018-10-26 18:10:32 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-10-26 19:38:15 -0400
commit026d1eaf5ef1a5d6258b46e4e411cd9f5ab8c41d (patch)
tree264348a04b2cdc0c9c01453cfcc03f1ff5b1c7c8
parentdf06b37ffe5a442503b7095b77b0a970df515459 (diff)
mm/kasan/quarantine.c: make quarantine_lock a raw_spinlock_t
The static lock quarantine_lock is used in quarantine.c to protect the quarantine queue data structures. It is taken inside quarantine queue manipulation routines (quarantine_put(), quarantine_reduce() and quarantine_remove_cache()), with IRQs disabled. This is not a problem on a stock kernel but is problematic on an RT kernel, where spinlocks are sleeping spinlocks, which can sleep and cannot be acquired with interrupts disabled. Convert quarantine_lock to a raw_spinlock_t. The usage of quarantine_lock is confined to quarantine.c and the work performed while the lock is held is used for debug purposes. [bigeasy@linutronix.de: slightly altered the commit message] Link: http://lkml.kernel.org/r/20181010214945.5owshc3mlrh74z4b@linutronix.de Signed-off-by: Clark Williams <williams@redhat.com> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Acked-by: Dmitry Vyukov <dvyukov@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/kasan/quarantine.c18
1 files changed, 9 insertions, 9 deletions
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index 3a8ddf8baf7d..b209dbaefde8 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -103,7 +103,7 @@ static int quarantine_head;
 static int quarantine_tail;
 /* Total size of all objects in global_quarantine across all batches. */
 static unsigned long quarantine_size;
-static DEFINE_SPINLOCK(quarantine_lock);
+static DEFINE_RAW_SPINLOCK(quarantine_lock);
 DEFINE_STATIC_SRCU(remove_cache_srcu);

 /* Maximum size of the global queue. */
@@ -190,7 +190,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
 	if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) {
 		qlist_move_all(q, &temp);

-		spin_lock(&quarantine_lock);
+		raw_spin_lock(&quarantine_lock);
 		WRITE_ONCE(quarantine_size, quarantine_size + temp.bytes);
 		qlist_move_all(&temp, &global_quarantine[quarantine_tail]);
 		if (global_quarantine[quarantine_tail].bytes >=
@@ -203,7 +203,7 @@ void quarantine_put(struct kasan_free_meta *info, struct kmem_cache *cache)
 			if (new_tail != quarantine_head)
 				quarantine_tail = new_tail;
 		}
-		spin_unlock(&quarantine_lock);
+		raw_spin_unlock(&quarantine_lock);
 	}

 	local_irq_restore(flags);
@@ -230,7 +230,7 @@ void quarantine_reduce(void)
 	 * expected case).
 	 */
 	srcu_idx = srcu_read_lock(&remove_cache_srcu);
-	spin_lock_irqsave(&quarantine_lock, flags);
+	raw_spin_lock_irqsave(&quarantine_lock, flags);

 	/*
 	 * Update quarantine size in case of hotplug. Allocate a fraction of
@@ -254,7 +254,7 @@ void quarantine_reduce(void)
 		quarantine_head = 0;
 	}

-	spin_unlock_irqrestore(&quarantine_lock, flags);
+	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

 	qlist_free_all(&to_free, NULL);
 	srcu_read_unlock(&remove_cache_srcu, srcu_idx);
@@ -310,17 +310,17 @@ void quarantine_remove_cache(struct kmem_cache *cache)
 	 */
 	on_each_cpu(per_cpu_remove_cache, cache, 1);

-	spin_lock_irqsave(&quarantine_lock, flags);
+	raw_spin_lock_irqsave(&quarantine_lock, flags);
 	for (i = 0; i < QUARANTINE_BATCHES; i++) {
 		if (qlist_empty(&global_quarantine[i]))
 			continue;
 		qlist_move_cache(&global_quarantine[i], &to_free, cache);
 		/* Scanning whole quarantine can take a while. */
-		spin_unlock_irqrestore(&quarantine_lock, flags);
+		raw_spin_unlock_irqrestore(&quarantine_lock, flags);
 		cond_resched();
-		spin_lock_irqsave(&quarantine_lock, flags);
+		raw_spin_lock_irqsave(&quarantine_lock, flags);
 	}
-	spin_unlock_irqrestore(&quarantine_lock, flags);
+	raw_spin_unlock_irqrestore(&quarantine_lock, flags);

 	qlist_free_all(&to_free, cache);
