author	Jan Kara <jack@suse.cz>	2016-12-14 08:40:05 -0500
committer	Paul Moore <paul@paul-moore.com>	2017-01-03 15:56:38 -0500
commit	be29d20f3f5db1f0b4e49a4f6eeedf840e2bf9b1 (patch)
tree	129d3e33fe04b96e2af0b8adf633019a316f10e8 /kernel/audit_tree.c
parent	e3ba730702af370563f66cb610b71aa0ca67955e (diff)
audit: Fix sleep in atomic

Audit tree code was happily adding new notification marks while holding
spinlocks. Since fsnotify_add_mark() acquires group->mark_mutex this can
lead to sleeping while holding a spinlock, deadlocks due to lock
inversion, and probably other fun. Fix the problem by acquiring
group->mark_mutex earlier.

CC: Paul Moore <paul@paul-moore.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Paul Moore <paul@paul-moore.com>
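For illustration only (this sketch is not part of the patch), the bug and
the fix reduce to the locking pattern below; entry and new stand in for
the fsnotify mark objects used in untag_chunk() and tag_chunk():

	/* Before (broken): a sleeping lock is taken inside a spinlock */
	spin_lock(&entry->lock);		/* atomic context begins */
	fsnotify_add_mark(&new->mark, entry->group,
			  entry->inode, NULL, 1);	/* internally takes
							 * group->mark_mutex,
							 * may sleep in atomic */
	spin_unlock(&entry->lock);

	/* After (this commit): take the mutex first, while sleeping is
	 * still allowed, then call the _locked variant, which expects
	 * the caller to already hold group->mark_mutex */
	mutex_lock(&entry->group->mark_mutex);
	spin_lock(&entry->lock);
	fsnotify_add_mark_locked(&new->mark, entry->group,
				 entry->inode, NULL, 1);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);

The same reordering is applied at every call site in untag_chunk() and
tag_chunk() in the hunks below.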
Diffstat (limited to 'kernel/audit_tree.c')
-rw-r--r--	kernel/audit_tree.c	18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index b4b58400531f..862969014cf6 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -231,9 +231,11 @@ static void untag_chunk(struct node *p)
 	if (size)
 		new = alloc_chunk(size);
 
+	mutex_lock(&entry->group->mark_mutex);
 	spin_lock(&entry->lock);
 	if (chunk->dead || !entry->inode) {
 		spin_unlock(&entry->lock);
+		mutex_unlock(&entry->group->mark_mutex);
 		if (new)
 			free_chunk(new);
 		goto out;
@@ -251,6 +253,7 @@ static void untag_chunk(struct node *p)
 		list_del_rcu(&chunk->hash);
 		spin_unlock(&hash_lock);
 		spin_unlock(&entry->lock);
+		mutex_unlock(&entry->group->mark_mutex);
 		fsnotify_destroy_mark(entry, audit_tree_group);
 		goto out;
 	}
@@ -258,8 +261,8 @@ static void untag_chunk(struct node *p)
 	if (!new)
 		goto Fallback;
 
-	if (fsnotify_add_mark(&new->mark,
-			      entry->group, entry->inode, NULL, 1)) {
+	if (fsnotify_add_mark_locked(&new->mark, entry->group, entry->inode,
+				     NULL, 1)) {
 		fsnotify_put_mark(&new->mark);
 		goto Fallback;
 	}
@@ -293,6 +296,7 @@ static void untag_chunk(struct node *p)
 		owner->root = new;
 	spin_unlock(&hash_lock);
 	spin_unlock(&entry->lock);
+	mutex_unlock(&entry->group->mark_mutex);
 	fsnotify_destroy_mark(entry, audit_tree_group);
 	fsnotify_put_mark(&new->mark);	/* drop initial reference */
 	goto out;
@@ -309,6 +313,7 @@ Fallback:
 	put_tree(owner);
 	spin_unlock(&hash_lock);
 	spin_unlock(&entry->lock);
+	mutex_unlock(&entry->group->mark_mutex);
 out:
 	fsnotify_put_mark(entry);
 	spin_lock(&hash_lock);
@@ -386,18 +391,21 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 
 	chunk_entry = &chunk->mark;
 
+	mutex_lock(&old_entry->group->mark_mutex);
 	spin_lock(&old_entry->lock);
 	if (!old_entry->inode) {
 		/* old_entry is being shot, lets just lie */
 		spin_unlock(&old_entry->lock);
+		mutex_unlock(&old_entry->group->mark_mutex);
 		fsnotify_put_mark(old_entry);
 		free_chunk(chunk);
 		return -ENOENT;
 	}
 
-	if (fsnotify_add_mark(chunk_entry,
-			      old_entry->group, old_entry->inode, NULL, 1)) {
+	if (fsnotify_add_mark_locked(chunk_entry, old_entry->group,
+				     old_entry->inode, NULL, 1)) {
 		spin_unlock(&old_entry->lock);
+		mutex_unlock(&old_entry->group->mark_mutex);
 		fsnotify_put_mark(chunk_entry);
 		fsnotify_put_mark(old_entry);
 		return -ENOSPC;
@@ -413,6 +421,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 		chunk->dead = 1;
 		spin_unlock(&chunk_entry->lock);
 		spin_unlock(&old_entry->lock);
+		mutex_unlock(&old_entry->group->mark_mutex);
 
 		fsnotify_destroy_mark(chunk_entry, audit_tree_group);
 
@@ -445,6 +454,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
 	spin_unlock(&hash_lock);
 	spin_unlock(&chunk_entry->lock);
 	spin_unlock(&old_entry->lock);
+	mutex_unlock(&old_entry->group->mark_mutex);
 	fsnotify_destroy_mark(old_entry, audit_tree_group);
 	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
 	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */