summary | refs | log | tree | commit | diff | stats
path: root/fs/inode.c
diff options
context:
space:
mode:
authorJosef Bacik <jbacik@fb.com>2015-03-04 16:52:52 -0500
committerJosef Bacik <jbacik@fb.com>2015-08-18 13:20:09 -0400
commitac05fbb40062411ea1b722aa2cede7feaa94f1b4 (patch)
tree302f21a7e8e25efd99010fe09fc2057262eadf19 /fs/inode.c
parentc7f5408493aeb01532927b2276316797a03ed6ee (diff)
inode: don't softlockup when evicting inodes
On a box with a lot of RAM (148 GB) I can make the box soft-lockup after running an fs_mark job that creates hundreds of millions of empty files. This is because we never generate enough memory pressure to keep the number of inodes on our unused list low, so when we go to unmount we have to evict ~100 million inodes. This makes one processor very unhappy, so add a cond_resched() in dispose_list(), and if we need a resched while processing the s_inodes list, drop the lock, reschedule, and run dispose_list() on what we have culled so far before starting again. Thanks, Signed-off-by: Josef Bacik <jbacik@fb.com> Reviewed-by: Jan Kara <jack@suse.cz>
Diffstat (limited to 'fs/inode.c')
-rw-r--r--  fs/inode.c | 14 ++++++++++++++
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index f09148e07198..78a17b8859e1 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -575,6 +575,7 @@ static void dispose_list(struct list_head *head)
 		list_del_init(&inode->i_lru);
 
 		evict(inode);
+		cond_resched();
 	}
 }
 
@@ -592,6 +593,7 @@ void evict_inodes(struct super_block *sb)
 	struct inode *inode, *next;
 	LIST_HEAD(dispose);
 
+again:
 	spin_lock(&sb->s_inode_list_lock);
 	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 		if (atomic_read(&inode->i_count))
@@ -607,6 +609,18 @@ void evict_inodes(struct super_block *sb)
 		inode_lru_list_del(inode);
 		spin_unlock(&inode->i_lock);
 		list_add(&inode->i_lru, &dispose);
+
+		/*
+		 * We can have a ton of inodes to evict at unmount time given
+		 * enough memory, check to see if we need to go to sleep for a
+		 * bit so we don't livelock.
+		 */
+		if (need_resched()) {
+			spin_unlock(&sb->s_inode_list_lock);
+			cond_resched();
+			dispose_list(&dispose);
+			goto again;
+		}
 	}
 	spin_unlock(&sb->s_inode_list_lock);
 