aboutsummaryrefslogtreecommitdiffstats
path: root/fs
diff options
context:
space:
mode:
authorMiao Xie <miaox@cn.fujitsu.com>2013-01-22 05:49:00 -0500
committerJosef Bacik <jbacik@fusionio.com>2013-01-24 12:51:27 -0500
commit1eafa6c73791e4f312324ddad9cbcaf6a1b6052b (patch)
tree59ffd2a324039af64354fbaaaa116c9366acba6f /fs
parentc9f01bfe0ca411b4751d7fdbb9d602035ba52f75 (diff)
Btrfs: fix repeated delalloc work allocation
btrfs_start_delalloc_inodes() locks the delalloc_inodes list, fetches the first inode, unlocks the list, triggers btrfs_alloc_delalloc_work/ btrfs_queue_worker for this inode, and then it locks the list, checks the head of the list again. But because we don't delete the first inode from the list before dealing with it, it will fetch the same inode again. As a result, this function allocates a huge amount of btrfs_delalloc_work structures, and OOM happens. Fix this problem by splicing the delalloc list. Reported-by: Alex Lyakas <alex.btrfs@zadarastorage.com> Signed-off-by: Miao Xie <miaox@cn.fujitsu.com> Signed-off-by: Josef Bacik <jbacik@fusionio.com>
Diffstat (limited to 'fs')
-rw-r--r--fs/btrfs/inode.c55
1 file changed, 41 insertions, 14 deletions
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9bc6c40b182d..ca7ace7b7b52 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7585,41 +7585,61 @@ void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
7585 */ 7585 */
7586int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) 7586int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
7587{ 7587{
7588 struct list_head *head = &root->fs_info->delalloc_inodes;
7589 struct btrfs_inode *binode; 7588 struct btrfs_inode *binode;
7590 struct inode *inode; 7589 struct inode *inode;
7591 struct btrfs_delalloc_work *work, *next; 7590 struct btrfs_delalloc_work *work, *next;
7592 struct list_head works; 7591 struct list_head works;
7592 struct list_head splice;
7593 int ret = 0; 7593 int ret = 0;
7594 7594
7595 if (root->fs_info->sb->s_flags & MS_RDONLY) 7595 if (root->fs_info->sb->s_flags & MS_RDONLY)
7596 return -EROFS; 7596 return -EROFS;
7597 7597
7598 INIT_LIST_HEAD(&works); 7598 INIT_LIST_HEAD(&works);
7599 7599 INIT_LIST_HEAD(&splice);
7600again:
7600 spin_lock(&root->fs_info->delalloc_lock); 7601 spin_lock(&root->fs_info->delalloc_lock);
7601 while (!list_empty(head)) { 7602 list_splice_init(&root->fs_info->delalloc_inodes, &splice);
7602 binode = list_entry(head->next, struct btrfs_inode, 7603 while (!list_empty(&splice)) {
7604 binode = list_entry(splice.next, struct btrfs_inode,
7603 delalloc_inodes); 7605 delalloc_inodes);
7606
7607 list_del_init(&binode->delalloc_inodes);
7608
7604 inode = igrab(&binode->vfs_inode); 7609 inode = igrab(&binode->vfs_inode);
7605 if (!inode) 7610 if (!inode)
7606 list_del_init(&binode->delalloc_inodes); 7611 continue;
7612
7613 list_add_tail(&binode->delalloc_inodes,
7614 &root->fs_info->delalloc_inodes);
7607 spin_unlock(&root->fs_info->delalloc_lock); 7615 spin_unlock(&root->fs_info->delalloc_lock);
7608 if (inode) { 7616
7609 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput); 7617 work = btrfs_alloc_delalloc_work(inode, 0, delay_iput);
7610 if (!work) { 7618 if (unlikely(!work)) {
7611 ret = -ENOMEM; 7619 ret = -ENOMEM;
7612 goto out; 7620 goto out;
7613 }
7614 list_add_tail(&work->list, &works);
7615 btrfs_queue_worker(&root->fs_info->flush_workers,
7616 &work->work);
7617 } 7621 }
7622 list_add_tail(&work->list, &works);
7623 btrfs_queue_worker(&root->fs_info->flush_workers,
7624 &work->work);
7625
7618 cond_resched(); 7626 cond_resched();
7619 spin_lock(&root->fs_info->delalloc_lock); 7627 spin_lock(&root->fs_info->delalloc_lock);
7620 } 7628 }
7621 spin_unlock(&root->fs_info->delalloc_lock); 7629 spin_unlock(&root->fs_info->delalloc_lock);
7622 7630
7631 list_for_each_entry_safe(work, next, &works, list) {
7632 list_del_init(&work->list);
7633 btrfs_wait_and_free_delalloc_work(work);
7634 }
7635
7636 spin_lock(&root->fs_info->delalloc_lock);
7637 if (!list_empty(&root->fs_info->delalloc_inodes)) {
7638 spin_unlock(&root->fs_info->delalloc_lock);
7639 goto again;
7640 }
7641 spin_unlock(&root->fs_info->delalloc_lock);
7642
7623 /* the filemap_flush will queue IO into the worker threads, but 7643 /* the filemap_flush will queue IO into the worker threads, but
7624 * we have to make sure the IO is actually started and that 7644 * we have to make sure the IO is actually started and that
7625 * ordered extents get created before we return 7645 * ordered extents get created before we return
@@ -7632,11 +7652,18 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
7632 atomic_read(&root->fs_info->async_delalloc_pages) == 0)); 7652 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
7633 } 7653 }
7634 atomic_dec(&root->fs_info->async_submit_draining); 7654 atomic_dec(&root->fs_info->async_submit_draining);
7655 return 0;
7635out: 7656out:
7636 list_for_each_entry_safe(work, next, &works, list) { 7657 list_for_each_entry_safe(work, next, &works, list) {
7637 list_del_init(&work->list); 7658 list_del_init(&work->list);
7638 btrfs_wait_and_free_delalloc_work(work); 7659 btrfs_wait_and_free_delalloc_work(work);
7639 } 7660 }
7661
7662 if (!list_empty_careful(&splice)) {
7663 spin_lock(&root->fs_info->delalloc_lock);
7664 list_splice_tail(&splice, &root->fs_info->delalloc_inodes);
7665 spin_unlock(&root->fs_info->delalloc_lock);
7666 }
7640 return ret; 7667 return ret;
7641} 7668}
7642 7669