Diffstat (limited to 'fs/btrfs/disk-io.c')
-rw-r--r-- | fs/btrfs/disk-io.c | 63
1 file changed, 26 insertions(+), 37 deletions(-)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b0ab41da91d1..3f0b6d1936e8 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1664,9 +1664,8 @@ static int cleaner_kthread(void *arg)
 	struct btrfs_root *root = arg;
 	struct btrfs_fs_info *fs_info = root->fs_info;
 	int again;
-	struct btrfs_trans_handle *trans;
 
-	do {
+	while (1) {
 		again = 0;
 
 		/* Make the cleaner go to sleep early. */
@@ -1715,42 +1714,16 @@ static int cleaner_kthread(void *arg)
 		 */
 		btrfs_delete_unused_bgs(fs_info);
 sleep:
+		if (kthread_should_park())
+			kthread_parkme();
+		if (kthread_should_stop())
+			return 0;
 		if (!again) {
 			set_current_state(TASK_INTERRUPTIBLE);
-			if (!kthread_should_stop())
-				schedule();
+			schedule();
 			__set_current_state(TASK_RUNNING);
 		}
-	} while (!kthread_should_stop());
-
-	/*
-	 * Transaction kthread is stopped before us and wakes us up.
-	 * However we might have started a new transaction and COWed some
-	 * tree blocks when deleting unused block groups for example. So
-	 * make sure we commit the transaction we started to have a clean
-	 * shutdown when evicting the btree inode - if it has dirty pages
-	 * when we do the final iput() on it, eviction will trigger a
-	 * writeback for it which will fail with null pointer dereferences
-	 * since work queues and other resources were already released and
-	 * destroyed by the time the iput/eviction/writeback is made.
-	 */
-	trans = btrfs_attach_transaction(root);
-	if (IS_ERR(trans)) {
-		if (PTR_ERR(trans) != -ENOENT)
-			btrfs_err(fs_info,
-				  "cleaner transaction attach returned %ld",
-				  PTR_ERR(trans));
-	} else {
-		int ret;
-
-		ret = btrfs_commit_transaction(trans);
-		if (ret)
-			btrfs_err(fs_info,
-				  "cleaner open transaction commit returned %d",
-				  ret);
-	}
-
-	return 0;
 	}
 }
 
 static int transaction_kthread(void *arg)
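
For context, the reworked cleaner loop above relies on the generic kthread parking API: the thread itself checks kthread_should_park() and calls kthread_parkme(), then checks kthread_should_stop() before going back to sleep. A minimal sketch of that worker-side pattern, with a placeholder work function rather than the real btrfs cleaner work:

#include <linux/kthread.h>
#include <linux/sched.h>

/* Placeholder for the thread's real work; returns true if more work is pending. */
static bool example_do_work(void *arg)
{
	return false;
}

static int example_kthread(void *arg)
{
	while (1) {
		bool again = example_do_work(arg);

		/* Park here when the creator calls kthread_park(). */
		if (kthread_should_park())
			kthread_parkme();
		/* Exit when the creator calls kthread_stop(). */
		if (kthread_should_stop())
			return 0;
		if (!again) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			__set_current_state(TASK_RUNNING);
		}
	}
}
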
@@ -3931,6 +3904,13 @@ void close_ctree(struct btrfs_fs_info *fs_info)
 	int ret;
 
 	set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
+	/*
+	 * We don't want the cleaner to start new transactions, add more delayed
+	 * iputs, etc. while we're closing. We can't use kthread_stop() yet
+	 * because that frees the task_struct, and the transaction kthread might
+	 * still try to wake up the cleaner.
+	 */
+	kthread_park(fs_info->cleaner_kthread);
 
 	/* wait for the qgroup rescan worker to stop */
 	btrfs_qgroup_wait_for_completion(fs_info, false);
@@ -3958,9 +3938,8 @@ void close_ctree(struct btrfs_fs_info *fs_info)
 
 	if (!sb_rdonly(fs_info->sb)) {
 		/*
-		 * If the cleaner thread is stopped and there are
-		 * block groups queued for removal, the deletion will be
-		 * skipped when we quit the cleaner thread.
+		 * The cleaner kthread is stopped, so do one final pass over
+		 * unused block groups.
 		 */
 		btrfs_delete_unused_bgs(fs_info);
 
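
The close_ctree() hunks are the caller side of the same API: park the cleaner as the very first closing step so it can no longer start transactions or add delayed iputs, do the rest of the teardown, and only stop it later (kthread_stop() frees the task_struct, so it must come last; it unparks the thread before asking it to exit). A hedged sketch of that ordering with illustrative names, not the actual btrfs teardown sequence:

#include <linux/kthread.h>
#include <linux/sched.h>

/* Illustrative teardown helper: quiesce the worker early, stop it last. */
static void example_shutdown(struct task_struct *worker)
{
	/* Worker blocks in kthread_parkme(); it can no longer queue new work. */
	kthread_park(worker);

	/* ... drain queues, commit state, tear down other helpers ... */

	/* Unparks the worker, asks it to stop and waits for it to exit. */
	kthread_stop(worker);
}
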
@@ -4359,13 +4338,23 @@ static int btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info,
 	unpin = pinned_extents;
 again:
 	while (1) {
+		/*
+		 * The btrfs_finish_extent_commit() may get the same range as
+		 * ours between find_first_extent_bit and clear_extent_dirty.
+		 * Hence, hold the unused_bg_unpin_mutex to avoid double unpin
+		 * the same extent range.
+		 */
+		mutex_lock(&fs_info->unused_bg_unpin_mutex);
 		ret = find_first_extent_bit(unpin, 0, &start, &end,
 					    EXTENT_DIRTY, NULL);
-		if (ret)
+		if (ret) {
+			mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 			break;
+		}
 
 		clear_extent_dirty(unpin, start, end);
 		btrfs_error_unpin_extent_range(fs_info, start, end);
+		mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 		cond_resched();
 	}
 
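
Finally, the btrfs_destroy_pinned_extent() hunk closes a window in which this error-path loop and btrfs_finish_extent_commit() could both find the same dirty range between the lookup and the clear and unpin it twice. The pattern reduced to its essentials, with hypothetical stand-ins for the extent-io-tree helpers (only find_first_extent_bit(), clear_extent_dirty() and unused_bg_unpin_mutex are the real names):

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>

static DEFINE_MUTEX(example_unpin_mutex);

/* Hypothetical stand-ins for the extent-io-tree lookup/clear/unpin calls. */
static int example_find_dirty(u64 *start, u64 *end) { return 1; /* nothing left */ }
static void example_clear_dirty(u64 start, u64 end) { }
static void example_unpin(u64 start, u64 end) { }

static void example_drain_dirty_ranges(void)
{
	u64 start, end;

	while (1) {
		/*
		 * Hold the mutex across both the lookup and the clear so a
		 * concurrent unpinner cannot grab the same range in between.
		 */
		mutex_lock(&example_unpin_mutex);
		if (example_find_dirty(&start, &end)) {
			mutex_unlock(&example_unpin_mutex);
			break;
		}
		example_clear_dirty(start, end);
		example_unpin(start, end);
		mutex_unlock(&example_unpin_mutex);

		cond_resched();
	}
}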