Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--	fs/btrfs/scrub.c	158
1 file changed, 116 insertions, 42 deletions
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 0afcd452fcb3..2544805544f0 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -158,12 +158,20 @@ struct scrub_fixup_nodatasum {
 	int mirror_num;
 };
 
+struct scrub_nocow_inode {
+	u64 inum;
+	u64 offset;
+	u64 root;
+	struct list_head list;
+};
+
 struct scrub_copy_nocow_ctx {
 	struct scrub_ctx *sctx;
 	u64 logical;
 	u64 len;
 	int mirror_num;
 	u64 physical_for_dev_replace;
+	struct list_head inodes;
 	struct btrfs_work work;
 };
 
@@ -245,7 +253,7 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
 static int write_page_nocow(struct scrub_ctx *sctx,
 			    u64 physical_for_dev_replace, struct page *page);
 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
-				      void *ctx);
+				      struct scrub_copy_nocow_ctx *ctx);
 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 			    int mirror_num, u64 physical_for_dev_replace);
 static void copy_nocow_pages_worker(struct btrfs_work *work);
@@ -2709,8 +2717,6 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		mutex_unlock(&fs_info->scrub_lock);
 		wake_up(&fs_info->scrub_pause_wait);
 
-		dev_replace->cursor_left = dev_replace->cursor_right;
-		dev_replace->item_needs_writeback = 1;
 		btrfs_put_block_group(cache);
 		if (ret)
 			break;
@@ -2724,6 +2730,9 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 			break;
 		}
 
+		dev_replace->cursor_left = dev_replace->cursor_right;
+		dev_replace->item_needs_writeback = 1;
+
 		key.offset = found_key.offset + length;
 		btrfs_release_path(path);
 	}
@@ -2775,7 +2784,6 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 {
 	int ret = 0;
 
-	mutex_lock(&fs_info->scrub_lock);
 	if (fs_info->scrub_workers_refcnt == 0) {
 		if (is_dev_replace)
 			btrfs_init_workers(&fs_info->scrub_workers, "scrub", 1,
@@ -2805,21 +2813,17 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
 	}
 	++fs_info->scrub_workers_refcnt;
 out:
-	mutex_unlock(&fs_info->scrub_lock);
-
 	return ret;
 }
 
 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
 {
-	mutex_lock(&fs_info->scrub_lock);
 	if (--fs_info->scrub_workers_refcnt == 0) {
 		btrfs_stop_workers(&fs_info->scrub_workers);
 		btrfs_stop_workers(&fs_info->scrub_wr_completion_workers);
 		btrfs_stop_workers(&fs_info->scrub_nocow_workers);
 	}
 	WARN_ON(fs_info->scrub_workers_refcnt < 0);
-	mutex_unlock(&fs_info->scrub_lock);
 }
 
 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
@@ -2880,23 +2884,18 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		return -EINVAL;
 	}
 
-	ret = scrub_workers_get(fs_info, is_dev_replace);
-	if (ret)
-		return ret;
 
 	mutex_lock(&fs_info->fs_devices->device_list_mutex);
 	dev = btrfs_find_device(fs_info, devid, NULL, NULL);
 	if (!dev || (dev->missing && !is_dev_replace)) {
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		scrub_workers_put(fs_info);
 		return -ENODEV;
 	}
-	mutex_lock(&fs_info->scrub_lock);
 
+	mutex_lock(&fs_info->scrub_lock);
 	if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
 		mutex_unlock(&fs_info->scrub_lock);
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		scrub_workers_put(fs_info);
 		return -EIO;
 	}
 
@@ -2907,10 +2906,17 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 		btrfs_dev_replace_unlock(&fs_info->dev_replace);
 		mutex_unlock(&fs_info->scrub_lock);
 		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
-		scrub_workers_put(fs_info);
 		return -EINPROGRESS;
 	}
 	btrfs_dev_replace_unlock(&fs_info->dev_replace);
+
+	ret = scrub_workers_get(fs_info, is_dev_replace);
+	if (ret) {
+		mutex_unlock(&fs_info->scrub_lock);
+		mutex_unlock(&fs_info->fs_devices->device_list_mutex);
+		return ret;
+	}
+
 	sctx = scrub_setup_ctx(dev, is_dev_replace);
 	if (IS_ERR(sctx)) {
 		mutex_unlock(&fs_info->scrub_lock);
@@ -2923,13 +2929,15 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 
 	atomic_inc(&fs_info->scrubs_running);
 	mutex_unlock(&fs_info->scrub_lock);
-	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	if (!is_dev_replace) {
-		down_read(&fs_info->scrub_super_lock);
+		/*
+		 * by holding device list mutex, we can
+		 * kick off writing super in log tree sync.
+		 */
 		ret = scrub_supers(sctx, dev);
-		up_read(&fs_info->scrub_super_lock);
 	}
+	mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 
 	if (!ret)
 		ret = scrub_enumerate_chunks(sctx, dev, start, end,
@@ -2946,10 +2954,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
 
 	mutex_lock(&fs_info->scrub_lock);
 	dev->scrub_device = NULL;
+	scrub_workers_put(fs_info);
 	mutex_unlock(&fs_info->scrub_lock);
 
 	scrub_free_ctx(sctx);
-	scrub_workers_put(fs_info);
 
 	return ret;
 }
@@ -2979,16 +2987,6 @@ void btrfs_scrub_continue(struct btrfs_root *root)
 	wake_up(&fs_info->scrub_pause_wait);
 }
 
-void btrfs_scrub_pause_super(struct btrfs_root *root)
-{
-	down_write(&root->fs_info->scrub_super_lock);
-}
-
-void btrfs_scrub_continue_super(struct btrfs_root *root)
-{
-	up_write(&root->fs_info->scrub_super_lock);
-}
-
 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
 {
 	mutex_lock(&fs_info->scrub_lock);
@@ -3126,12 +3124,30 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 	nocow_ctx->mirror_num = mirror_num;
 	nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
 	nocow_ctx->work.func = copy_nocow_pages_worker;
+	INIT_LIST_HEAD(&nocow_ctx->inodes);
 	btrfs_queue_worker(&fs_info->scrub_nocow_workers,
 			   &nocow_ctx->work);
 
 	return 0;
 }
 
+static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
+{
+	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
+	struct scrub_nocow_inode *nocow_inode;
+
+	nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
+	if (!nocow_inode)
+		return -ENOMEM;
+	nocow_inode->inum = inum;
+	nocow_inode->offset = offset;
+	nocow_inode->root = root;
+	list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
+	return 0;
+}
+
+#define COPY_COMPLETE 1
+
 static void copy_nocow_pages_worker(struct btrfs_work *work)
 {
 	struct scrub_copy_nocow_ctx *nocow_ctx =
@@ -3167,8 +3183,7 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
 	}
 
 	ret = iterate_inodes_from_logical(logical, fs_info, path,
-					  copy_nocow_pages_for_inode,
-					  nocow_ctx);
+					  record_inode_for_nocow, nocow_ctx);
 	if (ret != 0 && ret != -ENOENT) {
 		pr_warn("iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d\n",
 			logical, physical_for_dev_replace, len, mirror_num,
@@ -3177,7 +3192,33 @@ static void copy_nocow_pages_worker(struct btrfs_work *work)
 		goto out;
 	}
 
+	btrfs_end_transaction(trans, root);
+	trans = NULL;
+	while (!list_empty(&nocow_ctx->inodes)) {
+		struct scrub_nocow_inode *entry;
+		entry = list_first_entry(&nocow_ctx->inodes,
+					 struct scrub_nocow_inode,
+					 list);
+		list_del_init(&entry->list);
+		ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
+						 entry->root, nocow_ctx);
+		kfree(entry);
+		if (ret == COPY_COMPLETE) {
+			ret = 0;
+			break;
+		} else if (ret) {
+			break;
+		}
+	}
 out:
+	while (!list_empty(&nocow_ctx->inodes)) {
+		struct scrub_nocow_inode *entry;
+		entry = list_first_entry(&nocow_ctx->inodes,
+					 struct scrub_nocow_inode,
+					 list);
+		list_del_init(&entry->list);
+		kfree(entry);
+	}
 	if (trans && !IS_ERR(trans))
 		btrfs_end_transaction(trans, root);
 	if (not_written)
@@ -3190,20 +3231,25 @@ out:
 	scrub_pending_trans_workers_dec(sctx);
 }
 
-static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
+static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
+				      struct scrub_copy_nocow_ctx *nocow_ctx)
 {
-	struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
 	struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
 	struct btrfs_key key;
 	struct inode *inode;
 	struct page *page;
 	struct btrfs_root *local_root;
+	struct btrfs_ordered_extent *ordered;
+	struct extent_map *em;
+	struct extent_state *cached_state = NULL;
+	struct extent_io_tree *io_tree;
 	u64 physical_for_dev_replace;
-	u64 len;
+	u64 len = nocow_ctx->len;
+	u64 lockstart = offset, lockend = offset + len - 1;
 	unsigned long index;
 	int srcu_index;
-	int ret;
-	int err;
+	int ret = 0;
+	int err = 0;
 
 	key.objectid = root;
 	key.type = BTRFS_ROOT_ITEM_KEY;
@@ -3229,9 +3275,33 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, void *ctx)
 	mutex_lock(&inode->i_mutex);
 	inode_dio_wait(inode);
 
-	ret = 0;
 	physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
-	len = nocow_ctx->len;
+	io_tree = &BTRFS_I(inode)->io_tree;
+
+	lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
+	ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
+	if (ordered) {
+		btrfs_put_ordered_extent(ordered);
+		goto out_unlock;
+	}
+
+	em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
+	if (IS_ERR(em)) {
+		ret = PTR_ERR(em);
+		goto out_unlock;
+	}
+
+	/*
+	 * This extent does not actually cover the logical extent anymore,
+	 * move on to the next inode.
+	 */
+	if (em->block_start > nocow_ctx->logical ||
+	    em->block_start + em->block_len < nocow_ctx->logical + len) {
+		free_extent_map(em);
+		goto out_unlock;
+	}
+	free_extent_map(em);
+
 	while (len >= PAGE_CACHE_SIZE) {
 		index = offset >> PAGE_CACHE_SHIFT;
 again:
@@ -3247,10 +3317,9 @@ again:
 			goto next_page;
 		} else {
 			ClearPageError(page);
-			err = extent_read_full_page(&BTRFS_I(inode)->
-						    io_tree,
-						    page, btrfs_get_extent,
-						    nocow_ctx->mirror_num);
+			err = extent_read_full_page_nolock(io_tree, page,
+							   btrfs_get_extent,
+							   nocow_ctx->mirror_num);
 			if (err) {
 				ret = err;
 				goto next_page;
@@ -3264,6 +3333,7 @@ again:
 		 * page in the page cache.
 		 */
 		if (page->mapping != inode->i_mapping) {
+			unlock_page(page);
 			page_cache_release(page);
 			goto again;
 		}
@@ -3287,6 +3357,10 @@ next_page:
 		physical_for_dev_replace += PAGE_CACHE_SIZE;
 		len -= PAGE_CACHE_SIZE;
 	}
+	ret = COPY_COMPLETE;
+out_unlock:
+	unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
+			     GFP_NOFS);
 out:
 	mutex_unlock(&inode->i_mutex);
 	iput(inode);
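The core change in the copy_nocow_pages_worker() hunks above is that the backref walk no longer copies pages while the transaction is still running: record_inode_for_nocow() only queues (root, inum, offset) entries on nocow_ctx->inodes, the worker ends the transaction, and then drains the list with copy_nocow_pages_for_inode(). Below is a minimal user-space sketch of that collect-then-drain pattern; the struct and function names are illustrative only, not btrfs APIs, and a plain singly linked list stands in for struct list_head.

/* Simplified illustration of the collect-then-process pattern used by
 * record_inode_for_nocow()/copy_nocow_pages_worker() in the patch above.
 * All names here are made up for the example. */
#include <stdio.h>
#include <stdlib.h>

struct nocow_inode {
	unsigned long long inum;
	unsigned long long offset;
	unsigned long long root;
	struct nocow_inode *next;	/* stand-in for struct list_head */
};

struct nocow_ctx {
	struct nocow_inode *head, *tail;
};

/* Callback run during the "backref iteration": it only records the inode,
 * it does no I/O, so the caller can keep its transaction short. */
static int record_inode(unsigned long long inum, unsigned long long offset,
			unsigned long long root, void *arg)
{
	struct nocow_ctx *ctx = arg;
	struct nocow_inode *e = calloc(1, sizeof(*e));

	if (!e)
		return -1;		/* -ENOMEM in the kernel version */
	e->inum = inum;
	e->offset = offset;
	e->root = root;
	if (ctx->tail)
		ctx->tail->next = e;
	else
		ctx->head = e;
	ctx->tail = e;
	return 0;
}

int main(void)
{
	struct nocow_ctx ctx = { NULL, NULL };

	/* Phase 1: "inside the transaction" - just collect entries. */
	record_inode(257, 0, 5, &ctx);
	record_inode(258, 4096, 5, &ctx);

	/* ...the transaction would be ended here... */

	/* Phase 2: drain the list and do the heavy per-inode work. */
	while (ctx.head) {
		struct nocow_inode *e = ctx.head;

		ctx.head = e->next;
		printf("copy pages for root %llu inode %llu offset %llu\n",
		       e->root, e->inum, e->offset);
		free(e);
	}
	return 0;
}

The second drain loop after the out: label in the real patch serves the same purpose as freeing leftover entries here: if processing stops early (an error or COPY_COMPLETE), the remaining recorded inodes are released without being copied.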