Diffstat (limited to 'fs/btrfs/extent-tree.c'):
 fs/btrfs/extent-tree.c | 71 +++++++++++++++++++++++++++++++++++------------------------------------
 1 file changed, 35 insertions(+), 36 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d3b58e388535..8b7eb22d508a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4,6 +4,7 @@
  */
 
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
@@ -7888,33 +7889,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 	return 0;
 }
 
-/* link_block_group will queue up kobjects to add when we're reclaim-safe */
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
-{
-	struct btrfs_space_info *space_info;
-	struct raid_kobject *rkobj;
-	LIST_HEAD(list);
-	int ret = 0;
-
-	spin_lock(&fs_info->pending_raid_kobjs_lock);
-	list_splice_init(&fs_info->pending_raid_kobjs, &list);
-	spin_unlock(&fs_info->pending_raid_kobjs_lock);
-
-	list_for_each_entry(rkobj, &list, list) {
-		space_info = btrfs_find_space_info(fs_info, rkobj->flags);
-
-		ret = kobject_add(&rkobj->kobj, &space_info->kobj,
-				  "%s", btrfs_bg_type_to_raid_name(rkobj->flags));
-		if (ret) {
-			kobject_put(&rkobj->kobj);
-			break;
-		}
-	}
-	if (ret)
-		btrfs_warn(fs_info,
-			   "failed to add kobject for block cache, ignoring");
-}
-
 static void link_block_group(struct btrfs_block_group_cache *cache)
 {
 	struct btrfs_space_info *space_info = cache->space_info;
@@ -7929,18 +7903,36 @@ static void link_block_group(struct btrfs_block_group_cache *cache)
 	up_write(&space_info->groups_sem);
 
 	if (first) {
-		struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
+		struct raid_kobject *rkobj;
+		unsigned int nofs_flag;
+		int ret;
+
+		/*
+		 * Setup a NOFS context because kobject_add(), deep in its call
+		 * chain, does GFP_KERNEL allocations, and we are often called
+		 * in a context where if reclaim is triggered we can deadlock
+		 * (we are either holding a transaction handle or some lock
+		 * required for a transaction commit).
+		 */
+		nofs_flag = memalloc_nofs_save();
+		rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL);
 		if (!rkobj) {
+			memalloc_nofs_restore(nofs_flag);
 			btrfs_warn(cache->fs_info,
 				"couldn't alloc memory for raid level kobject");
 			return;
 		}
 		rkobj->flags = cache->flags;
 		kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
-
-		spin_lock(&fs_info->pending_raid_kobjs_lock);
-		list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
-		spin_unlock(&fs_info->pending_raid_kobjs_lock);
+		ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
+				  btrfs_bg_type_to_raid_name(rkobj->flags));
+		memalloc_nofs_restore(nofs_flag);
+		if (ret) {
+			kobject_put(&rkobj->kobj);
+			btrfs_warn(fs_info,
+				   "failed to add kobject for block cache, ignoring");
+			return;
+		}
 		space_info->block_group_kobjs[index] = &rkobj->kobj;
 	}
 }
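
Side note: a minimal sketch of the scoped NOFS pattern the hunk above switches
to, assuming a caller that holds a transaction handle; alloc_in_nofs_scope()
is a hypothetical helper for illustration, not part of this patch. Within the
save/restore window, every allocation made by the task is implicitly degraded
to GFP_NOFS, so callees that hardcode GFP_KERNEL, such as kobject_add() deep
in its call chain, cannot recurse into filesystem reclaim and deadlock
against the transaction the caller holds.

	#include <linux/sched/mm.h>
	#include <linux/slab.h>

	/* Hypothetical helper: allocate while FS reclaim is forbidden. */
	static void *alloc_in_nofs_scope(size_t size)
	{
		unsigned int nofs_flag;
		void *p;

		nofs_flag = memalloc_nofs_save();  /* enter NOFS scope */
		p = kmalloc(size, GFP_KERNEL);     /* behaves as GFP_NOFS here */
		memalloc_nofs_restore(nofs_flag);  /* leave NOFS scope */
		return p;
	}
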
@@ -8206,7 +8198,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 			inc_block_group_ro(cache, 1);
 	}
 
-	btrfs_add_raid_kobjects(info);
 	btrfs_init_global_block_rsv(info);
 	ret = check_chunk_block_group_mappings(info);
 error:
@@ -8975,6 +8966,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 	struct btrfs_device *device;
 	struct list_head *devices;
 	u64 group_trimmed;
+	u64 range_end = U64_MAX;
 	u64 start;
 	u64 end;
 	u64 trimmed = 0;
@@ -8984,16 +8976,23 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 	int dev_ret = 0;
 	int ret = 0;
 
+	/*
+	 * Check range overflow if range->len is set.
+	 * The default range->len is U64_MAX.
+	 */
+	if (range->len != U64_MAX &&
+	    check_add_overflow(range->start, range->len, &range_end))
+		return -EINVAL;
+
 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
 	for (; cache; cache = next_block_group(cache)) {
-		if (cache->key.objectid >= (range->start + range->len)) {
+		if (cache->key.objectid >= range_end) {
 			btrfs_put_block_group(cache);
 			break;
 		}
 
 		start = max(range->start, cache->key.objectid);
-		end = min(range->start + range->len,
-			  cache->key.objectid + cache->key.offset);
+		end = min(range_end, cache->key.objectid + cache->key.offset);
 
 		if (end - start >= range->minlen) {
 			if (!block_group_cache_done(cache)) {
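
Side note: a standalone userspace sketch of the overflow the new check guards
against; the values are invented for illustration. It uses
__builtin_add_overflow(), the compiler primitive that the kernel's
check_add_overflow() wraps when the compiler provides it: on overflow it
returns true and stores the wrapped sum. That wrapped sum is exactly why the
old open-coded "range->start + range->len" could produce a tiny bound, making
the very first block group compare >= it and the trim loop exit before
trimming anything.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t start = UINT64_MAX - 4096; /* near the top of the space */
		uint64_t len = 8192;                /* start + len wraps past U64_MAX */
		uint64_t end;

		if (__builtin_add_overflow(start, len, &end))
			printf("overflow: wrapped end=%llu, kernel returns -EINVAL\n",
			       (unsigned long long)end);
		else
			printf("end=%llu\n", (unsigned long long)end);
		return 0;
	}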