author    Chris Mason <chris.mason@oracle.com>  2011-01-28 16:24:59 -0500
committer Chris Mason <chris.mason@oracle.com>  2011-01-28 16:24:59 -0500
commit    eab49bec41136460b12ac0fbeceeb88386d538c7
tree      d2ad3e163407d7779d53fdaa0dedcc3b934106de /fs/btrfs
parent    acce952b0263825da32cf10489413dec78053347
parent    4d728ec7aefdca5419d2ebfb28c147e81a4b59f4
Merge branch 'bug-fixes' of git://repo.or.cz/linux-btrfs-devel into btrfs-38
Diffstat (limited to 'fs/btrfs')
-rw-r--r--  fs/btrfs/acl.c                 6
-rw-r--r--  fs/btrfs/disk-io.c             2
-rw-r--r--  fs/btrfs/free-space-cache.c  161
-rw-r--r--  fs/btrfs/inode.c               1
-rw-r--r--  fs/btrfs/ioctl.c               5
-rw-r--r--  fs/btrfs/super.c               7
6 files changed, 117 insertions, 65 deletions
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 6d1410e392d3..889ce1348e64 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -37,6 +37,9 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 	char *value = NULL;
 	struct posix_acl *acl;
 
+	if (!IS_POSIXACL(inode))
+		return NULL;
+
 	acl = get_cached_acl(inode, type);
 	if (acl != ACL_NOT_CACHED)
 		return acl;
@@ -84,6 +87,9 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name,
 	struct posix_acl *acl;
 	int ret = 0;
 
+	if (!IS_POSIXACL(dentry->d_inode))
+		return -EOPNOTSUPP;
+
 	acl = btrfs_get_acl(dentry->d_inode, type);
 
 	if (IS_ERR(acl))
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1a3af9e8e0c4..2887b8be6fdd 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2565,6 +2565,8 @@ int close_ctree(struct btrfs_root *root)
 	kfree(fs_info->chunk_root);
 	kfree(fs_info->dev_root);
 	kfree(fs_info->csum_root);
+	kfree(fs_info);
+
 	return 0;
 }
 
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 60d684266959..a5501edc3c9f 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
 	return entry;
 }
 
-static void unlink_free_space(struct btrfs_block_group_cache *block_group,
-			      struct btrfs_free_space *info)
+static inline void
+__unlink_free_space(struct btrfs_block_group_cache *block_group,
+		    struct btrfs_free_space *info)
 {
 	rb_erase(&info->offset_index, &block_group->free_space_offset);
 	block_group->free_extents--;
+}
+
+static void unlink_free_space(struct btrfs_block_group_cache *block_group,
+			      struct btrfs_free_space *info)
+{
+	__unlink_free_space(block_group, info);
 	block_group->free_space -= info->bytes;
 }
 
@@ -1016,14 +1023,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
 	u64 max_bytes;
 	u64 bitmap_bytes;
 	u64 extent_bytes;
+	u64 size = block_group->key.offset;
 
 	/*
 	 * The goal is to keep the total amount of memory used per 1gb of space
 	 * at or below 32k, so we need to adjust how much memory we allow to be
 	 * used by extent based free space tracking
 	 */
-	max_bytes = MAX_CACHE_BYTES_PER_GIG *
-		(div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
+	if (size < 1024 * 1024 * 1024)
+		max_bytes = MAX_CACHE_BYTES_PER_GIG;
+	else
+		max_bytes = MAX_CACHE_BYTES_PER_GIG *
+			div64_u64(size, 1024 * 1024 * 1024);
 
 	/*
 	 * we want to account for 1 more bitmap than what we have so we can make
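The guard added above matters because div64_u64() rounds down: for a block group smaller than 1 GiB the old expression evaluated to MAX_CACHE_BYTES_PER_GIG * 0 == 0, so no memory at all was budgeted for extent-based free space tracking. A minimal userspace sketch of the arithmetic (the 32 KiB constant is only assumed from the "at or below 32k" comment, not taken from the kernel header):

#include <stdio.h>
#include <stdint.h>

/* Assumed stand-in for MAX_CACHE_BYTES_PER_GIG: 32 KiB per GiB, per the comment above. */
#define CACHE_BYTES_PER_GIG	(32ULL * 1024)
#define GIG			(1024ULL * 1024 * 1024)

static uint64_t old_max_bytes(uint64_t size)
{
	return CACHE_BYTES_PER_GIG * (size / GIG);	/* truncates to 0 below 1 GiB */
}

static uint64_t new_max_bytes(uint64_t size)
{
	if (size < GIG)
		return CACHE_BYTES_PER_GIG;
	return CACHE_BYTES_PER_GIG * (size / GIG);
}

int main(void)
{
	uint64_t size = 256ULL * 1024 * 1024;	/* a 256 MiB block group */

	printf("old: %llu, new: %llu\n",
	       (unsigned long long)old_max_bytes(size),
	       (unsigned long long)new_max_bytes(size));	/* old: 0, new: 32768 */
	return 0;
}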
@@ -1171,6 +1182,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
 	recalculate_thresholds(block_group);
 }
 
+static void free_bitmap(struct btrfs_block_group_cache *block_group,
+			struct btrfs_free_space *bitmap_info)
+{
+	unlink_free_space(block_group, bitmap_info);
+	kfree(bitmap_info->bitmap);
+	kfree(bitmap_info);
+	block_group->total_bitmaps--;
+	recalculate_thresholds(block_group);
+}
+
 static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
 			      struct btrfs_free_space *bitmap_info,
 			      u64 *offset, u64 *bytes)
@@ -1211,13 +1232,8 @@ again:
 
 	if (*bytes) {
 		struct rb_node *next = rb_next(&bitmap_info->offset_index);
-		if (!bitmap_info->bytes) {
-			unlink_free_space(block_group, bitmap_info);
-			kfree(bitmap_info->bitmap);
-			kfree(bitmap_info);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!bitmap_info->bytes)
+			free_bitmap(block_group, bitmap_info);
 
 		/*
 		 * no entry after this bitmap, but we still have bytes to
@@ -1250,13 +1266,8 @@ again:
 			return -EAGAIN;
 
 		goto again;
-	} else if (!bitmap_info->bytes) {
-		unlink_free_space(block_group, bitmap_info);
-		kfree(bitmap_info->bitmap);
-		kfree(bitmap_info);
-		block_group->total_bitmaps--;
-		recalculate_thresholds(block_group);
-	}
+	} else if (!bitmap_info->bytes)
+		free_bitmap(block_group, bitmap_info);
 
 	return 0;
 }
@@ -1359,22 +1370,14 @@ out:
 	return ret;
 }
 
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
-			 u64 offset, u64 bytes)
+bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
+			  struct btrfs_free_space *info, bool update_stat)
 {
-	struct btrfs_free_space *right_info = NULL;
-	struct btrfs_free_space *left_info = NULL;
-	struct btrfs_free_space *info = NULL;
-	int ret = 0;
-
-	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
-	if (!info)
-		return -ENOMEM;
-
-	info->offset = offset;
-	info->bytes = bytes;
-
-	spin_lock(&block_group->tree_lock);
+	struct btrfs_free_space *left_info;
+	struct btrfs_free_space *right_info;
+	bool merged = false;
+	u64 offset = info->offset;
+	u64 bytes = info->bytes;
 
 	/*
 	 * first we want to see if there is free space adjacent to the range we
@@ -1388,37 +1391,62 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
 	else
 		left_info = tree_search_offset(block_group, offset - 1, 0, 0);
 
-	/*
-	 * If there was no extent directly to the left or right of this new
-	 * extent then we know we're going to have to allocate a new extent, so
-	 * before we do that see if we need to drop this into a bitmap
-	 */
-	if ((!left_info || left_info->bitmap) &&
-	    (!right_info || right_info->bitmap)) {
-		ret = insert_into_bitmap(block_group, info);
-
-		if (ret < 0) {
-			goto out;
-		} else if (ret) {
-			ret = 0;
-			goto out;
-		}
-	}
-
 	if (right_info && !right_info->bitmap) {
-		unlink_free_space(block_group, right_info);
+		if (update_stat)
+			unlink_free_space(block_group, right_info);
+		else
+			__unlink_free_space(block_group, right_info);
 		info->bytes += right_info->bytes;
 		kfree(right_info);
+		merged = true;
 	}
 
 	if (left_info && !left_info->bitmap &&
 	    left_info->offset + left_info->bytes == offset) {
-		unlink_free_space(block_group, left_info);
+		if (update_stat)
+			unlink_free_space(block_group, left_info);
+		else
+			__unlink_free_space(block_group, left_info);
 		info->offset = left_info->offset;
 		info->bytes += left_info->bytes;
 		kfree(left_info);
+		merged = true;
 	}
 
+	return merged;
+}
+
+int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+			 u64 offset, u64 bytes)
+{
+	struct btrfs_free_space *info;
+	int ret = 0;
+
+	info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
+	if (!info)
+		return -ENOMEM;
+
+	info->offset = offset;
+	info->bytes = bytes;
+
+	spin_lock(&block_group->tree_lock);
+
+	if (try_merge_free_space(block_group, info, true))
+		goto link;
+
+	/*
+	 * There was no extent directly to the left or right of this new
+	 * extent then we know we're going to have to allocate a new extent, so
+	 * before we do that see if we need to drop this into a bitmap
+	 */
+	ret = insert_into_bitmap(block_group, info);
+	if (ret < 0) {
+		goto out;
+	} else if (ret) {
+		ret = 0;
+		goto out;
+	}
+link:
 	ret = link_free_space(block_group, info);
 	if (ret)
 		kfree(info);
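The hunk above splits the old btrfs_add_free_space() in two: try_merge_free_space() only coalesces the new entry with directly adjacent extent entries (with update_stat == false it uses __unlink_free_space(), which leaves block_group->free_space untouched while merging), and btrfs_add_free_space() falls back to the bitmap path only when no merge happened. A standalone sketch of the coalescing pattern, using a plain struct instead of the rbtree-backed btrfs_free_space (names here are illustrative, not the btrfs API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct span { uint64_t offset, bytes; };

/* Merge 'info' with an immediate left/right neighbour when they touch.
 * Mirrors the shape of try_merge_free_space(): returns true if any merge
 * happened, extending 'info' in place. Purely illustrative. */
static bool try_merge(struct span *info, struct span *left, struct span *right)
{
	bool merged = false;

	if (right && info->offset + info->bytes == right->offset) {
		info->bytes += right->bytes;	/* absorb the right neighbour */
		merged = true;
	}
	if (left && left->offset + left->bytes == info->offset) {
		info->offset = left->offset;	/* absorb the left neighbour */
		info->bytes += left->bytes;
		merged = true;
	}
	return merged;
}

int main(void)
{
	struct span left = { 0, 4096 }, info = { 4096, 4096 }, right = { 8192, 4096 };

	if (try_merge(&info, &left, &right))
		printf("merged: offset=%llu bytes=%llu\n",
		       (unsigned long long)info.offset,
		       (unsigned long long)info.bytes);	/* prints 0 and 12288 */
	return 0;
}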
@@ -1621,6 +1649,7 @@ __btrfs_return_cluster_to_free_space(
 		node = rb_next(&entry->offset_index);
 		rb_erase(&entry->offset_index, &cluster->root);
 		BUG_ON(entry->bitmap);
+		try_merge_free_space(block_group, entry, false);
 		tree_insert_offset(&block_group->free_space_offset,
 				   entry->offset, &entry->offset_index, 0);
 	}
@@ -1685,13 +1714,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
 	ret = offset;
 	if (entry->bitmap) {
 		bitmap_clear_bits(block_group, entry, offset, bytes);
-		if (!entry->bytes) {
-			unlink_free_space(block_group, entry);
-			kfree(entry->bitmap);
-			kfree(entry);
-			block_group->total_bitmaps--;
-			recalculate_thresholds(block_group);
-		}
+		if (!entry->bytes)
+			free_bitmap(block_group, entry);
 	} else {
 		unlink_free_space(block_group, entry);
 		entry->offset += bytes;
@@ -1789,6 +1813,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
 
 	ret = search_start;
 	bitmap_clear_bits(block_group, entry, ret, bytes);
+	if (entry->bytes == 0)
+		free_bitmap(block_group, entry);
 out:
 	spin_unlock(&cluster->lock);
 	spin_unlock(&block_group->tree_lock);
@@ -1842,15 +1868,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 		entry->offset += bytes;
 		entry->bytes -= bytes;
 
-		if (entry->bytes == 0) {
+		if (entry->bytes == 0)
 			rb_erase(&entry->offset_index, &cluster->root);
-			kfree(entry);
-		}
 		break;
 	}
 out:
 	spin_unlock(&cluster->lock);
 
+	if (!ret)
+		return 0;
+
+	spin_lock(&block_group->tree_lock);
+
+	block_group->free_space -= bytes;
+	if (entry->bytes == 0) {
+		block_group->free_extents--;
+		kfree(entry);
+	}
+
+	spin_unlock(&block_group->tree_lock);
+
 	return ret;
 }
 
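Mechanically, the hunk above defers the per-block-group bookkeeping: the cluster entry is consumed and unhooked under cluster->lock, while free_space/free_extents are updated and the exhausted entry is freed only after taking block_group->tree_lock, which is what protects those counters elsewhere in this file. A simplified, compilable sketch of that two-stage pattern, with pthread mutexes standing in for the kernel spinlocks (types and names are made up for illustration):

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

struct entry   { uint64_t bytes; };
struct cluster { pthread_mutex_t lock; struct entry *entry; };
struct group   { pthread_mutex_t tree_lock; uint64_t free_space; uint64_t free_extents; };

static uint64_t alloc_from_cluster(struct group *bg, struct cluster *c, uint64_t bytes)
{
	struct entry *e;
	uint64_t ret = 0;

	pthread_mutex_lock(&c->lock);
	e = c->entry;
	if (e && e->bytes >= bytes) {
		e->bytes -= bytes;		/* consume under the cluster lock */
		if (e->bytes == 0)
			c->entry = NULL;	/* unhook it, but do not free yet */
		ret = bytes;
	}
	pthread_mutex_unlock(&c->lock);

	if (!ret)
		return 0;

	pthread_mutex_lock(&bg->tree_lock);	/* account under the group-wide lock */
	bg->free_space -= bytes;
	if (e->bytes == 0) {
		bg->free_extents--;
		free(e);
	}
	pthread_mutex_unlock(&bg->tree_lock);

	return ret;
}

int main(void)
{
	struct entry *e = malloc(sizeof(*e));
	struct cluster c = { PTHREAD_MUTEX_INITIALIZER, e };
	struct group bg = { PTHREAD_MUTEX_INITIALIZER, 8192, 1 };

	if (!e)
		return 1;
	e->bytes = 4096;
	return alloc_from_cluster(&bg, &c, 4096) == 4096 ? 0 : 1;
}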
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 1562765c8e6a..2c9a2f7d5631 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1557,6 +1557,7 @@ out:
 out_page:
 	unlock_page(page);
 	page_cache_release(page);
+	kfree(fixup);
 }
 
 /*
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a506a22b522a..edd82becbb9e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1898,7 +1898,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 
 		memcpy(&new_key, &key, sizeof(new_key));
 		new_key.objectid = inode->i_ino;
-		new_key.offset = key.offset + destoff - off;
+		if (off <= key.offset)
+			new_key.offset = key.offset + destoff - off;
+		else
+			new_key.offset = destoff;
 
 		trans = btrfs_start_transaction(root, 1);
 		if (IS_ERR(trans)) {
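The clone fix above covers the case where the requested source range starts inside an extent item, i.e. off > key.offset: with unsigned arithmetic the old key.offset + destoff - off either lands before destoff or wraps around, while the new code pins the item at destoff. A tiny worked example with made-up offsets:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Clone from source offset 8192 to destination offset 0, where the
	 * extent item found in the source tree starts at offset 4096. */
	uint64_t off = 8192, destoff = 0, key_offset = 4096;
	uint64_t old_off = key_offset + destoff - off;	/* wraps to 0xfffffffffffff000 */
	uint64_t new_off = (off <= key_offset) ? key_offset + destoff - off : destoff;

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old_off, (unsigned long long)new_off);
	return 0;
}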
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 52e903b0a293..f4e45fdded30 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -383,7 +383,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 			struct btrfs_fs_devices **fs_devices)
 {
 	substring_t args[MAX_OPT_ARGS];
-	char *opts, *p;
+	char *opts, *orig, *p;
 	int error = 0;
 	int intarg;
 
@@ -397,6 +397,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 	opts = kstrdup(options, GFP_KERNEL);
 	if (!opts)
 		return -ENOMEM;
+	orig = opts;
 
 	while ((p = strsep(&opts, ",")) != NULL) {
 		int token;
@@ -432,7 +433,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
 	}
 
 out_free_opts:
-	kfree(opts);
+	kfree(orig);
 out:
 	/*
 	 * If no subvolume name is specified we use the default one. Allocate
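The orig/opts split above is needed because strsep() advances the pointer it is handed and leaves it NULL once the option string is fully consumed, so the old kfree(opts) freed NULL and leaked the kstrdup() copy. A userspace sketch of the same pattern using the libc strdup()/strsep()/free() (not the kernel helpers):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *opts = strdup("subvol=home,device=/dev/sdb");
	char *orig = opts;	/* remember the allocation before strsep() moves opts */
	char *p;

	if (!opts)
		return 1;

	while ((p = strsep(&opts, ",")) != NULL)
		printf("token: %s\n", p);

	/* opts is NULL here; free(opts) would silently leak the strdup() buffer. */
	free(orig);
	return 0;
}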
@@ -760,6 +761,8 @@ static int btrfs_get_sb(struct file_system_type *fs_type, int flags,
 		}
 
 		btrfs_close_devices(fs_devices);
+		kfree(fs_info);
+		kfree(tree_root);
 	} else {
 		char b[BDEVNAME_SIZE];
 