Diffstat (limited to 'fs/btrfs')

-rw-r--r--   fs/btrfs/ctree.c            |  10
-rw-r--r--   fs/btrfs/disk-io.c          |   5
-rw-r--r--   fs/btrfs/extent-tree.c      |  55
-rw-r--r--   fs/btrfs/extent_io.h        |   2
-rw-r--r--   fs/btrfs/free-space-cache.c | 163
-rw-r--r--   fs/btrfs/inode.c            |   4
-rw-r--r--   fs/btrfs/ioctl.c            |  23
-rw-r--r--   fs/btrfs/scrub.c            |  69
-rw-r--r--   fs/btrfs/transaction.c      |   7
-rw-r--r--   fs/btrfs/volumes.c          |   8

10 files changed, 233 insertions(+), 113 deletions(-)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index d84089349c82..2e667868e0d2 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1228,6 +1228,7 @@ static void reada_for_search(struct btrfs_root *root,
 	u32 nr;
 	u32 blocksize;
 	u32 nscan = 0;
+	bool map = true;
 
 	if (level != 1)
 		return;
@@ -1249,8 +1250,11 @@ static void reada_for_search(struct btrfs_root *root,
 
 	nritems = btrfs_header_nritems(node);
 	nr = slot;
+	if (node->map_token || path->skip_locking)
+		map = false;
+
 	while (1) {
-		if (!node->map_token) {
+		if (map && !node->map_token) {
 			unsigned long offset = btrfs_node_key_ptr_offset(nr);
 			map_private_extent_buffer(node, offset,
 						  sizeof(struct btrfs_key_ptr),
@@ -1277,7 +1281,7 @@ static void reada_for_search(struct btrfs_root *root,
 		if ((search <= target && target - search <= 65536) ||
 		    (search > target && search - target <= 65536)) {
 			gen = btrfs_node_ptr_generation(node, nr);
-			if (node->map_token) {
+			if (map && node->map_token) {
 				unmap_extent_buffer(node, node->map_token,
 						    KM_USER1);
 				node->map_token = NULL;
@@ -1289,7 +1293,7 @@ static void reada_for_search(struct btrfs_root *root,
 		if ((nread > 65536 || nscan > 32))
 			break;
 	}
-	if (node->map_token) {
+	if (map && node->map_token) {
 		unmap_extent_buffer(node, node->map_token, KM_USER1);
 		node->map_token = NULL;
 	}
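
The reada_for_search() hunks above gate the temporary extent-buffer mapping behind a single map flag computed before the loop, and keep the existing 64KiB proximity test on block pointers. As an aside, a minimal standalone sketch of that unsigned "within distance" test (illustrative only, not code from the tree; within_distance() is a made-up name):

    #include <stdint.h>
    #include <stdio.h>

    /* Same shape as the readahead check: branch on the ordering first so the
     * unsigned subtraction can never wrap around. */
    static int within_distance(uint64_t a, uint64_t b, uint64_t dist)
    {
        return (a <= b && b - a <= dist) || (a > b && a - b <= dist);
    }

    int main(void)
    {
        printf("%d\n", within_distance(4096, 69632, 65536));  /* 1: exactly 64KiB apart */
        printf("%d\n", within_distance(69633, 0, 65536));     /* 0: too far */
        return 0;
    }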
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a203d363184d..9f68c6898653 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1668,8 +1668,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	init_waitqueue_head(&fs_info->scrub_pause_wait);
 	init_rwsem(&fs_info->scrub_super_lock);
 	fs_info->scrub_workers_refcnt = 0;
-	btrfs_init_workers(&fs_info->scrub_workers, "scrub",
-			   fs_info->thread_pool_size, &fs_info->generic_worker);
 
 	sb->s_blocksize = 4096;
 	sb->s_blocksize_bits = blksize_bits(4096);
@@ -2911,9 +2909,8 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
 
 	INIT_LIST_HEAD(&splice);
 
-	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
-
 	spin_lock(&root->fs_info->delalloc_lock);
+	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
 
 	while (!list_empty(&splice)) {
 		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
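
The btrfs_destroy_delalloc_inodes() hunk moves list_splice_init() under delalloc_lock, so the shared list is only detached while its lock is held. A userspace analogue of that pattern (not kernel code; the types and names here are invented), using a pthread mutex:

    #include <pthread.h>
    #include <stdio.h>

    struct node { int v; struct node *next; };

    static struct node *shared_head;
    static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Detach the whole shared list onto a private pointer under the lock,
     * then let the caller walk the private copy with the lock dropped. */
    static struct node *detach_all(void)
    {
        struct node *splice;

        pthread_mutex_lock(&shared_lock);
        splice = shared_head;
        shared_head = NULL;
        pthread_mutex_unlock(&shared_lock);
        return splice;
    }

    int main(void)
    {
        struct node a = { 1, NULL }, b = { 2, &a };
        struct node *n;

        shared_head = &b;
        for (n = detach_all(); n; n = n->next)
            printf("%d\n", n->v);
        return 0;
    }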
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 5b9b6b6df242..b42efc2ded51 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3089,6 +3089,13 @@ alloc:
 		}
 		goto again;
 	}
+
+	/*
+	 * If we have less pinned bytes than we want to allocate then
+	 * don't bother committing the transaction, it won't help us.
+	 */
+	if (data_sinfo->bytes_pinned < bytes)
+		committed = 1;
 	spin_unlock(&data_sinfo->lock);
 
 	/* commit the current transaction and try again */
@@ -5211,9 +5218,7 @@ loop:
 	 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
 	 * again
 	 */
-	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
-	    (found_uncached_bg || empty_size || empty_cluster ||
-	     allowed_chunk_alloc)) {
+	if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
 		index = 0;
 		if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
 			found_uncached_bg = false;
@@ -5253,32 +5258,36 @@ loop:
 			goto search;
 		}
 
-		if (loop < LOOP_CACHING_WAIT) {
-			loop++;
-			goto search;
-		}
+		loop++;
 
 		if (loop == LOOP_ALLOC_CHUNK) {
-			empty_size = 0;
-			empty_cluster = 0;
-		}
+			if (allowed_chunk_alloc) {
+				ret = do_chunk_alloc(trans, root, num_bytes +
+						     2 * 1024 * 1024, data,
+						     CHUNK_ALLOC_LIMITED);
+				allowed_chunk_alloc = 0;
+				if (ret == 1)
+					done_chunk_alloc = 1;
+			} else if (!done_chunk_alloc &&
+				   space_info->force_alloc ==
+				   CHUNK_ALLOC_NO_FORCE) {
+				space_info->force_alloc = CHUNK_ALLOC_LIMITED;
+			}
 
-		if (allowed_chunk_alloc) {
-			ret = do_chunk_alloc(trans, root, num_bytes +
-					     2 * 1024 * 1024, data,
-					     CHUNK_ALLOC_LIMITED);
-			allowed_chunk_alloc = 0;
-			done_chunk_alloc = 1;
-		} else if (!done_chunk_alloc &&
-			   space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
-			space_info->force_alloc = CHUNK_ALLOC_LIMITED;
+			/*
+			 * We didn't allocate a chunk, go ahead and drop the
+			 * empty size and loop again.
+			 */
+			if (!done_chunk_alloc)
+				loop = LOOP_NO_EMPTY_SIZE;
 		}
 
-		if (loop < LOOP_NO_EMPTY_SIZE) {
-			loop++;
-			goto search;
+		if (loop == LOOP_NO_EMPTY_SIZE) {
+			empty_size = 0;
+			empty_cluster = 0;
 		}
-		ret = -ENOSPC;
+
+		goto search;
 	} else if (!ins->objectid) {
 		ret = -ENOSPC;
 	} else if (ins->objectid) {
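
The first extent-tree.c hunk adds a cheap heuristic: committing a transaction can only return bytes that are currently pinned, so when fewer bytes are pinned than the allocation needs, the commit-and-retry path is skipped (committed = 1). A trivial standalone illustration of that decision (not btrfs code; the helper name is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Returns 1 when forcing a commit cannot possibly satisfy the request. */
    static int commit_cannot_help(uint64_t bytes_pinned, uint64_t bytes_wanted)
    {
        return bytes_pinned < bytes_wanted;
    }

    int main(void)
    {
        printf("%d\n", commit_cannot_help(4096, 1024 * 1024));   /* 1: skip the commit */
        printf("%d\n", commit_cannot_help(1024 * 1024, 4096));   /* 0: commit may free enough */
        return 0;
    }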
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 4e8445a4757c..a11a92ee2d30 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -126,9 +126,9 @@ struct extent_buffer {
 	unsigned long map_len;
 	struct page *first_page;
 	unsigned long bflags;
-	atomic_t refs;
 	struct list_head leak_list;
 	struct rcu_head rcu_head;
+	atomic_t refs;
 
 	/* the spinlock is used to protect most operations */
 	spinlock_t lock;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index ad144736a5fd..9f985a429877 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -250,7 +250,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	pgoff_t index = 0;
 	unsigned long first_page_offset;
 	int num_checksums;
-	int ret = 0, ret2;
+	int ret = 0;
 
 	INIT_LIST_HEAD(&bitmaps);
 
@@ -421,11 +421,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 				goto free_cache;
 			}
 			spin_lock(&ctl->tree_lock);
-			ret2 = link_free_space(ctl, e);
+			ret = link_free_space(ctl, e);
 			ctl->total_bitmaps++;
 			ctl->op->recalc_thresholds(ctl);
 			spin_unlock(&ctl->tree_lock);
-			list_add_tail(&e->list, &bitmaps);
 			if (ret) {
 				printk(KERN_ERR "Duplicate entries in "
 				       "free space cache, dumping\n");
@@ -434,6 +433,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 				page_cache_release(page);
 				goto free_cache;
 			}
+			list_add_tail(&e->list, &bitmaps);
 		}
 
 		num_entries--;
@@ -1417,6 +1417,23 @@ again:
 	return 0;
 }
 
+static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
+			       struct btrfs_free_space *info, u64 offset,
+			       u64 bytes)
+{
+	u64 bytes_to_set = 0;
+	u64 end;
+
+	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
+
+	bytes_to_set = min(end - offset, bytes);
+
+	bitmap_set_bits(ctl, info, offset, bytes_to_set);
+
+	return bytes_to_set;
+
+}
+
 static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 		       struct btrfs_free_space *info)
 {
@@ -1453,12 +1470,18 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
 	return true;
 }
 
+static struct btrfs_free_space_op free_space_op = {
+	.recalc_thresholds	= recalculate_thresholds,
+	.use_bitmap		= use_bitmap,
+};
+
 static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
 			      struct btrfs_free_space *info)
 {
 	struct btrfs_free_space *bitmap_info;
+	struct btrfs_block_group_cache *block_group = NULL;
 	int added = 0;
-	u64 bytes, offset, end;
+	u64 bytes, offset, bytes_added;
 	int ret;
 
 	bytes = info->bytes;
@@ -1467,7 +1490,49 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
 	if (!ctl->op->use_bitmap(ctl, info))
 		return 0;
 
+	if (ctl->op == &free_space_op)
+		block_group = ctl->private;
 again:
+	/*
+	 * Since we link bitmaps right into the cluster we need to see if we
+	 * have a cluster here, and if so and it has our bitmap we need to add
+	 * the free space to that bitmap.
+	 */
+	if (block_group && !list_empty(&block_group->cluster_list)) {
+		struct btrfs_free_cluster *cluster;
+		struct rb_node *node;
+		struct btrfs_free_space *entry;
+
+		cluster = list_entry(block_group->cluster_list.next,
+				     struct btrfs_free_cluster,
+				     block_group_list);
+		spin_lock(&cluster->lock);
+		node = rb_first(&cluster->root);
+		if (!node) {
+			spin_unlock(&cluster->lock);
+			goto no_cluster_bitmap;
+		}
+
+		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+		if (!entry->bitmap) {
+			spin_unlock(&cluster->lock);
+			goto no_cluster_bitmap;
+		}
+
+		if (entry->offset == offset_to_bitmap(ctl, offset)) {
+			bytes_added = add_bytes_to_bitmap(ctl, entry,
+							  offset, bytes);
+			bytes -= bytes_added;
+			offset += bytes_added;
+		}
+		spin_unlock(&cluster->lock);
+		if (!bytes) {
+			ret = 1;
+			goto out;
+		}
+	}
+
+no_cluster_bitmap:
 	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
 					 1, 0);
 	if (!bitmap_info) {
@@ -1475,19 +1540,10 @@ again:
 		goto new_bitmap;
 	}
 
-	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
-
-	if (offset >= bitmap_info->offset && offset + bytes > end) {
-		bitmap_set_bits(ctl, bitmap_info, offset, end - offset);
-		bytes -= end - offset;
-		offset = end;
-		added = 0;
-	} else if (offset >= bitmap_info->offset && offset + bytes <= end) {
-		bitmap_set_bits(ctl, bitmap_info, offset, bytes);
-		bytes = 0;
-	} else {
-		BUG();
-	}
+	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
+	bytes -= bytes_added;
+	offset += bytes_added;
+	added = 0;
 
 	if (!bytes) {
 		ret = 1;
@@ -1766,11 +1822,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
 	       "\n", count);
 }
 
-static struct btrfs_free_space_op free_space_op = {
-	.recalc_thresholds	= recalculate_thresholds,
-	.use_bitmap		= use_bitmap,
-};
-
 void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -2142,9 +2193,11 @@ again:
 /*
  * This searches the block group for just extents to fill the cluster with.
  */
-static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
-				   struct btrfs_free_cluster *cluster,
-				   u64 offset, u64 bytes, u64 min_bytes)
+static noinline int
+setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
+			struct btrfs_free_cluster *cluster,
+			struct list_head *bitmaps, u64 offset, u64 bytes,
+			u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *first = NULL;
@@ -2166,6 +2219,8 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 	 * extent entry.
 	 */
 	while (entry->bitmap) {
+		if (list_empty(&entry->list))
+			list_add_tail(&entry->list, bitmaps);
 		node = rb_next(&entry->offset_index);
 		if (!node)
 			return -ENOSPC;
@@ -2185,8 +2240,12 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 			return -ENOSPC;
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
 
-		if (entry->bitmap)
+		if (entry->bitmap) {
+			if (list_empty(&entry->list))
+				list_add_tail(&entry->list, bitmaps);
 			continue;
+		}
+
 		/*
 		 * we haven't filled the empty size and the window is
 		 * very large. reset and try again
@@ -2238,9 +2297,11 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
  * This specifically looks for bitmaps that may work in the cluster, we assume
  * that we have already failed to find extents that will work.
  */
-static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
-				struct btrfs_free_cluster *cluster,
-				u64 offset, u64 bytes, u64 min_bytes)
+static noinline int
+setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
+		     struct btrfs_free_cluster *cluster,
+		     struct list_head *bitmaps, u64 offset, u64 bytes,
+		     u64 min_bytes)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
 	struct btrfs_free_space *entry;
@@ -2250,10 +2311,39 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 	if (ctl->total_bitmaps == 0)
 		return -ENOSPC;
 
+	/*
+	 * First check our cached list of bitmaps and see if there is an entry
+	 * here that will work.
+	 */
+	list_for_each_entry(entry, bitmaps, list) {
+		if (entry->bytes < min_bytes)
+			continue;
+		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
+					   bytes, min_bytes);
+		if (!ret)
+			return 0;
+	}
+
+	/*
+	 * If we do have entries on our list and we are here then we didn't find
+	 * anything, so go ahead and get the next entry after the last entry in
+	 * this list and start the search from there.
+	 */
+	if (!list_empty(bitmaps)) {
+		entry = list_entry(bitmaps->prev, struct btrfs_free_space,
+				   list);
+		node = rb_next(&entry->offset_index);
+		if (!node)
+			return -ENOSPC;
+		entry = rb_entry(node, struct btrfs_free_space, offset_index);
+		goto search;
+	}
+
 	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
 	if (!entry)
 		return -ENOSPC;
 
+search:
 	node = &entry->offset_index;
 	do {
 		entry = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -2284,6 +2374,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 			     u64 offset, u64 bytes, u64 empty_size)
 {
 	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+	struct list_head bitmaps;
+	struct btrfs_free_space *entry, *tmp;
 	u64 min_bytes;
 	int ret;
 
@@ -2322,11 +2414,16 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
 		goto out;
 	}
 
-	ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes,
-				      min_bytes);
+	INIT_LIST_HEAD(&bitmaps);
+	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
+				      bytes, min_bytes);
 	if (ret)
-		ret = setup_cluster_bitmap(block_group, cluster, offset,
-					   bytes, min_bytes);
+		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
+					   offset, bytes, min_bytes);
+
+	/* Clear our temporary list */
+	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
+		list_del_init(&entry->list);
 
 	if (!ret) {
 		atomic_inc(&block_group->count);
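
The new add_bytes_to_bitmap() helper above clamps the range it marks to the end of a single bitmap window and returns how many bytes it consumed, so callers subtract that from bytes, advance offset, and retry in the next bitmap. A standalone sketch of that clamp-and-advance loop (illustrative only; the window size and names are stand-ins, not the btrfs implementation):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for BITS_PER_BITMAP * ctl->unit. */
    #define WINDOW_BYTES (32768ULL * 4096ULL)

    static uint64_t add_to_window(uint64_t window_start, uint64_t offset, uint64_t bytes)
    {
        uint64_t end = window_start + WINDOW_BYTES;
        uint64_t bytes_to_set = (end - offset < bytes) ? end - offset : bytes;

        /* a real implementation would set bitmap bits for
         * [offset, offset + bytes_to_set) here */
        return bytes_to_set;
    }

    int main(void)
    {
        uint64_t offset = WINDOW_BYTES - 8192, bytes = 16384;

        while (bytes) {
            uint64_t window = offset - (offset % WINDOW_BYTES);
            uint64_t added = add_to_window(window, offset, bytes);

            printf("set %llu bytes at %llu\n",
                   (unsigned long long)added, (unsigned long long)offset);
            bytes -= added;
            offset += added;
        }
        return 0;
    }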
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ebf95f7a44d6..751ddf8fc58a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1986,7 +1986,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	}
 
 	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
-		return 0;
+		goto good;
 
 	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
 	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
@@ -3646,7 +3646,7 @@ void btrfs_evict_inode(struct inode *inode)
 	btrfs_i_size_write(inode, 0);
 
 	while (1) {
-		trans = btrfs_start_transaction(root, 0);
+		trans = btrfs_join_transaction(root);
 		BUG_ON(IS_ERR(trans));
 		trans->block_rsv = root->orphan_block_rsv;
 
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index ac37040e426a..b793d112d1f6 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2054,29 +2054,34 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
 
 static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
 {
-	struct btrfs_ioctl_fs_info_args fi_args;
+	struct btrfs_ioctl_fs_info_args *fi_args;
 	struct btrfs_device *device;
 	struct btrfs_device *next;
 	struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+	int ret = 0;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	fi_args.num_devices = fs_devices->num_devices;
-	fi_args.max_id = 0;
-	memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid));
+	fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
+	if (!fi_args)
+		return -ENOMEM;
+
+	fi_args->num_devices = fs_devices->num_devices;
+	memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
 
 	mutex_lock(&fs_devices->device_list_mutex);
 	list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
-		if (device->devid > fi_args.max_id)
-			fi_args.max_id = device->devid;
+		if (device->devid > fi_args->max_id)
+			fi_args->max_id = device->devid;
 	}
 	mutex_unlock(&fs_devices->device_list_mutex);
 
-	if (copy_to_user(arg, &fi_args, sizeof(fi_args)))
-		return -EFAULT;
+	if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
+		ret = -EFAULT;
 
-	return 0;
+	kfree(fi_args);
+	return ret;
 }
 
 static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
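
btrfs_ioctl_fs_info() above switches struct btrfs_ioctl_fs_info_args from the stack to kzalloc(), since the ioctl argument struct is fairly large, and funnels every return through a single kfree(). A userspace sketch of the same shape (not the kernel code; the struct layout and error values here are invented):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    struct fs_info_args {               /* stand-in for btrfs_ioctl_fs_info_args */
        uint64_t max_id;
        uint64_t num_devices;
        uint8_t  fsid[16];
        uint64_t reserved[124];         /* large reserved tail: why the heap is used */
    };

    static int fill_fs_info(struct fs_info_args *out, const uint8_t *fsid, uint64_t ndev)
    {
        struct fs_info_args *args = calloc(1, sizeof(*args));
        int ret = 0;

        if (!args)
            return -1;                  /* -ENOMEM in the kernel version */

        args->num_devices = ndev;
        memcpy(args->fsid, fsid, sizeof(args->fsid));

        if (!out)
            ret = -2;                   /* stands in for a copy_to_user() failure */
        else
            memcpy(out, args, sizeof(*args));

        free(args);                     /* single exit: always freed */
        return ret;
    }

    int main(void)
    {
        struct fs_info_args out;
        uint8_t fsid[16] = { 0 };

        return fill_fs_info(&out, fsid, 2);
    }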
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index df50fd1eca8f..a8d03d5efb5d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -16,13 +16,7 @@
  * Boston, MA 021110-1307, USA.
  */
 
-#include <linux/sched.h>
-#include <linux/pagemap.h>
-#include <linux/writeback.h>
 #include <linux/blkdev.h>
-#include <linux/rbtree.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
 #include "ctree.h"
 #include "volumes.h"
 #include "disk-io.h"
@@ -804,18 +798,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
-		goto out;
-
-	l = path->nodes[0];
-	slot = path->slots[0];
-	btrfs_item_key_to_cpu(l, &key, slot);
-	if (key.objectid != logical) {
-		ret = btrfs_previous_item(root, path, 0,
-					  BTRFS_EXTENT_ITEM_KEY);
-		if (ret < 0)
-			goto out;
-	}
+		goto out_noplug;
 
+	/*
+	 * we might miss half an extent here, but that doesn't matter,
+	 * as it's only the prefetch
+	 */
 	while (1) {
 		l = path->nodes[0];
 		slot = path->slots[0];
@@ -824,7 +812,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
 			if (ret == 0)
 				continue;
 			if (ret < 0)
-				goto out;
+				goto out_noplug;
 
 			break;
 		}
@@ -906,15 +894,20 @@ again:
 	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 	if (ret < 0)
 		goto out;
-
-	l = path->nodes[0];
-	slot = path->slots[0];
-	btrfs_item_key_to_cpu(l, &key, slot);
-	if (key.objectid != logical) {
+	if (ret > 0) {
 		ret = btrfs_previous_item(root, path, 0,
 					  BTRFS_EXTENT_ITEM_KEY);
 		if (ret < 0)
 			goto out;
+		if (ret > 0) {
+			/* there's no smaller item, so stick with the
+			 * larger one */
+			btrfs_release_path(path);
+			ret = btrfs_search_slot(NULL, root, &key,
+						path, 0, 0);
+			if (ret < 0)
+				goto out;
+		}
 	}
 
 	while (1) {
@@ -989,6 +982,7 @@ next:
 
 out:
 	blk_finish_plug(&plug);
+out_noplug:
 	btrfs_free_path(path);
 	return ret < 0 ? ret : 0;
 }
@@ -1064,8 +1058,15 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
 	while (1) {
 		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 		if (ret < 0)
-			goto out;
-		ret = 0;
+			break;
+		if (ret > 0) {
+			if (path->slots[0] >=
+			    btrfs_header_nritems(path->nodes[0])) {
+				ret = btrfs_next_leaf(root, path);
+				if (ret)
+					break;
+			}
+		}
 
 		l = path->nodes[0];
 		slot = path->slots[0];
@@ -1075,7 +1076,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
 		if (found_key.objectid != sdev->dev->devid)
 			break;
 
-		if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
+		if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
 			break;
 
 		if (found_key.offset >= end)
@@ -1104,7 +1105,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
 		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
 		if (!cache) {
 			ret = -ENOENT;
-			goto out;
+			break;
 		}
 		ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
 				  chunk_offset, length);
@@ -1116,9 +1117,13 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
 		btrfs_release_path(path);
 	}
 
-out:
 	btrfs_free_path(path);
-	return ret;
+
+	/*
+	 * ret can still be 1 from search_slot or next_leaf,
+	 * that's not an error
+	 */
+	return ret < 0 ? ret : 0;
 }
 
 static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
@@ -1155,8 +1160,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
 	struct btrfs_fs_info *fs_info = root->fs_info;
 
 	mutex_lock(&fs_info->scrub_lock);
-	if (fs_info->scrub_workers_refcnt == 0)
+	if (fs_info->scrub_workers_refcnt == 0) {
+		btrfs_init_workers(&fs_info->scrub_workers, "scrub",
+			   fs_info->thread_pool_size, &fs_info->generic_worker);
+		fs_info->scrub_workers.idle_thresh = 4;
 		btrfs_start_workers(&fs_info->scrub_workers, 1);
+	}
 	++fs_info->scrub_workers_refcnt;
 	mutex_unlock(&fs_info->scrub_lock);
 
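
Several scrub.c hunks above deal with btrfs_search_slot() and btrfs_next_leaf() returning a positive value, which means "no exact match / no more leaves" rather than an error; the enumeration loop therefore breaks out and normalizes the result with return ret < 0 ? ret : 0. A minimal standalone illustration of that convention (names are made up, not btrfs API):

    #include <stdio.h>

    /* <0: real error, 0: exact hit, >0: "not found", which callers treat as
     * a normal stop condition rather than a failure. */
    static int normalize_search_result(int ret)
    {
        return ret < 0 ? ret : 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               normalize_search_result(-5),     /* error is propagated */
               normalize_search_result(0),      /* exact match */
               normalize_search_result(1));     /* not found -> success */
        return 0;
    }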
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index dd719662340e..2b3590b9fe98 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -349,7 +349,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
 					    list) {
 			if (t->in_commit) {
 				if (t->commit_done)
-					goto out;
+					break;
 				cur_trans = t;
 				atomic_inc(&cur_trans->use_count);
 				break;
@@ -1118,8 +1118,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
 		wait_current_trans_commit_start_and_unblock(root, cur_trans);
 	else
 		wait_current_trans_commit_start(root, cur_trans);
-	put_transaction(cur_trans);
 
+	if (current->journal_info == trans)
+		current->journal_info = NULL;
+
+	put_transaction(cur_trans);
 	return 0;
 }
 
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index da541dfca2e3..1efa56e18f9b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -689,12 +689,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 	transid = btrfs_super_generation(disk_super);
 	if (disk_super->label[0])
 		printk(KERN_INFO "device label %s ", disk_super->label);
-	else {
-		/* FIXME, make a readl uuid parser */
-		printk(KERN_INFO "device fsid %llx-%llx ",
-		       *(unsigned long long *)disk_super->fsid,
-		       *(unsigned long long *)(disk_super->fsid + 8));
-	}
+	else
+		printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
 	printk(KERN_CONT "devid %llu transid %llu %s\n",
 	       (unsigned long long)devid, (unsigned long long)transid, path);
 	ret = device_list_add(path, disk_super, devid, fs_devices_ret);