Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/free-space-cache.c  82
1 file changed, 78 insertions, 4 deletions
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 13575de85543..f561c953205b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -24,6 +24,7 @@
 #include "free-space-cache.h"
 #include "transaction.h"
 #include "disk-io.h"
+#include "extent_io.h"
 
 #define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
 #define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
@@ -224,6 +225,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 	u64 num_entries;
 	u64 num_bitmaps;
 	u64 generation;
+	u64 used = btrfs_block_group_used(&block_group->item);
 	u32 cur_crc = ~(u32)0;
 	pgoff_t index = 0;
 	unsigned long first_page_offset;
@@ -469,6 +471,17 @@ next:
 		index++;
 	}
 
+	spin_lock(&block_group->tree_lock);
+	if (block_group->free_space != (block_group->key.offset - used -
+					block_group->bytes_super)) {
+		spin_unlock(&block_group->tree_lock);
+		printk(KERN_ERR "block group %llu has a wrong amount of free "
+		       "space\n", block_group->key.objectid);
+		ret = 0;
+		goto free_cache;
+	}
+	spin_unlock(&block_group->tree_lock);
+
 	ret = 1;
 out:
 	kfree(checksums);
@@ -497,8 +510,11 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	struct list_head *pos, *n;
 	struct page *page;
 	struct extent_state *cached_state = NULL;
+	struct btrfs_free_cluster *cluster = NULL;
+	struct extent_io_tree *unpin = NULL;
 	struct list_head bitmap_list;
 	struct btrfs_key key;
+	u64 start, end, len;
 	u64 bytes = 0;
 	u32 *crc, *checksums;
 	pgoff_t index = 0, last_index = 0;
@@ -507,6 +523,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	int entries = 0;
 	int bitmaps = 0;
 	int ret = 0;
+	bool next_page = false;
 
 	root = root->fs_info->tree_root;
 
@@ -553,6 +570,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	 */
 	first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
 
+	/* Get the cluster for this block_group if it exists */
+	if (!list_empty(&block_group->cluster_list))
+		cluster = list_entry(block_group->cluster_list.next,
+				     struct btrfs_free_cluster,
+				     block_group_list);
+
+	/*
+	 * We shouldn't have switched the pinned extents yet so this is the
+	 * right one
+	 */
+	unpin = root->fs_info->pinned_extents;
+
 	/*
 	 * Lock all pages first so we can lock the extent safely.
 	 *
@@ -582,6 +611,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
 			 0, &cached_state, GFP_NOFS);
 
+	/*
+	 * When searching for pinned extents, we need to start at our start
+	 * offset.
+	 */
+	start = block_group->key.objectid;
+
 	/* Write out the extent entries */
 	do {
 		struct btrfs_free_space_entry *entry;
@@ -589,6 +624,8 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		unsigned long offset = 0;
 		unsigned long start_offset = 0;
 
+		next_page = false;
+
 		if (index == 0) {
 			start_offset = first_page_offset;
 			offset = start_offset;
@@ -600,7 +637,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		entry = addr + start_offset;
 
 		memset(addr, 0, PAGE_CACHE_SIZE);
-		while (1) {
+		while (node && !next_page) {
 			struct btrfs_free_space *e;
 
 			e = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -616,12 +653,49 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 				entry->type = BTRFS_FREE_SPACE_EXTENT;
 			}
 			node = rb_next(node);
-			if (!node)
-				break;
+			if (!node && cluster) {
+				node = rb_first(&cluster->root);
+				cluster = NULL;
+			}
 			offset += sizeof(struct btrfs_free_space_entry);
 			if (offset + sizeof(struct btrfs_free_space_entry) >=
 			    PAGE_CACHE_SIZE)
+				next_page = true;
+			entry++;
+		}
+
+		/*
+		 * We want to add any pinned extents to our free space cache
+		 * so we don't leak the space
+		 */
+		while (!next_page && (start < block_group->key.objectid +
+				      block_group->key.offset)) {
+			ret = find_first_extent_bit(unpin, start, &start, &end,
+						    EXTENT_DIRTY);
+			if (ret) {
+				ret = 0;
+				break;
+			}
+
+			/* This pinned extent is out of our range */
+			if (start >= block_group->key.objectid +
+			    block_group->key.offset)
 				break;
+
+			len = block_group->key.objectid +
+				block_group->key.offset - start;
+			len = min(len, end + 1 - start);
+
+			entries++;
+			entry->offset = cpu_to_le64(start);
+			entry->bytes = cpu_to_le64(len);
+			entry->type = BTRFS_FREE_SPACE_EXTENT;
+
+			start = end + 1;
+			offset += sizeof(struct btrfs_free_space_entry);
+			if (offset + sizeof(struct btrfs_free_space_entry) >=
+			    PAGE_CACHE_SIZE)
+				next_page = true;
 			entry++;
 		}
 		*crc = ~(u32)0;
@@ -652,7 +726,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		page_cache_release(page);
 
 		index++;
-	} while (node);
+	} while (node || next_page);
 
 	/* Write out the bitmaps */
 	list_for_each_safe(pos, n, &bitmap_list) {
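
Editor's note: the core arithmetic added by this patch is the clamping of each pinned (EXTENT_DIRTY) range to the block group's boundaries before it is written out as a BTRFS_FREE_SPACE_EXTENT entry. The following is a minimal, standalone C sketch of just that clamping step; sketch_entry, emit_pinned and the sample numbers are illustrative stand-ins and are not btrfs code or APIs.

/*
 * Standalone sketch: clamp a pinned extent [start, end] to a block group
 * [bg_start, bg_start + bg_len) and emit it as a free-space entry, the same
 * min() the patch computes before filling entry->offset / entry->bytes.
 */
#include <stdint.h>
#include <stdio.h>

struct sketch_entry {		/* stand-in for struct btrfs_free_space_entry */
	uint64_t offset;
	uint64_t bytes;
};

static int emit_pinned(uint64_t bg_start, uint64_t bg_len,
		       uint64_t start, uint64_t end,
		       struct sketch_entry *entry)
{
	uint64_t bg_end = bg_start + bg_len;
	uint64_t len;

	/* pinned extent lies entirely outside the block group */
	if (start >= bg_end || end < bg_start)
		return 0;

	if (start < bg_start)
		start = bg_start;

	/* never run past the end of the block group */
	len = bg_end - start;
	if (end + 1 - start < len)
		len = end + 1 - start;

	entry->offset = start;
	entry->bytes = len;
	return 1;
}

int main(void)
{
	struct sketch_entry e;

	/* a pinned extent [5000, 11999] inside a block group at [4096, 12288) */
	if (emit_pinned(4096, 8192, 5000, 11999, &e))
		printf("free-space entry: offset=%llu bytes=%llu\n",
		       (unsigned long long)e.offset,
		       (unsigned long long)e.bytes);
	return 0;
}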