author		Chris Mason <chris.mason@oracle.com>	2008-07-17 12:53:50 -0400
committer	Chris Mason <chris.mason@oracle.com>	2008-09-25 11:04:04 -0400
commit		e6dcd2dc9c489108648e2ed543315dd134d50a9a (patch)
tree		cddf6f588b65e28c5feb8bff89b22d8ff70f8a50 /fs/btrfs/extent_map.c
parent		77a41afb7d0dd0f27b6f2f1a5bc701929c7034de (diff)
Btrfs: New data=ordered implementation
The old data=ordered code would force commit to wait until
all the data extents from the transaction were fully on disk. This
introduced large latencies into the commit and stalled new writers
in the transaction for a long time.
The new code changes the way data allocations and extents work:
* When delayed allocation is filled, data extents are reserved, and
the extent bit EXTENT_ORDERED is set on the entire range of the extent.
A struct btrfs_ordered_extent is allocated and inserted into a per-inode
rbtree to track the pending extents.
* As each page is written EXTENT_ORDERED is cleared on the bytes corresponding
to that page.
* When all of the bytes corresponding to a single struct btrfs_ordered_extent
are written, the previously reserved extent is inserted into the FS
btree and into the extent allocation trees. The checksums for the file
data are also updated.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
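The three steps above amount to a small piece of bookkeeping per ordered extent: reserve a range, count down as pages reach disk, finalize at zero. Below is a minimal userspace C sketch of that lifecycle, not the btrfs code: ordered_extent, ordered_tree, ordered_extent_add, ordered_extent_page_done and finish_ordered_io are invented names, a sorted singly linked list stands in for the kernel's per-inode rbtree, and the btree insertion plus checksum update is reduced to a printf.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long long u64;

/* Invented stand-in for struct btrfs_ordered_extent: one reserved
 * data extent whose pages have not all reached disk yet. */
struct ordered_extent {
	u64 file_offset;	/* start of the range in the file */
	u64 len;		/* length of the reserved extent */
	u64 bytes_left;		/* bytes still flagged EXTENT_ORDERED */
	struct ordered_extent *next;	/* sorted list; the kernel uses an rbtree */
};

/* Invented per-inode tracking structure. */
struct ordered_tree {
	struct ordered_extent *head;
};

/* Step 1: delayed allocation is filled -> reserve the extent and
 * start tracking it with the whole range still "ordered". */
static struct ordered_extent *ordered_extent_add(struct ordered_tree *t,
						 u64 offset, u64 len)
{
	struct ordered_extent *oe = calloc(1, sizeof(*oe));
	struct ordered_extent **p = &t->head;

	oe->file_offset = offset;
	oe->len = len;
	oe->bytes_left = len;
	while (*p && (*p)->file_offset < offset)	/* keep list sorted */
		p = &(*p)->next;
	oe->next = *p;
	*p = oe;
	return oe;
}

/* Step 3: all bytes written -> the reserved extent would now be
 * inserted into the FS btree and the extent allocation trees, and the
 * file data checksums updated.  Reduced here to a printf. */
static void finish_ordered_io(struct ordered_extent *oe)
{
	printf("extent [%llu, %llu) fully on disk: insert into FS btree\n",
	       oe->file_offset, oe->file_offset + oe->len);
}

/* Step 2: one page worth of bytes hit disk -> clear that much of the
 * EXTENT_ORDERED range. */
static void ordered_extent_page_done(struct ordered_extent *oe, u64 bytes)
{
	oe->bytes_left -= bytes;
	if (oe->bytes_left == 0)
		finish_ordered_io(oe);
}

int main(void)
{
	struct ordered_tree tree = { 0 };
	struct ordered_extent *oe = ordered_extent_add(&tree, 0, 3 * 4096);
	int i;

	/* Three 4K pages written back one at a time; the last page
	 * completing is what triggers the btree insertion. */
	for (i = 0; i < 3; i++)
		ordered_extent_page_done(oe, 4096);
	return 0;
}

The point of the design is visible even at this scale: the transaction commit no longer has to wait for data writeback, because each extent finalizes itself when its last page completes.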
Diffstat (limited to 'fs/btrfs/extent_map.c')
-rw-r--r--	fs/btrfs/extent_map.c	| 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index f5a04eb9a2ac..81123277c2b8 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -206,10 +206,11 @@ int add_extent_mapping(struct extent_map_tree *tree,
 	struct extent_map *merge = NULL;
 	struct rb_node *rb;
 
+	BUG_ON(spin_trylock(&tree->lock));
 	rb = tree_insert(&tree->map, em->start, &em->rb_node);
 	if (rb) {
-		merge = rb_entry(rb, struct extent_map, rb_node);
 		ret = -EEXIST;
+		free_extent_map(merge);
 		goto out;
 	}
 	atomic_inc(&em->refs);
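The new first line of the function is a lock-held assertion: spin_trylock() returns nonzero only when it acquires the lock, i.e. when nobody was holding it, so the BUG_ON() fires exactly when a caller enters without tree->lock. A sketch of what callers must now look like (insert_one_mapping is an invented name; the pattern, not the function, is what the commit establishes):

/*
 * Hypothetical caller, kernel-style C (not part of this commit): with
 * the assertion in place, every caller must already hold tree->lock.
 */
static int insert_one_mapping(struct extent_map_tree *tree,
			      struct extent_map *em)
{
	int ret;

	spin_lock(&tree->lock);	/* BUG_ON(spin_trylock(...)) now passes */
	ret = add_extent_mapping(tree, em);
	spin_unlock(&tree->lock);
	return ret;
}

One limitation worth noting: a failing spin_trylock() only proves that someone holds the lock, not that the current task does, which is why later kernels express this contract with assert_spin_locked() or lockdep_assert_held() instead.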
@@ -268,6 +269,7 @@ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
 	struct rb_node *next = NULL;
 	u64 end = range_end(start, len);
 
+	BUG_ON(spin_trylock(&tree->lock));
 	em = tree->last;
 	if (em && end > em->start && start < extent_map_end(em))
 		goto found;
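The context lines also show the tree->last shortcut: the most recently used mapping is checked first with a half-open interval overlap test, and the rbtree walk only happens on a miss. Restated on its own (ranges_overlap is an illustrative name, not from the file):

typedef unsigned long long u64;

/*
 * [a_start, a_end) and [b_start, b_end) overlap iff each range begins
 * before the other one ends -- the same test applied to tree->last
 * above, with a_end = range_end(start, len) and
 * b_end = extent_map_end(em).
 */
static int ranges_overlap(u64 a_start, u64 a_end, u64 b_start, u64 b_end)
{
	return a_end > b_start && a_start < b_end;
}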
@@ -318,6 +320,7 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
 {
 	int ret = 0;
 
+	BUG_ON(spin_trylock(&tree->lock));
 	rb_erase(&em->rb_node, &tree->map);
 	em->in_tree = 0;
 	if (tree->last == em)
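remove_extent_mapping() gets the same assertion, and its context lines show the other half of the tree->last cache: a removal must drop the cached pointer when it refers to the mapping being erased (the reset itself falls just past the visible context). A generic kernel-style sketch of that invalidate-on-remove pattern, with invented names:

/* Invented example container with a "most recently used" cache. */
struct em_cache {
	struct extent_map *last;
};

/*
 * If the cache still points at the element being removed, clear it;
 * otherwise a later lookup would chase a stale (possibly freed) entry.
 */
static void em_cache_forget(struct em_cache *cache, struct extent_map *em)
{
	if (cache->last == em)
		cache->last = NULL;
}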