author     Chris Mason <chris.mason@oracle.com>   2009-09-02 16:53:46 -0400
committer  Chris Mason <chris.mason@oracle.com>   2009-09-11 13:31:07 -0400
commit     8b62b72b26bcd72082c4a69d179dd906bcc22200 (patch)
tree       ceee20dfebe45654cb3a25d8916c195836cdbabf
parent     9655d2982b53fdb38a9e0f2f11315b99b92d66e2 (diff)
Btrfs: Use PagePrivate2 to track pages in the data=ordered code.
Btrfs writes go through delalloc to the data=ordered code. This makes sure that all of the data is on disk before the metadata that references it. The tracking means that we have to make sure each page in an extent is fully written before we add that extent into the on-disk btree.

This was done in the past by setting the EXTENT_ORDERED bit for the range of an extent when it was added to the data=ordered code, and then clearing the EXTENT_ORDERED bit in the extent state tree as each page finished IO.

One of the reasons we had to do this was because sometimes pages are magically dirtied without page_mkwrite being called. The EXTENT_ORDERED bit is checked at writepage time, and if it isn't there, we know the page became dirty without going through the proper path.

These bit operations make for a number of rbtree searches for each page, and can cause considerable lock contention.

This commit switches from the EXTENT_ORDERED bit to PagePrivate2. As pages go into the ordered code, PagePrivate2 is set on each one. This is a cheap operation because we already have all the pages locked and ready to go.

As IO finishes, the PagePrivate2 bit is cleared and the ordered accounting is updated for each page.

At writepage time, if the PagePrivate2 bit is missing, we go into the writepage fixup code to handle improperly dirtied pages.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
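For illustration only (not part of the patch): a minimal user-space C sketch of the scheme described above, assuming a plain boolean stands in for the kernel's PagePrivate2 page flag (SetPagePrivate2 / TestClearPagePrivate2 / ClearPagePrivate2) and a bytes_left counter stands in for the field this commit adds to struct btrfs_ordered_extent. All structs and helper names below are hypothetical stand-ins, not kernel APIs.

/*
 * Sketch: per-page flag replaces EXTENT_ORDERED rbtree bit operations,
 * and per-extent bytes_left accounting decides when the whole extent
 * is safe to insert into the btree.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct page {
	bool private2;			/* models the PagePrivate2 bit */
};

struct ordered_extent {
	unsigned long bytes_left;	/* bytes still waiting for IO */
	bool io_done;
};

/* as pages enter the ordered code: cheap per-page flag, pages already locked */
static void ordered_add_page(struct page *page)
{
	page->private2 = true;		/* SetPagePrivate2() in the kernel */
}

/* writepage start: a page dirtied behind our back won't have the flag set */
static bool page_needs_fixup(struct page *page)
{
	if (page->private2) {		/* TestClearPagePrivate2() */
		page->private2 = false;
		return false;		/* properly in the ordered list */
	}
	return true;			/* send through the writepage fixup path */
}

/* end_io: clear the flag and update the ordered accounting */
static void ordered_page_done(struct page *page, struct ordered_extent *oe)
{
	page->private2 = false;		/* ClearPagePrivate2() */
	if (oe->bytes_left >= PAGE_SIZE)
		oe->bytes_left -= PAGE_SIZE;
	if (oe->bytes_left == 0)
		oe->io_done = true;	/* whole extent written, safe to insert */
}

int main(void)
{
	struct page pages[4] = { 0 };
	struct ordered_extent oe = { .bytes_left = 4 * PAGE_SIZE };
	int i;

	for (i = 0; i < 4; i++)
		ordered_add_page(&pages[i]);
	for (i = 0; i < 4; i++) {
		if (!page_needs_fixup(&pages[i]))
			ordered_page_done(&pages[i], &oe);
	}
	printf("io_done = %d\n", oe.io_done);
	return 0;
}

The point of the model: setting and clearing a page flag is O(1) per page, whereas the old EXTENT_ORDERED approach paid an rbtree search (and tree lock) per page.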
-rw-r--r--   fs/btrfs/extent_io.c      29
-rw-r--r--   fs/btrfs/extent_io.h       9
-rw-r--r--   fs/btrfs/inode.c          47
-rw-r--r--   fs/btrfs/ordered-data.c   29
-rw-r--r--   fs/btrfs/ordered-data.h    3
5 files changed, 62 insertions(+), 55 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c9a438d374b6..a102422cd92e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -885,13 +885,6 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 			      NULL, mask);
 }
 
-int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
-		       gfp_t mask)
-{
-	return set_extent_bit(tree, start, end, EXTENT_ORDERED, 0, NULL, NULL,
-			      mask);
-}
-
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
 		    int bits, gfp_t mask)
 {
@@ -921,13 +914,6 @@ int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 				NULL, mask);
 }
 
-int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end,
-			 gfp_t mask)
-{
-	return clear_extent_bit(tree, start, end, EXTENT_ORDERED, 1, 0,
-				NULL, mask);
-}
-
 int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
 		   gfp_t mask)
 {
@@ -1373,7 +1359,8 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 				 int clear_unlock,
 				 int clear_delalloc, int clear_dirty,
 				 int set_writeback,
-				 int end_writeback)
+				 int end_writeback,
+				 int set_private2)
 {
 	int ret;
 	struct page *pages[16];
@@ -1392,7 +1379,8 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 		clear_bits |= EXTENT_DELALLOC;
 
 	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
-	if (!(unlock_pages || clear_dirty || set_writeback || end_writeback))
+	if (!(unlock_pages || clear_dirty || set_writeback || end_writeback ||
+	      set_private2))
 		return 0;
 
 	while (nr_pages > 0) {
@@ -1400,6 +1388,10 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 				     min_t(unsigned long,
 					   nr_pages, ARRAY_SIZE(pages)), pages);
 		for (i = 0; i < ret; i++) {
+
+			if (set_private2)
+				SetPagePrivate2(pages[i]);
+
 			if (pages[i] == locked_page) {
 				page_cache_release(pages[i]);
 				continue;
@@ -2792,7 +2784,7 @@ int try_release_extent_state(struct extent_map_tree *map,
 	int ret = 1;
 
 	if (test_range_bit(tree, start, end,
-			   EXTENT_IOBITS | EXTENT_ORDERED, 0, NULL))
+			   EXTENT_IOBITS, 0, NULL))
 		ret = 0;
 	else {
 		if ((mask & GFP_NOFS) == GFP_NOFS)
@@ -2835,8 +2827,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 			}
 			if (!test_range_bit(tree, em->start,
 					    extent_map_end(em) - 1,
-					    EXTENT_LOCKED | EXTENT_WRITEBACK |
-					    EXTENT_ORDERED,
+					    EXTENT_LOCKED | EXTENT_WRITEBACK,
 					    0, NULL)) {
 				remove_extent_mapping(map, em);
 				/* once for the rb tree */
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 09cd6fa3cc86..14ed16fd862d 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -13,10 +13,8 @@
 #define EXTENT_DEFRAG (1 << 6)
 #define EXTENT_DEFRAG_DONE (1 << 7)
 #define EXTENT_BUFFER_FILLED (1 << 8)
-#define EXTENT_ORDERED (1 << 9)
-#define EXTENT_ORDERED_METADATA (1 << 10)
-#define EXTENT_BOUNDARY (1 << 11)
-#define EXTENT_NODATASUM (1 << 12)
+#define EXTENT_BOUNDARY (1 << 9)
+#define EXTENT_NODATASUM (1 << 10)
 #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
 
 /* flags for bio submission */
@@ -285,5 +283,6 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 				 int clear_unlock,
 				 int clear_delalloc, int clear_dirty,
 				 int set_writeback,
-				 int end_writeback);
+				 int end_writeback,
+				 int set_private2);
 #endif
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3f8e93de2989..739a245e25d6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -426,7 +426,7 @@ again:
 			extent_clear_unlock_delalloc(inode,
 						     &BTRFS_I(inode)->io_tree,
 						     start, end, NULL, 1, 0,
-						     0, 1, 1, 1);
+						     0, 1, 1, 1, 0);
 			ret = 0;
 			goto free_pages_out;
 		}
@@ -641,7 +641,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 				async_extent->start,
 				async_extent->start +
 				async_extent->ram_size - 1,
-				NULL, 1, 1, 0, 1, 1, 0);
+				NULL, 1, 1, 0, 1, 1, 0, 0);
 
 		ret = btrfs_submit_compressed_write(inode,
 				async_extent->start,
@@ -714,7 +714,7 @@ static noinline int cow_file_range(struct inode *inode,
 			extent_clear_unlock_delalloc(inode,
 						     &BTRFS_I(inode)->io_tree,
 						     start, end, NULL, 1, 1,
-						     1, 1, 1, 1);
+						     1, 1, 1, 1, 0);
 			*nr_written = *nr_written +
 			     (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
 			*page_started = 1;
@@ -777,11 +777,14 @@ static noinline int cow_file_range(struct inode *inode,
 		/* we're not doing compressed IO, don't unlock the first
 		 * page (which the caller expects to stay locked), don't
 		 * clear any dirty bits and don't set any writeback bits
+		 *
+		 * Do set the Private2 bit so we know this page was properly
+		 * setup for writepage
 		 */
 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
 					     start, start + ram_size - 1,
 					     locked_page, unlock, 1,
-					     1, 0, 0, 0);
+					     1, 0, 0, 0, 1);
 		disk_num_bytes -= cur_alloc_size;
 		num_bytes -= cur_alloc_size;
 		alloc_hint = ins.objectid + ins.offset;
@@ -1102,7 +1105,7 @@ out_check:
 
 		extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
 					cur_offset, cur_offset + num_bytes - 1,
-					locked_page, 1, 1, 1, 0, 0, 0);
+					locked_page, 1, 1, 1, 0, 0, 0, 1);
 		cur_offset = extent_end;
 		if (cur_offset > end)
 			break;
@@ -1375,10 +1378,8 @@ again:
 	lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
 
 	/* already ordered? We're done */
-	if (test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
-			   EXTENT_ORDERED, 0, NULL)) {
+	if (PagePrivate2(page))
 		goto out;
-	}
 
 	ordered = btrfs_lookup_ordered_extent(inode, page_start);
 	if (ordered) {
@@ -1414,11 +1415,9 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
 	struct inode *inode = page->mapping->host;
 	struct btrfs_writepage_fixup *fixup;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
-	int ret;
 
-	ret = test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
-			     EXTENT_ORDERED, 0, NULL);
-	if (ret)
+	/* this page is properly in the ordered list */
+	if (TestClearPagePrivate2(page))
 		return 0;
 
 	if (PageChecked(page))
@@ -1624,6 +1623,7 @@ nocow:
 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
 				struct extent_state *state, int uptodate)
 {
+	ClearPagePrivate2(page);
 	return btrfs_finish_ordered_io(page->mapping->host, start, end);
 }
 
@@ -4403,13 +4403,21 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 	u64 page_start = page_offset(page);
 	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
 
+
+	/*
+	 * we have the page locked, so new writeback can't start,
+	 * and the dirty bit won't be cleared while we are here.
+	 *
+	 * Wait for IO on this page so that we can safely clear
+	 * the PagePrivate2 bit and do ordered accounting
+	 */
 	wait_on_page_writeback(page);
+
 	tree = &BTRFS_I(page->mapping->host)->io_tree;
 	if (offset) {
 		btrfs_releasepage(page, GFP_NOFS);
 		return;
 	}
-
 	lock_extent(tree, page_start, page_end, GFP_NOFS);
 	ordered = btrfs_lookup_ordered_extent(page->mapping->host,
 					   page_offset(page));
@@ -4421,14 +4429,19 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
 		clear_extent_bit(tree, page_start, page_end,
 				 EXTENT_DIRTY | EXTENT_DELALLOC |
 				 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
-		btrfs_finish_ordered_io(page->mapping->host,
-					page_start, page_end);
+		/*
+		 * whoever cleared the private bit is responsible
+		 * for the finish_ordered_io
+		 */
+		if (TestClearPagePrivate2(page)) {
+			btrfs_finish_ordered_io(page->mapping->host,
+						page_start, page_end);
+		}
 		btrfs_put_ordered_extent(ordered);
 		lock_extent(tree, page_start, page_end, GFP_NOFS);
 	}
 	clear_extent_bit(tree, page_start, page_end,
-		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
-		 EXTENT_ORDERED,
+		 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
 		 1, 1, NULL, GFP_NOFS);
 	__btrfs_releasepage(page, GFP_NOFS);
 
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index 7f751e462f0b..4a9c8c4cec25 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -159,8 +159,6 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
  *
  * len is the length of the extent
  *
- * This also sets the EXTENT_ORDERED bit on the range in the inode.
- *
  * The tree is given a single reference on the ordered extent that was
  * inserted.
  */
@@ -181,6 +179,7 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	entry->start = start;
 	entry->len = len;
 	entry->disk_len = disk_len;
+	entry->bytes_left = len;
 	entry->inode = inode;
 	if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
 		set_bit(type, &entry->flags);
@@ -195,9 +194,6 @@ int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 			   &entry->rb_node);
 	BUG_ON(node);
 
-	set_extent_ordered(&BTRFS_I(inode)->io_tree, file_offset,
-			   entry_end(entry) - 1, GFP_NOFS);
-
 	spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
 	list_add_tail(&entry->root_extent_list,
 		      &BTRFS_I(inode)->root->fs_info->ordered_extents);
@@ -241,13 +237,10 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 	struct btrfs_ordered_inode_tree *tree;
 	struct rb_node *node;
 	struct btrfs_ordered_extent *entry;
-	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
 	int ret;
 
 	tree = &BTRFS_I(inode)->ordered_tree;
 	mutex_lock(&tree->mutex);
-	clear_extent_ordered(io_tree, file_offset, file_offset + io_size - 1,
-			     GFP_NOFS);
 	node = tree_search(tree, file_offset);
 	if (!node) {
 		ret = 1;
@@ -260,11 +253,16 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
 		goto out;
 	}
 
-	ret = test_range_bit(io_tree, entry->file_offset,
-			     entry->file_offset + entry->len - 1,
-			     EXTENT_ORDERED, 0, NULL);
-	if (ret == 0)
+	if (io_size > entry->bytes_left) {
+		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
+		       (unsigned long long)entry->bytes_left,
+		       (unsigned long long)io_size);
+	}
+	entry->bytes_left -= io_size;
+	if (entry->bytes_left == 0)
 		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+	else
+		ret = 1;
 out:
 	mutex_unlock(&tree->mutex);
 	return ret == 0;
@@ -476,6 +474,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 	u64 orig_end;
 	u64 wait_end;
 	struct btrfs_ordered_extent *ordered;
+	int found;
 
 	if (start + len < start) {
 		orig_end = INT_LIMIT(loff_t);
@@ -502,6 +501,7 @@ again:
 			   orig_end >> PAGE_CACHE_SHIFT);
 
 	end = orig_end;
+	found = 0;
 	while (1) {
 		ordered = btrfs_lookup_first_ordered_extent(inode, end);
 		if (!ordered)
@@ -514,6 +514,7 @@ again:
 			btrfs_put_ordered_extent(ordered);
 			break;
 		}
+		found++;
 		btrfs_start_ordered_extent(inode, ordered, 1);
 		end = ordered->file_offset;
 		btrfs_put_ordered_extent(ordered);
@@ -521,8 +522,8 @@ again:
 			break;
 		end--;
 	}
-	if (test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
-			   EXTENT_ORDERED | EXTENT_DELALLOC, 0, NULL)) {
+	if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
+			   EXTENT_DELALLOC, 0, NULL)) {
 		schedule_timeout(1);
 		goto again;
 	}
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 3d31c8827b01..993a7ea45c70 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -85,6 +85,9 @@ struct btrfs_ordered_extent {
 	/* extent length on disk */
 	u64 disk_len;
 
+	/* number of bytes that still need writing */
+	u64 bytes_left;
+
 	/* flags (described above) */
 	unsigned long flags;
 