Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--    fs/btrfs/file.c    130
1 file changed, 70 insertions(+), 60 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index f80254d82f40..eb1bac7c8553 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -477,6 +477,47 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages)
         }
 }
 
+static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
+                                         const u64 start,
+                                         const u64 len,
+                                         struct extent_state **cached_state)
+{
+        u64 search_start = start;
+        const u64 end = start + len - 1;
+
+        while (search_start < end) {
+                const u64 search_len = end - search_start + 1;
+                struct extent_map *em;
+                u64 em_len;
+                int ret = 0;
+
+                em = btrfs_get_extent(inode, NULL, 0, search_start,
+                                      search_len, 0);
+                if (IS_ERR(em))
+                        return PTR_ERR(em);
+
+                if (em->block_start != EXTENT_MAP_HOLE)
+                        goto next;
+
+                em_len = em->len;
+                if (em->start < search_start)
+                        em_len -= search_start - em->start;
+                if (em_len > search_len)
+                        em_len = search_len;
+
+                ret = set_extent_bit(&inode->io_tree, search_start,
+                                     search_start + em_len - 1,
+                                     EXTENT_DELALLOC_NEW,
+                                     NULL, cached_state, GFP_NOFS);
+next:
+                search_start = extent_map_end(em);
+                free_extent_map(em);
+                if (ret)
+                        return ret;
+        }
+        return 0;
+}
+
 /*
  * after copy_from_user, pages need to be dirtied and we need to make
  * sure holes are created between the current EOF and the start of
@@ -497,14 +538,34 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages,
         u64 end_of_last_block;
         u64 end_pos = pos + write_bytes;
         loff_t isize = i_size_read(inode);
+        unsigned int extra_bits = 0;
 
         start_pos = pos & ~((u64) fs_info->sectorsize - 1);
         num_bytes = round_up(write_bytes + pos - start_pos,
                              fs_info->sectorsize);
 
         end_of_last_block = start_pos + num_bytes - 1;
+
+        if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
+                if (start_pos >= isize &&
+                    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
+                        /*
+                         * There can't be any extents following eof in this case
+                         * so just set the delalloc new bit for the range
+                         * directly.
+                         */
+                        extra_bits |= EXTENT_DELALLOC_NEW;
+                } else {
+                        err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
+                                                            start_pos,
+                                                            num_bytes, cached);
+                        if (err)
+                                return err;
+                }
+        }
+
         err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
-                                        cached, 0);
+                                        extra_bits, cached, 0);
         if (err)
                 return err;
 
@@ -1404,47 +1465,6 @@ fail:
 
 }
 
-static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
-                                         const u64 start,
-                                         const u64 len,
-                                         struct extent_state **cached_state)
-{
-        u64 search_start = start;
-        const u64 end = start + len - 1;
-
-        while (search_start < end) {
-                const u64 search_len = end - search_start + 1;
-                struct extent_map *em;
-                u64 em_len;
-                int ret = 0;
-
-                em = btrfs_get_extent(inode, NULL, 0, search_start,
-                                      search_len, 0);
-                if (IS_ERR(em))
-                        return PTR_ERR(em);
-
-                if (em->block_start != EXTENT_MAP_HOLE)
-                        goto next;
-
-                em_len = em->len;
-                if (em->start < search_start)
-                        em_len -= search_start - em->start;
-                if (em_len > search_len)
-                        em_len = search_len;
-
-                ret = set_extent_bit(&inode->io_tree, search_start,
-                                     search_start + em_len - 1,
-                                     EXTENT_DELALLOC_NEW,
-                                     NULL, cached_state, GFP_NOFS);
-next:
-                search_start = extent_map_end(em);
-                free_extent_map(em);
-                if (ret)
-                        return ret;
-        }
-        return 0;
-}
-
 /*
  * This function locks the extent and properly waits for data=ordered extents
  * to finish before allowing the pages to be modified if need.
@@ -1473,10 +1493,8 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
                          + round_up(pos + write_bytes - start_pos,
                                     fs_info->sectorsize) - 1;
 
-        if (start_pos < inode->vfs_inode.i_size ||
-            (inode->flags & BTRFS_INODE_PREALLOC)) {
+        if (start_pos < inode->vfs_inode.i_size) {
                 struct btrfs_ordered_extent *ordered;
-                unsigned int clear_bits;
 
                 lock_extent_bits(&inode->io_tree, start_pos, last_pos,
                                  cached_state);
@@ -1498,19 +1516,10 @@ lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages,
                 }
                 if (ordered)
                         btrfs_put_ordered_extent(ordered);
-                ret = btrfs_find_new_delalloc_bytes(inode, start_pos,
-                                                    last_pos - start_pos + 1,
-                                                    cached_state);
-                clear_bits = EXTENT_DIRTY | EXTENT_DELALLOC |
-                        EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG;
-                if (ret)
-                        clear_bits |= EXTENT_DELALLOC_NEW | EXTENT_LOCKED;
-                clear_extent_bit(&inode->io_tree, start_pos,
-                                 last_pos, clear_bits,
-                                 (clear_bits & EXTENT_LOCKED) ? 1 : 0,
-                                 0, cached_state, GFP_NOFS);
-                if (ret)
-                        return ret;
+                clear_extent_bit(&inode->io_tree, start_pos, last_pos,
+                                 EXTENT_DIRTY | EXTENT_DELALLOC |
+                                 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+                                 0, 0, cached_state, GFP_NOFS);
                 *lockstart = start_pos;
                 *lockend = last_pos;
                 ret = 1;
@@ -2048,6 +2057,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
         len = (u64)end - (u64)start + 1;
         trace_btrfs_sync_file(file, datasync);
 
+        btrfs_init_log_ctx(&ctx, inode);
+
         /*
          * We write the dirty pages in the range and wait until they complete
          * out of the ->i_mutex. If so, we can flush the dirty pages by
@@ -2194,8 +2205,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
         }
         trans->sync = true;
 
-        btrfs_init_log_ctx(&ctx, inode);
-
         ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
         if (ret < 0) {
                 /* Fallthrough and commit/free transaction. */
@@ -2253,6 +2262,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
                 ret = btrfs_end_transaction(trans);
         }
 out:
+        ASSERT(list_empty(&ctx.list));
         err = file_check_and_advance_wb_err(file);
         if (!ret)
                 ret = err;