author      Chris Mason <chris.mason@oracle.com>    2007-11-01 19:45:34 -0400
committer   Chris Mason <chris.mason@oracle.com>    2008-09-25 11:03:57 -0400
commit      b293f02e1423f2099744f3ade23ddd83b65321fc (patch)
tree        6cac2c422ce3ccde9cf7f6f29c62e045365cb3cb /fs/btrfs/extent_map.c
parent      944746ec7575258475958d858cdf3a0e4c8f7b9e (diff)
Btrfs: Add writepages support
Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs/extent_map.c')
-rw-r--r--   fs/btrfs/extent_map.c   135
1 files changed, 110 insertions, 25 deletions
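
The new entry point exported at the bottom of this patch, extent_writepages(), takes the same (tree, mapping, get_extent, wbc) arguments it passes through to write_cache_pages(). The caller side is outside this diff (the diffstat is limited to fs/btrfs/extent_map.c); a minimal sketch of how an address_space .writepages hook could hand off to it follows. The btrfs_writepages wrapper, the btrfs_get_extent callback, and the BTRFS_I(inode)->extent_tree field are assumptions for illustration, not taken from this commit.

/*
 * Hypothetical caller-side wiring (NOT part of this diff): an
 * address_space_operations .writepages hook that forwards the whole
 * writeback request to the new extent_writepages() helper.
 */
static int btrfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	/* per-inode extent map tree; field name assumed for illustration */
	struct extent_map_tree *tree = &BTRFS_I(mapping->host)->extent_tree;

	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}

static struct address_space_operations btrfs_aops = {
	/* ...readpage, writepage, etc. as before... */
	.writepages	= btrfs_writepages,
};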
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index ff8881fb56d..b0677c84bb7 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -10,6 +10,7 @@
 #include <linux/blkdev.h>
 #include <linux/swap.h>
 #include <linux/version.h>
+#include <linux/writeback.h>
 #include "extent_map.h"
 
 /* temporary define until extent_map moves out of btrfs */
@@ -35,6 +36,12 @@ struct tree_entry {
 	struct rb_node rb_node;
 };
 
+struct extent_page_data {
+	struct bio *bio;
+	struct extent_map_tree *tree;
+	get_extent_t *get_extent;
+};
+
 void __init extent_map_init(void)
 {
 	extent_map_cache = btrfs_cache_create("extent_map",
@@ -1460,40 +1467,76 @@ static int end_bio_extent_preparewrite(struct bio *bio,
 #endif
 }
 
-static int submit_extent_page(int rw, struct extent_map_tree *tree,
-			      struct page *page, sector_t sector,
-			      size_t size, unsigned long offset,
-			      struct block_device *bdev,
-			      bio_end_io_t end_io_func)
+static struct bio *
+extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
+		 gfp_t gfp_flags)
 {
 	struct bio *bio;
-	int ret = 0;
 
-	bio = bio_alloc(GFP_NOIO, 1);
+	bio = bio_alloc(gfp_flags, nr_vecs);
 
-	bio->bi_sector = sector;
-	bio->bi_bdev = bdev;
-	bio->bi_io_vec[0].bv_page = page;
-	bio->bi_io_vec[0].bv_len = size;
-	bio->bi_io_vec[0].bv_offset = offset;
-
-	bio->bi_vcnt = 1;
-	bio->bi_idx = 0;
-	bio->bi_size = size;
+	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
+		while (!bio && (nr_vecs /= 2))
+			bio = bio_alloc(gfp_flags, nr_vecs);
+	}
 
-	bio->bi_end_io = end_io_func;
-	bio->bi_private = tree;
+	if (bio) {
+		bio->bi_bdev = bdev;
+		bio->bi_sector = first_sector;
+	}
+	return bio;
+}
 
+static int submit_one_bio(int rw, struct bio *bio)
+{
+	int ret = 0;
 	bio_get(bio);
 	submit_bio(rw, bio);
-
 	if (bio_flagged(bio, BIO_EOPNOTSUPP))
 		ret = -EOPNOTSUPP;
-
 	bio_put(bio);
 	return ret;
 }
 
+static int submit_extent_page(int rw, struct extent_map_tree *tree,
+			      struct page *page, sector_t sector,
+			      size_t size, unsigned long offset,
+			      struct block_device *bdev,
+			      struct bio **bio_ret,
+			      int max_pages,
+			      bio_end_io_t end_io_func)
+{
+	int ret = 0;
+	struct bio *bio;
+	int nr;
+
+	if (bio_ret && *bio_ret) {
+		bio = *bio_ret;
+		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
+		    bio_add_page(bio, page, size, offset) < size) {
+			ret = submit_one_bio(rw, bio);
+			bio = NULL;
+		} else {
+			return 0;
+		}
+	}
+	nr = min(max_pages, bio_get_nr_vecs(bdev));
+	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+	if (!bio) {
+		printk("failed to allocate bio nr %d\n", nr);
+	}
+	bio_add_page(bio, page, size, offset);
+	bio->bi_end_io = end_io_func;
+	bio->bi_private = tree;
+	if (bio_ret) {
+		*bio_ret = bio;
+	} else {
+		ret = submit_one_bio(rw, bio);
+	}
+
+	return ret;
+}
+
 void set_page_extent_mapped(struct page *page)
 {
 	if (!PagePrivate(page)) {
@@ -1590,7 +1633,8 @@ int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
 		if (!ret) {
 			ret = submit_extent_page(READ, tree, page,
 					 sector, iosize, page_offset,
-					 bdev, end_bio_extent_readpage);
+					 bdev, NULL, 1,
+					 end_bio_extent_readpage);
 		}
 		if (ret)
 			SetPageError(page);
@@ -1613,11 +1657,12 @@ EXPORT_SYMBOL(extent_read_full_page);
  * are found, they are marked writeback. Then the lock bits are removed
  * and the end_io handler clears the writeback ranges
  */
-int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
-			   get_extent_t *get_extent,
-			   struct writeback_control *wbc)
+static int __extent_writepage(struct page *page, struct writeback_control *wbc,
+			      void *data)
 {
 	struct inode *inode = page->mapping->host;
+	struct extent_page_data *epd = data;
+	struct extent_map_tree *tree = epd->tree;
 	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
 	u64 page_end = start + PAGE_CACHE_SIZE - 1;
 	u64 end;
@@ -1691,7 +1736,7 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
 			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
 			break;
 		}
-		em = get_extent(inode, page, page_offset, cur, end, 1);
+		em = epd->get_extent(inode, page, page_offset, cur, end, 1);
 		if (IS_ERR(em) || !em) {
 			SetPageError(page);
 			break;
@@ -1734,9 +1779,12 @@ int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
 		if (ret)
 			SetPageError(page);
 		else {
+			unsigned long nr = end_index + 1;
 			set_range_writeback(tree, cur, cur + iosize - 1);
+
 			ret = submit_extent_page(WRITE, tree, page, sector,
 						 iosize, page_offset, bdev,
+						 &epd->bio, nr,
 						 end_bio_extent_writepage);
 			if (ret)
 				SetPageError(page);
@@ -1750,8 +1798,44 @@ done:
 	unlock_page(page);
 	return 0;
 }
+
+int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
+			   get_extent_t *get_extent,
+			   struct writeback_control *wbc)
+{
+	int ret;
+	struct extent_page_data epd = {
+		.bio = NULL,
+		.tree = tree,
+		.get_extent = get_extent,
+	};
+
+	ret = __extent_writepage(page, wbc, &epd);
+	if (epd.bio)
+		submit_one_bio(WRITE, epd.bio);
+	return ret;
+}
 EXPORT_SYMBOL(extent_write_full_page);
 
+int extent_writepages(struct extent_map_tree *tree,
+		      struct address_space *mapping,
+		      get_extent_t *get_extent,
+		      struct writeback_control *wbc)
+{
+	int ret;
+	struct extent_page_data epd = {
+		.bio = NULL,
+		.tree = tree,
+		.get_extent = get_extent,
+	};
+
+	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
+	if (epd.bio)
+		submit_one_bio(WRITE, epd.bio);
+	return ret;
+}
+EXPORT_SYMBOL(extent_writepages);
+
 /*
  * basic invalidatepage code, this waits on any locked or writeback
  * ranges corresponding to the page, and then deletes any extent state
@@ -1869,6 +1953,7 @@ int extent_prepare_write(struct extent_map_tree *tree,
 					EXTENT_LOCKED, 0, NULL, GFP_NOFS);
 			ret = submit_extent_page(READ, tree, page,
 					 sector, iosize, page_offset, em->bdev,
+					 NULL, 1,
 					 end_bio_extent_preparewrite);
 			iocount++;
 			block_start = block_start + iosize;
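
Taken together, the core of the change is that submit_extent_page() no longer builds and submits a single-page bio per call. When the caller passes a bio_ret pointer, a bio stays open across calls: a page is appended as long as it starts at the sector right after the bio's current end (bio->bi_sector + (bio->bi_size >> 9) == sector) and bio_add_page() can still fit it; otherwise the accumulated bio is submitted via submit_one_bio() and a fresh one is allocated. The callers, extent_write_full_page() and extent_writepages(), flush whatever is still open in epd->bio at the end. Below is a tiny user-space model of that batching pattern; all names in it (struct batch, add_request, flush) are made up for illustration and it is not kernel code.

/*
 * Simplified user-space model of the coalescing in submit_extent_page():
 * keep appending requests to an open batch while they are physically
 * contiguous and the batch has room, otherwise flush and start over.
 */
#include <stdio.h>

#define MAX_PER_BATCH 4

struct batch {
	unsigned long long next_sector;	/* sector the next request must start at */
	int count;			/* requests accumulated so far */
};

static void flush(struct batch *b)
{
	if (b->count)
		printf("submit batch: %d request(s) ending at sector %llu\n",
		       b->count, b->next_sector);
	b->count = 0;
}

static void add_request(struct batch *b, unsigned long long sector,
			unsigned long long sectors)
{
	/* not contiguous with the open batch, or batch full: flush first */
	if (b->count &&
	    (b->next_sector != sector || b->count >= MAX_PER_BATCH))
		flush(b);

	if (b->count == 0)
		b->next_sector = sector;
	b->next_sector += sectors;
	b->count++;
}

int main(void)
{
	struct batch b = { 0, 0 };
	unsigned long long sectors[] = { 0, 8, 16, 100, 108, 116, 124, 132 };
	int i;

	for (i = 0; i < 8; i++)
		add_request(&b, sectors[i], 8);	/* 8 sectors == one 4K page */

	flush(&b);	/* caller flushes the last open batch, like epd->bio */
	return 0;
}

The payoff of the real patch is the same as in the model: contiguous dirty pages reach the block layer as a few large bios instead of one bio per page, which is what makes the new writepages path worthwhile.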