author		Pavel Emelyanov <xemul@openvz.org>	2013-06-29 13:45:29 -0400
committer	Miklos Szeredi <mszeredi@suse.cz>	2013-10-01 10:44:52 -0400
commit		26d614df1da9d7d255686af5d6d4508f77853c01
tree		95840c244bc0f92450db6a7b96e015ff8f07a109 /fs/fuse
parent		72523425fb434e81c0c9f611bd880ce339c4e06b
fuse: Implement writepages callback
A .writepages callback is required so that each writeback request can carry more than
one page. The patch enables the optimized behaviour unconditionally,
i.e. mmap-ed writes benefit from it even if fc->writeback_cache=0.
[SzM: simplify, add comments]
Signed-off-by: Maxim Patlasov <MPatlasov@parallels.com>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
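As a rough illustration of what the new path buys: a userspace program that dirties
several contiguous pages of an mmap-ed file on a FUSE mount and then calls msync()
will now have those pages written back through .writepages, which can coalesce them
into a single write request instead of one request per page. A minimal sketch follows;
the mount point /mnt/fuse and the file name are assumptions, not part of the patch.

/*
 * Userspace sketch (not part of the patch): dirty 16 contiguous pages of an
 * mmap-ed file on an assumed FUSE mount and force synchronous writeback.
 * On kernels with this patch, the msync() ends up in fuse_writepages().
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 16 * 4096;	/* 16 pages */
	int fd = open("/mnt/fuse/testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror("open/ftruncate");
		return 1;
	}

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(p, 'x', len);		/* dirty all 16 pages */

	/* Synchronous writeback of the mapped range */
	if (msync(p, len, MS_SYNC) < 0)
		perror("msync");

	munmap(p, len);
	close(fd);
	return 0;
}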
Diffstat (limited to 'fs/fuse')
-rw-r--r--	fs/fuse/file.c	150
1 file changed, 147 insertions(+), 3 deletions(-)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6ce0066d7c8c..0bd349dd968f 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1502,8 +1502,8 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
 	fuse_writepage_free(fc, req);
 }
 
-static struct fuse_file *fuse_write_file(struct fuse_conn *fc,
-					 struct fuse_inode *fi)
+static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
+					     struct fuse_inode *fi)
 {
 	struct fuse_file *ff = NULL;
 
@@ -1540,7 +1540,7 @@ static int fuse_writepage_locked(struct page *page)
 		goto err_free;
 
 	error = -EIO;
-	req->ff = fuse_write_file(fc, fi);
+	req->ff = fuse_write_file_get(fc, fi);
 	if (!req->ff)
 		goto err_free;
 
@@ -1586,6 +1586,149 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc)
 	return err;
 }
 
+struct fuse_fill_wb_data {
+	struct fuse_req *req;
+	struct fuse_file *ff;
+	struct inode *inode;
+};
+
+static void fuse_writepages_send(struct fuse_fill_wb_data *data)
+{
+	struct fuse_req *req = data->req;
+	struct inode *inode = data->inode;
+	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_inode *fi = get_fuse_inode(inode);
+
+	req->ff = fuse_file_get(data->ff);
+	spin_lock(&fc->lock);
+	list_add_tail(&req->list, &fi->queued_writes);
+	fuse_flush_writepages(inode);
+	spin_unlock(&fc->lock);
+}
+
+static int fuse_writepages_fill(struct page *page,
+		struct writeback_control *wbc, void *_data)
+{
+	struct fuse_fill_wb_data *data = _data;
+	struct fuse_req *req = data->req;
+	struct inode *inode = data->inode;
+	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct page *tmp_page;
+	int err;
+
+	if (!data->ff) {
+		err = -EIO;
+		data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
+		if (!data->ff)
+			goto out_unlock;
+	}
+
+	if (req) {
+		BUG_ON(!req->num_pages);
+		if (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
+		    (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
+		    req->pages[req->num_pages - 1]->index + 1 != page->index) {
+
+			fuse_writepages_send(data);
+			data->req = NULL;
+		}
+	}
+	err = -ENOMEM;
+	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+	if (!tmp_page)
+		goto out_unlock;
+
+	/*
+	 * The page must not be redirtied until the writeout is completed
+	 * (i.e. userspace has sent a reply to the write request).  Otherwise
+	 * there could be more than one temporary page instance for each real
+	 * page.
+	 *
+	 * This is ensured by holding the page lock in page_mkwrite() while
+	 * checking fuse_page_is_writeback().  We already hold the page lock
+	 * since clear_page_dirty_for_io() and keep it held until we add the
+	 * request to the fi->writepages list and increment req->num_pages.
+	 * After this fuse_page_is_writeback() will indicate that the page is
+	 * under writeback, so we can release the page lock.
+	 */
+	if (data->req == NULL) {
+		struct fuse_inode *fi = get_fuse_inode(inode);
+
+		err = -ENOMEM;
+		req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
+		if (!req) {
+			__free_page(tmp_page);
+			goto out_unlock;
+		}
+
+		fuse_write_fill(req, data->ff, page_offset(page), 0);
+		req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
+		req->in.argpages = 1;
+		req->background = 1;
+		req->num_pages = 0;
+		req->end = fuse_writepage_end;
+		req->inode = inode;
+
+		spin_lock(&fc->lock);
+		list_add(&req->writepages_entry, &fi->writepages);
+		spin_unlock(&fc->lock);
+
+		data->req = req;
+	}
+	set_page_writeback(page);
+
+	copy_highpage(tmp_page, page);
+	req->pages[req->num_pages] = tmp_page;
+	req->page_descs[req->num_pages].offset = 0;
+	req->page_descs[req->num_pages].length = PAGE_SIZE;
+
+	inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
+	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+	end_page_writeback(page);
+
+	/*
+	 * Protected by fc->lock against concurrent access by
+	 * fuse_page_is_writeback().
+	 */
+	spin_lock(&fc->lock);
+	req->num_pages++;
+	spin_unlock(&fc->lock);
+
+	err = 0;
+out_unlock:
+	unlock_page(page);
+
+	return err;
+}
+
+static int fuse_writepages(struct address_space *mapping,
+		struct writeback_control *wbc)
+{
+	struct inode *inode = mapping->host;
+	struct fuse_fill_wb_data data;
+	int err;
+
+	err = -EIO;
+	if (is_bad_inode(inode))
+		goto out;
+
+	data.inode = inode;
+	data.req = NULL;
+	data.ff = NULL;
+
+	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
+	if (data.req) {
+		/* Ignore errors if we can write at least one page */
+		BUG_ON(!data.req->num_pages);
+		fuse_writepages_send(&data);
+		err = 0;
+	}
+	if (data.ff)
+		fuse_file_put(data.ff, false);
+out:
+	return err;
+}
+
 static int fuse_launder_page(struct page *page)
 {
 	int err = 0;
@@ -2607,6 +2750,7 @@ static const struct file_operations fuse_direct_io_file_operations = {
 static const struct address_space_operations fuse_file_aops = {
 	.readpage	= fuse_readpage,
 	.writepage	= fuse_writepage,
+	.writepages	= fuse_writepages,
 	.launder_page	= fuse_launder_page,
 	.readpages	= fuse_readpages,
 	.set_page_dirty	= __set_page_dirty_nobuffers,
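For reference, the coalescing rule in fuse_writepages_fill() above flushes the pending
request before queueing a new page when the request already holds FUSE_MAX_PAGES_PER_REQ
pages, when one more page would exceed fc->max_write, or when the page is not contiguous
with the last queued one. A simplified standalone sketch of that decision (stand-in names
and constants, not kernel code):

/*
 * Simplified illustration of the flush condition used when batching dirty
 * pages into a single writeback request.  The pending request is sent and a
 * fresh one started whenever adding the next page would violate one of the
 * three limits below.
 */
#include <stdbool.h>
#include <stddef.h>

#define MAX_PAGES_PER_REQ	32	/* stand-in for FUSE_MAX_PAGES_PER_REQ */
#define PAGE_SIZE		4096UL	/* stand-in for PAGE_CACHE_SIZE */

struct wb_req {
	unsigned int num_pages;		/* pages queued so far */
	unsigned long last_index;	/* page index of the last queued page */
};

static bool must_flush(const struct wb_req *req, unsigned long page_index,
		       size_t max_write)
{
	if (req->num_pages == MAX_PAGES_PER_REQ)
		return true;				/* request is full */
	if ((req->num_pages + 1) * PAGE_SIZE > max_write)
		return true;				/* would exceed max_write */
	if (req->last_index + 1 != page_index)
		return true;				/* pages not contiguous */
	return false;
}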