author     Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 01:27:00 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 01:27:00 -0500
commit     a7fa20a594fadf1e37cb3469c880ce6a544d3c3b
tree       4be4e98648ff9afa398be947ca8fa716d6701686
parent     a30124539b2641c5b3551193af7d21a6fc61ba98
parent     ce128de6260f86a990ed44a697f26d0859684f28
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse
Pull fuse updates from Miklos Szeredi:
"This adds a ->writepage() implementation to fuse, improving mmaped
writeout and paving the way for buffered writeback.
And there's a patch to add a fix minor number for /dev/cuse, similarly
to /dev/fuse"
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse:
fuse: writepages: protect secondary requests from fuse file release
fuse: writepages: update bdi writeout when deleting secondary request
fuse: writepages: crop secondary requests
fuse: writepages: roll back changes if request not found
cuse: add fix minor number to /dev/cuse
fuse: writepage: skip already in flight
fuse: writepages: handle same page rewrites
fuse: writepages: fix aggregation
fuse: fix race in fuse_writepages()
fuse: Implement writepages callback
fuse: don't BUG on no write file
fuse: lock page in mkwrite
fuse: Prepare to handle multiple pages in writeback
fuse: Getting file for writeback helper
-rw-r--r--  Documentation/devices.txt    |   1
-rw-r--r--  fs/fuse/cuse.c               |   5
-rw-r--r--  fs/fuse/file.c               | 361
-rw-r--r--  fs/fuse/fuse_i.h             |   1
-rw-r--r--  include/linux/miscdevice.h   |   1
5 files changed, 342 insertions(+), 27 deletions(-)
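(Not part of the patch — a hedged userspace sketch.) With CUSE_MINOR fixed at 203 under the misc major 10, the /dev/cuse node can be created statically and, via the new MODULE_ALIAS_MISCDEV(CUSE_MINOR) alias, the cuse module can be loaded on demand when that node is first opened. The 10:203 numbers come from the devices.txt and miscdevice.h hunks below; the program itself is only an illustrative assumption, not code from this merge.

/* Hypothetical illustration only; not part of this merge. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <unistd.h>

int main(void)
{
	/* misc major 10, minor 203 = CUSE_MINOR (see the devices.txt hunk). */
	if (mknod("/dev/cuse", S_IFCHR | 0600, makedev(10, 203)) < 0 &&
	    errno != EEXIST) {
		perror("mknod /dev/cuse");
		return 1;
	}

	/* With a fixed minor, opening the static node can auto-load cuse. */
	int fd = open("/dev/cuse", O_RDWR);
	if (fd < 0) {
		perror("open /dev/cuse");
		return 1;
	}
	close(fd);
	return 0;
}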
diff --git a/Documentation/devices.txt b/Documentation/devices.txt
index 23721d3be3e6..80b72419ffd8 100644
--- a/Documentation/devices.txt
+++ b/Documentation/devices.txt
@@ -414,6 +414,7 @@ Your cooperation is appreciated.
 		200 = /dev/net/tun	TAP/TUN network device
 		201 = /dev/button/gulpb	Transmeta GULP-B buttons
 		202 = /dev/emd/ctl	Enhanced Metadisk RAID (EMD) control
+		203 = /dev/cuse		Cuse (character device in user-space)
 		204 = /dev/video/em8300		EM8300 DVD decoder control
 		205 = /dev/video/em8300_mv	EM8300 DVD decoder video
 		206 = /dev/video/em8300_ma	EM8300 DVD decoder audio
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index adbfd66b380f..24da581cb52b 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -589,11 +589,14 @@ static struct attribute *cuse_class_dev_attrs[] = {
 ATTRIBUTE_GROUPS(cuse_class_dev);
 
 static struct miscdevice cuse_miscdev = {
-	.minor		= MISC_DYNAMIC_MINOR,
+	.minor		= CUSE_MINOR,
 	.name		= "cuse",
 	.fops		= &cuse_channel_fops,
 };
 
+MODULE_ALIAS_MISCDEV(CUSE_MINOR);
+MODULE_ALIAS("devname:cuse");
+
 static int __init cuse_init(void)
 {
 	int i, rc;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 4598345ab87d..7e70506297bc 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -334,7 +334,8 @@ static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
 
 		BUG_ON(req->inode != inode);
 		curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
-		if (curr_index == index) {
+		if (curr_index <= index &&
+		    index < curr_index + req->num_pages) {
 			found = true;
 			break;
 		}
@@ -1409,8 +1410,13 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
 
 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
 {
-	__free_page(req->pages[0]);
-	fuse_file_put(req->ff, false);
+	int i;
+
+	for (i = 0; i < req->num_pages; i++)
+		__free_page(req->pages[i]);
+
+	if (req->ff)
+		fuse_file_put(req->ff, false);
 }
 
 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
@@ -1418,30 +1424,34 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 	struct inode *inode = req->inode;
 	struct fuse_inode *fi = get_fuse_inode(inode);
 	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+	int i;
 
 	list_del(&req->writepages_entry);
-	dec_bdi_stat(bdi, BDI_WRITEBACK);
-	dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
-	bdi_writeout_inc(bdi);
+	for (i = 0; i < req->num_pages; i++) {
+		dec_bdi_stat(bdi, BDI_WRITEBACK);
+		dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
+		bdi_writeout_inc(bdi);
+	}
 	wake_up(&fi->page_waitq);
 }
 
 /* Called under fc->lock, may release and reacquire it */
-static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
+static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
+				loff_t size)
 __releases(fc->lock)
 __acquires(fc->lock)
 {
 	struct fuse_inode *fi = get_fuse_inode(req->inode);
-	loff_t size = i_size_read(req->inode);
 	struct fuse_write_in *inarg = &req->misc.write.in;
+	__u64 data_size = req->num_pages * PAGE_CACHE_SIZE;
 
 	if (!fc->connected)
 		goto out_free;
 
-	if (inarg->offset + PAGE_CACHE_SIZE <= size) {
-		inarg->size = PAGE_CACHE_SIZE;
+	if (inarg->offset + data_size <= size) {
+		inarg->size = data_size;
 	} else if (inarg->offset < size) {
-		inarg->size = size & (PAGE_CACHE_SIZE - 1);
+		inarg->size = size - inarg->offset;
 	} else {
 		/* Got truncated off completely */
 		goto out_free;
@@ -1472,12 +1482,13 @@ __acquires(fc->lock)
 {
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_inode *fi = get_fuse_inode(inode);
+	size_t crop = i_size_read(inode);
 	struct fuse_req *req;
 
 	while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
 		req = list_entry(fi->queued_writes.next, struct fuse_req, list);
 		list_del_init(&req->list);
-		fuse_send_writepage(fc, req);
+		fuse_send_writepage(fc, req, crop);
 	}
 }
 
@@ -1488,12 +1499,62 @@ static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
 
 	mapping_set_error(inode->i_mapping, req->out.h.error);
 	spin_lock(&fc->lock);
+	while (req->misc.write.next) {
+		struct fuse_conn *fc = get_fuse_conn(inode);
+		struct fuse_write_in *inarg = &req->misc.write.in;
+		struct fuse_req *next = req->misc.write.next;
+		req->misc.write.next = next->misc.write.next;
+		next->misc.write.next = NULL;
+		next->ff = fuse_file_get(req->ff);
+		list_add(&next->writepages_entry, &fi->writepages);
+
+		/*
+		 * Skip fuse_flush_writepages() to make it easy to crop requests
+		 * based on primary request size.
+		 *
+		 * 1st case (trivial): there are no concurrent activities using
+		 * fuse_set/release_nowrite.  Then we're on safe side because
+		 * fuse_flush_writepages() would call fuse_send_writepage()
+		 * anyway.
+		 *
+		 * 2nd case: someone called fuse_set_nowrite and it is waiting
+		 * now for completion of all in-flight requests.  This happens
+		 * rarely and no more than once per page, so this should be
+		 * okay.
+		 *
+		 * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
+		 * of fuse_set_nowrite..fuse_release_nowrite section.  The fact
+		 * that fuse_set_nowrite returned implies that all in-flight
+		 * requests were completed along with all of their secondary
+		 * requests.  Further primary requests are blocked by negative
+		 * writectr.  Hence there cannot be any in-flight requests and
+		 * no invocations of fuse_writepage_end() while we're in
+		 * fuse_set_nowrite..fuse_release_nowrite section.
+		 */
+		fuse_send_writepage(fc, next, inarg->offset + inarg->size);
+	}
 	fi->writectr--;
 	fuse_writepage_finish(fc, req);
 	spin_unlock(&fc->lock);
 	fuse_writepage_free(fc, req);
 }
 
+static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
+					     struct fuse_inode *fi)
+{
+	struct fuse_file *ff = NULL;
+
+	spin_lock(&fc->lock);
+	if (!WARN_ON(list_empty(&fi->write_files))) {
+		ff = list_entry(fi->write_files.next, struct fuse_file,
+				write_entry);
+		fuse_file_get(ff);
+	}
+	spin_unlock(&fc->lock);
+
+	return ff;
+}
+
 static int fuse_writepage_locked(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
@@ -1501,8 +1562,8 @@ static int fuse_writepage_locked(struct page *page)
 	struct fuse_conn *fc = get_fuse_conn(inode);
 	struct fuse_inode *fi = get_fuse_inode(inode);
 	struct fuse_req *req;
-	struct fuse_file *ff;
 	struct page *tmp_page;
+	int error = -ENOMEM;
 
 	set_page_writeback(page);
 
@@ -1515,16 +1576,16 @@ static int fuse_writepage_locked(struct page *page)
 	if (!tmp_page)
 		goto err_free;
 
-	spin_lock(&fc->lock);
-	BUG_ON(list_empty(&fi->write_files));
-	ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
-	req->ff = fuse_file_get(ff);
-	spin_unlock(&fc->lock);
+	error = -EIO;
+	req->ff = fuse_write_file_get(fc, fi);
+	if (!req->ff)
+		goto err_free;
 
-	fuse_write_fill(req, ff, page_offset(page), 0);
+	fuse_write_fill(req, req->ff, page_offset(page), 0);
 
 	copy_highpage(tmp_page, page);
 	req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
+	req->misc.write.next = NULL;
 	req->in.argpages = 1;
 	req->num_pages = 1;
 	req->pages[0] = tmp_page;
@@ -1550,19 +1611,263 @@ err_free:
 	fuse_request_free(req);
 err:
 	end_page_writeback(page);
-	return -ENOMEM;
+	return error;
 }
 
 static int fuse_writepage(struct page *page, struct writeback_control *wbc)
 {
 	int err;
 
+	if (fuse_page_is_writeback(page->mapping->host, page->index)) {
+		/*
+		 * ->writepages() should be called for sync() and friends.  We
+		 * should only get here on direct reclaim and then we are
+		 * allowed to skip a page which is already in flight
+		 */
+		WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
+
+		redirty_page_for_writepage(wbc, page);
+		return 0;
+	}
+
 	err = fuse_writepage_locked(page);
 	unlock_page(page);
 
 	return err;
 }
 
+struct fuse_fill_wb_data {
+	struct fuse_req *req;
+	struct fuse_file *ff;
+	struct inode *inode;
+	struct page **orig_pages;
+};
+
+static void fuse_writepages_send(struct fuse_fill_wb_data *data)
+{
+	struct fuse_req *req = data->req;
+	struct inode *inode = data->inode;
+	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_inode *fi = get_fuse_inode(inode);
+	int num_pages = req->num_pages;
+	int i;
+
+	req->ff = fuse_file_get(data->ff);
+	spin_lock(&fc->lock);
+	list_add_tail(&req->list, &fi->queued_writes);
+	fuse_flush_writepages(inode);
+	spin_unlock(&fc->lock);
+
+	for (i = 0; i < num_pages; i++)
+		end_page_writeback(data->orig_pages[i]);
+}
+
+static bool fuse_writepage_in_flight(struct fuse_req *new_req,
+				     struct page *page)
+{
+	struct fuse_conn *fc = get_fuse_conn(new_req->inode);
+	struct fuse_inode *fi = get_fuse_inode(new_req->inode);
+	struct fuse_req *tmp;
+	struct fuse_req *old_req;
+	bool found = false;
+	pgoff_t curr_index;
+
+	BUG_ON(new_req->num_pages != 0);
+
+	spin_lock(&fc->lock);
+	list_del(&new_req->writepages_entry);
+	list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
+		BUG_ON(old_req->inode != new_req->inode);
+		curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+		if (curr_index <= page->index &&
+		    page->index < curr_index + old_req->num_pages) {
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		list_add(&new_req->writepages_entry, &fi->writepages);
+		goto out_unlock;
+	}
+
+	new_req->num_pages = 1;
+	for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
+		BUG_ON(tmp->inode != new_req->inode);
+		curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+		if (tmp->num_pages == 1 &&
+		    curr_index == page->index) {
+			old_req = tmp;
+		}
+	}
+
+	if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
+					old_req->state == FUSE_REQ_PENDING)) {
+		struct backing_dev_info *bdi = page->mapping->backing_dev_info;
+
+		copy_highpage(old_req->pages[0], page);
+		spin_unlock(&fc->lock);
+
+		dec_bdi_stat(bdi, BDI_WRITEBACK);
+		dec_zone_page_state(page, NR_WRITEBACK_TEMP);
+		bdi_writeout_inc(bdi);
+		fuse_writepage_free(fc, new_req);
+		fuse_request_free(new_req);
+		goto out;
+	} else {
+		new_req->misc.write.next = old_req->misc.write.next;
+		old_req->misc.write.next = new_req;
+	}
+out_unlock:
+	spin_unlock(&fc->lock);
+out:
+	return found;
+}
+
+static int fuse_writepages_fill(struct page *page,
+		struct writeback_control *wbc, void *_data)
+{
+	struct fuse_fill_wb_data *data = _data;
+	struct fuse_req *req = data->req;
+	struct inode *inode = data->inode;
+	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct page *tmp_page;
+	bool is_writeback;
+	int err;
+
+	if (!data->ff) {
+		err = -EIO;
+		data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
+		if (!data->ff)
+			goto out_unlock;
+	}
+
+	/*
+	 * Being under writeback is unlikely but possible.  For example direct
+	 * read to an mmaped fuse file will set the page dirty twice; once when
+	 * the pages are faulted with get_user_pages(), and then after the read
+	 * completed.
+	 */
+	is_writeback = fuse_page_is_writeback(inode, page->index);
+
+	if (req && req->num_pages &&
+	    (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
+	     (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
+	     data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
+		fuse_writepages_send(data);
+		data->req = NULL;
+	}
+	err = -ENOMEM;
+	tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+	if (!tmp_page)
+		goto out_unlock;
+
+	/*
+	 * The page must not be redirtied until the writeout is completed
+	 * (i.e. userspace has sent a reply to the write request).  Otherwise
+	 * there could be more than one temporary page instance for each real
+	 * page.
+	 *
+	 * This is ensured by holding the page lock in page_mkwrite() while
+	 * checking fuse_page_is_writeback().  We already hold the page lock
+	 * since clear_page_dirty_for_io() and keep it held until we add the
+	 * request to the fi->writepages list and increment req->num_pages.
+	 * After this fuse_page_is_writeback() will indicate that the page is
+	 * under writeback, so we can release the page lock.
+	 */
+	if (data->req == NULL) {
+		struct fuse_inode *fi = get_fuse_inode(inode);
+
+		err = -ENOMEM;
+		req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
+		if (!req) {
+			__free_page(tmp_page);
+			goto out_unlock;
+		}
+
+		fuse_write_fill(req, data->ff, page_offset(page), 0);
+		req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
+		req->misc.write.next = NULL;
+		req->in.argpages = 1;
+		req->background = 1;
+		req->num_pages = 0;
+		req->end = fuse_writepage_end;
+		req->inode = inode;
+
+		spin_lock(&fc->lock);
+		list_add(&req->writepages_entry, &fi->writepages);
+		spin_unlock(&fc->lock);
+
+		data->req = req;
+	}
+	set_page_writeback(page);
+
+	copy_highpage(tmp_page, page);
+	req->pages[req->num_pages] = tmp_page;
+	req->page_descs[req->num_pages].offset = 0;
+	req->page_descs[req->num_pages].length = PAGE_SIZE;
+
+	inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
+	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+
+	err = 0;
+	if (is_writeback && fuse_writepage_in_flight(req, page)) {
+		end_page_writeback(page);
+		data->req = NULL;
+		goto out_unlock;
+	}
+	data->orig_pages[req->num_pages] = page;
+
+	/*
+	 * Protected by fc->lock against concurrent access by
+	 * fuse_page_is_writeback().
+	 */
+	spin_lock(&fc->lock);
+	req->num_pages++;
+	spin_unlock(&fc->lock);
+
+out_unlock:
+	unlock_page(page);
+
+	return err;
+}
+
+static int fuse_writepages(struct address_space *mapping,
+			   struct writeback_control *wbc)
+{
+	struct inode *inode = mapping->host;
+	struct fuse_fill_wb_data data;
+	int err;
+
+	err = -EIO;
+	if (is_bad_inode(inode))
+		goto out;
+
+	data.inode = inode;
+	data.req = NULL;
+	data.ff = NULL;
+
+	err = -ENOMEM;
+	data.orig_pages = kzalloc(sizeof(struct page *) *
+				  FUSE_MAX_PAGES_PER_REQ,
+				  GFP_NOFS);
+	if (!data.orig_pages)
+		goto out;
+
+	err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
+	if (data.req) {
+		/* Ignore errors if we can write at least one page */
+		BUG_ON(!data.req->num_pages);
+		fuse_writepages_send(&data);
+		err = 0;
+	}
+	if (data.ff)
+		fuse_file_put(data.ff, false);
+
+	kfree(data.orig_pages);
+out:
+	return err;
+}
+
 static int fuse_launder_page(struct page *page)
 {
 	int err = 0;
@@ -1602,14 +1907,17 @@ static void fuse_vma_close(struct vm_area_struct *vma)
 static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct page *page = vmf->page;
-	/*
-	 * Don't use page->mapping as it may become NULL from a
-	 * concurrent truncate.
-	 */
-	struct inode *inode = vma->vm_file->f_mapping->host;
+	struct inode *inode = file_inode(vma->vm_file);
+
+	file_update_time(vma->vm_file);
+	lock_page(page);
+	if (page->mapping != inode->i_mapping) {
+		unlock_page(page);
+		return VM_FAULT_NOPAGE;
+	}
 
 	fuse_wait_on_page_writeback(inode, page->index);
-	return 0;
+	return VM_FAULT_LOCKED;
 }
 
 static const struct vm_operations_struct fuse_file_vm_ops = {
@@ -2581,6 +2889,7 @@ static const struct file_operations fuse_direct_io_file_operations = {
 static const struct address_space_operations fuse_file_aops = {
 	.readpage	= fuse_readpage,
 	.writepage	= fuse_writepage,
+	.writepages	= fuse_writepages,
 	.launder_page	= fuse_launder_page,
 	.readpages	= fuse_readpages,
 	.set_page_dirty	= __set_page_dirty_nobuffers,
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 5b9e6f3b6aef..643274852c8b 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -321,6 +321,7 @@ struct fuse_req {
 		struct {
 			struct fuse_write_in in;
 			struct fuse_write_out out;
+			struct fuse_req *next;
 		} write;
 		struct fuse_notify_retrieve_in retrieve_in;
 		struct fuse_lk_in lk_in;
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index cb358355ef43..f7eaf2d60083 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -31,6 +31,7 @@
 #define I2O_MINOR		166
 #define MICROCODE_MINOR		184
 #define TUN_MINOR		200
+#define CUSE_MINOR		203
 #define MWAVE_MINOR		219	/* ACP/Mwave Modem */
 #define MPT_MINOR		220
 #define MPT2SAS_MINOR		221