-rw-r--r--  fs/nfs/file.c          |  9
-rw-r--r--  fs/nfs/inode.c         |  2
-rw-r--r--  fs/nfs/write.c         | 93
-rw-r--r--  include/linux/nfs_fs.h | 27
4 files changed, 92 insertions, 39 deletions
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index cc93865cea93..d6ee60fc3ba6 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -307,11 +307,14 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse
 
 static void nfs_invalidate_page(struct page *page, unsigned long offset)
 {
-        struct inode *inode = page->mapping->host;
+        loff_t range_start, range_end;
 
+        if (offset != 0)
+                return;
         /* Cancel any unstarted writes on this page */
-        if (offset == 0)
-                nfs_sync_inode_wait(inode, page->index, 1, FLUSH_INVALIDATE);
+        range_start = page_offset(page);
+        range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+        nfs_sync_mapping_range(page->mapping, range_start, range_end, FLUSH_INVALIDATE);
 }
 
 static int nfs_release_page(struct page *page, gfp_t gfp)
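
The nfs_invalidate_page() hunk above shows the conversion the whole patch performs: callers stop naming a (page index, page count) pair and instead pass the same span as byte offsets into the mapping. A minimal sketch of that equivalence follows; the helper name and includes are illustrative, not part of the patch.

#include <linux/nfs_fs.h>
#include <linux/pagemap.h>

/* Illustrative helper: the byte range handed to nfs_sync_mapping_range()
 * covers exactly the page that the old nfs_sync_inode_wait(inode,
 * page->index, 1, how) arguments described. */
static void example_invalidate_one_page(struct page *page)
{
        loff_t range_start = page_offset(page);  /* (loff_t)page->index << PAGE_CACHE_SHIFT */
        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);

        nfs_sync_mapping_range(page->mapping, range_start, range_end,
                               FLUSH_INVALIDATE);
}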
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 08cc4c5919ab..7c32187f953e 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -422,7 +422,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
         int err;
 
         /* Flush out writes to the server in order to update c/mtime */
-        nfs_sync_inode_wait(inode, 0, 0, FLUSH_NOCOMMIT);
+        nfs_sync_mapping_range(inode->i_mapping, 0, 0, FLUSH_NOCOMMIT);
 
         /*
          * We may force a getattr if the user cares about atime.
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index dbc89fa7e9d5..310fdeca6250 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -80,6 +80,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context*,
 static int nfs_wait_on_write_congestion(struct address_space *, int);
 static int nfs_wait_on_requests(struct inode *, unsigned long, unsigned int);
 static long nfs_flush_mapping(struct address_space *mapping, struct writeback_control *wbc, int how);
+static int nfs_wb_page_priority(struct inode *inode, struct page *page, int how);
 static const struct rpc_call_ops nfs_write_partial_ops;
 static const struct rpc_call_ops nfs_write_full_ops;
 static const struct rpc_call_ops nfs_commit_ops;
@@ -1476,29 +1477,38 @@ int nfs_commit_inode(struct inode *inode, int how)
 }
 #endif
 
-long nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
-                unsigned int npages, int how)
+long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
 {
+        struct inode *inode = mapping->host;
         struct nfs_inode *nfsi = NFS_I(inode);
-        struct address_space *mapping = inode->i_mapping;
-        struct writeback_control wbc = {
-                .bdi = mapping->backing_dev_info,
-                .sync_mode = WB_SYNC_ALL,
-                .nr_to_write = LONG_MAX,
-                .range_start = ((loff_t)idx_start) << PAGE_CACHE_SHIFT,
-                .range_end = ((loff_t)(idx_start + npages - 1)) << PAGE_CACHE_SHIFT,
-        };
+        unsigned long idx_start, idx_end;
+        unsigned int npages = 0;
         LIST_HEAD(head);
         int nocommit = how & FLUSH_NOCOMMIT;
         long pages, ret;
 
+        /* FIXME */
+        if (wbc->range_cyclic)
+                idx_start = 0;
+        else {
+                idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
+                idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
+                if (idx_end > idx_start) {
+                        unsigned long l_npages = 1 + idx_end - idx_start;
+                        npages = l_npages;
+                        if (sizeof(npages) != sizeof(l_npages) &&
+                                        (unsigned long)npages != l_npages)
+                                npages = 0;
+                }
+        }
         how &= ~FLUSH_NOCOMMIT;
         spin_lock(&nfsi->req_lock);
         do {
+                wbc->pages_skipped = 0;
                 ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
                 if (ret != 0)
                         continue;
-                pages = nfs_scan_dirty(mapping, &wbc, &head);
+                pages = nfs_scan_dirty(mapping, wbc, &head);
                 if (pages != 0) {
                         spin_unlock(&nfsi->req_lock);
                         if (how & FLUSH_INVALIDATE) {
@@ -1509,11 +1519,16 @@ long nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
                         spin_lock(&nfsi->req_lock);
                         continue;
                 }
+                if (wbc->pages_skipped != 0)
+                        continue;
                 if (nocommit)
                         break;
                 pages = nfs_scan_commit(inode, &head, idx_start, npages);
-                if (pages == 0)
+                if (pages == 0) {
+                        if (wbc->pages_skipped != 0)
+                                continue;
                         break;
+                }
                 if (how & FLUSH_INVALIDATE) {
                         spin_unlock(&nfsi->req_lock);
                         nfs_cancel_commit_list(&head);
@@ -1530,6 +1545,60 @@ long nfs_sync_inode_wait(struct inode *inode, unsigned long idx_start,
         return ret;
 }
 
+/*
+ * flush the inode to disk.
+ */
+int nfs_wb_all(struct inode *inode)
+{
+        struct address_space *mapping = inode->i_mapping;
+        struct writeback_control wbc = {
+                .bdi = mapping->backing_dev_info,
+                .sync_mode = WB_SYNC_ALL,
+                .nr_to_write = LONG_MAX,
+                .range_cyclic = 1,
+        };
+        int ret;
+
+        ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
+        if (ret >= 0)
+                return 0;
+        return ret;
+}
+
+int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
+{
+        struct writeback_control wbc = {
+                .bdi = mapping->backing_dev_info,
+                .sync_mode = WB_SYNC_ALL,
+                .nr_to_write = LONG_MAX,
+                .range_start = range_start,
+                .range_end = range_end,
+        };
+        int ret;
+
+        ret = nfs_sync_mapping_wait(mapping, &wbc, how);
+        if (ret >= 0)
+                return 0;
+        return ret;
+}
+
+static int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
+{
+        loff_t range_start = page_offset(page);
+        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
+
+        return nfs_sync_mapping_range(inode->i_mapping, range_start, range_end, how | FLUSH_STABLE);
+}
+
+/*
+ * Write back all requests on one page - we do this before reading it.
+ */
+int nfs_wb_page(struct inode *inode, struct page* page)
+{
+        return nfs_wb_page_priority(inode, page, 0);
+}
+
+
 int __init nfs_init_writepagecache(void)
 {
         nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
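
The write.c hunks leave fs/nfs with three flush entry points: nfs_wb_all() walks the whole inode via a range_cyclic writeback control, nfs_sync_mapping_range() flushes an explicit byte range, and nfs_wb_page() flushes the requests behind one page, adding FLUSH_STABLE through the now-static nfs_wb_page_priority() helper. A hedged usage sketch follows; the caller function and its includes are hypothetical, only the three NFS calls come from this patch.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nfs_fs.h>
#include <linux/pagemap.h>

/* Hypothetical caller: exercise the three new entry points in turn. */
static int example_flush(struct inode *inode, struct page *page)
{
        loff_t start = page_offset(page);
        loff_t end = start + (loff_t)(PAGE_CACHE_SIZE - 1);
        int err;

        /* requests backing this one page, written with FLUSH_STABLE */
        err = nfs_wb_page(inode, page);
        if (err < 0)
                return err;

        /* an explicit byte range, with caller-chosen flags (none here) */
        err = nfs_sync_mapping_range(inode->i_mapping, start, end, 0);
        if (err < 0)
                return err;

        /* every dirty page of the inode, followed by a commit */
        return nfs_wb_all(inode);
}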
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index f8190ae9e3fb..f2ec9be1e22f 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -432,7 +432,10 @@ extern void nfs_writedata_release(void *);
  * Try to write back everything synchronously (but check the
  * return value!)
  */
-extern long nfs_sync_inode_wait(struct inode *, unsigned long, unsigned int, int);
+extern long nfs_sync_mapping_wait(struct address_space *, struct writeback_control *, int);
+extern int nfs_sync_mapping_range(struct address_space *, loff_t, loff_t, int);
+extern int nfs_wb_all(struct inode *inode);
+extern int nfs_wb_page(struct inode *inode, struct page* page);
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 extern int nfs_commit_inode(struct inode *, int);
 extern struct nfs_write_data *nfs_commit_alloc(void);
@@ -452,28 +455,6 @@ nfs_have_writebacks(struct inode *inode)
         return NFS_I(inode)->npages != 0;
 }
 
-static inline int
-nfs_wb_all(struct inode *inode)
-{
-        int error = nfs_sync_inode_wait(inode, 0, 0, 0);
-        return (error < 0) ? error : 0;
-}
-
-/*
- * Write back all requests on one page - we do this before reading it.
- */
-static inline int nfs_wb_page_priority(struct inode *inode, struct page* page, int how)
-{
-        int error = nfs_sync_inode_wait(inode, page->index, 1,
-                        how | FLUSH_STABLE);
-        return (error < 0) ? error : 0;
-}
-
-static inline int nfs_wb_page(struct inode *inode, struct page* page)
-{
-        return nfs_wb_page_priority(inode, page, 0);
-}
-
 /*
  * Allocate nfs_write_data structures
  */
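
With the inline helpers removed from nfs_fs.h, only nfs_wb_all(), nfs_wb_page() and nfs_sync_mapping_range() stay visible outside fs/nfs/write.c; nfs_wb_page_priority() is now static there. A sketch, assuming some other fs/nfs file still wants the old priority variant (the helper name below is illustrative):

#include <linux/nfs_fs.h>
#include <linux/pagemap.h>

/* Illustrative stand-in for the removed nfs_wb_page_priority() inline,
 * rebuilt on top of the exported nfs_sync_mapping_range(). */
static int my_wb_page_priority(struct inode *inode, struct page *page, int how)
{
        loff_t range_start = page_offset(page);
        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);

        return nfs_sync_mapping_range(inode->i_mapping, range_start, range_end,
                                      how | FLUSH_STABLE);
}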