author     Trond Myklebust <Trond.Myklebust@netapp.com>    2010-02-19 20:03:28 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>    2010-03-05 15:44:55 -0500
commit     7f2f12d963e7c33a93bfb0b22f0178eb1e6a4196 (patch)
tree       cfc2694fd38f40bc22d22c3ef228f871ed827fe2 /fs/nfs/write.c
parent     acdc53b2146c7ee67feb1f02f7bc3020126514b8 (diff)
NFS: Simplify nfs_wb_page()
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
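In short: instead of flushing the page and then calling nfs_sync_mapping_wait() (which looped over nfs_wait_on_requests_locked() and nfs_scan_commit()), nfs_wb_page() now drives the whole write-back itself. While the page still carries an NFS request, it writes the page back if dirty, locks the attached request, and issues a FLUSH_SYNC commit when the request is marked PG_CLEAN. For reference, the post-patch function, consolidated from the '+' lines of the last hunk below (the inline comments are annotations added here, not part of the patch):

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
        loff_t range_start = page_offset(page);
        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 0,
                .range_start = range_start,
                .range_end = range_end,
        };
        struct nfs_page *req;
        int need_commit;
        int ret;

        while (PagePrivate(page)) {
                /* Page is dirty: write it back synchronously */
                if (clear_page_dirty_for_io(page)) {
                        ret = nfs_writepage_locked(page, &wbc);
                        if (ret < 0)
                                goto out_error;
                }
                /* Lock the nfs_page request still attached to the page */
                req = nfs_find_and_lock_request(page);
                if (!req)       /* no request left on the page: done */
                        break;
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
                        goto out_error;
                }
                /* PG_CLEAN: written to the server but not yet committed */
                need_commit = test_bit(PG_CLEAN, &req->wb_flags);
                nfs_clear_page_tag_locked(req);
                if (need_commit) {
                        ret = nfs_commit_inode(inode, FLUSH_SYNC);
                        if (ret < 0)
                                goto out_error;
                }
        }
        return 0;
out_error:
        return ret;
}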
Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--   fs/nfs/write.c   120
1 file changed, 23 insertions(+), 97 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 0b323091b481..53ff70e23993 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -502,44 +502,6 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
 }
 #endif
 
-/*
- * Wait for a request to complete.
- *
- * Interruptible by fatal signals only.
- */
-static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
-{
-        struct nfs_inode *nfsi = NFS_I(inode);
-        struct nfs_page *req;
-        pgoff_t idx_end, next;
-        unsigned int res = 0;
-        int error;
-
-        if (npages == 0)
-                idx_end = ~0;
-        else
-                idx_end = idx_start + npages - 1;
-
-        next = idx_start;
-        while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
-                if (req->wb_index > idx_end)
-                        break;
-
-                next = req->wb_index + 1;
-                BUG_ON(!NFS_WBACK_BUSY(req));
-
-                kref_get(&req->wb_kref);
-                spin_unlock(&inode->i_lock);
-                error = nfs_wait_on_request(req);
-                nfs_release_request(req);
-                spin_lock(&inode->i_lock);
-                if (error < 0)
-                        return error;
-                res++;
-        }
-        return res;
-}
-
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static int
 nfs_need_commit(struct nfs_inode *nfsi)
@@ -1432,7 +1394,7 @@ out_mark_dirty:
         return ret;
 }
 #else
-static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
+static int nfs_commit_inode(struct inode *inode, int how)
 {
         return 0;
 }
@@ -1448,46 +1410,6 @@ int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
         return nfs_commit_unstable_pages(inode, wbc);
 }
 
-long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
-{
-        struct inode *inode = mapping->host;
-        pgoff_t idx_start, idx_end;
-        unsigned int npages = 0;
-        LIST_HEAD(head);
-        long pages, ret;
-
-        /* FIXME */
-        if (wbc->range_cyclic)
-                idx_start = 0;
-        else {
-                idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
-                idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
-                if (idx_end > idx_start) {
-                        pgoff_t l_npages = 1 + idx_end - idx_start;
-                        npages = l_npages;
-                        if (sizeof(npages) != sizeof(l_npages) &&
-                                        (pgoff_t)npages != l_npages)
-                                npages = 0;
-                }
-        }
-        spin_lock(&inode->i_lock);
-        do {
-                ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
-                if (ret != 0)
-                        continue;
-                pages = nfs_scan_commit(inode, &head, idx_start, npages);
-                if (pages == 0)
-                        break;
-                pages += nfs_scan_commit(inode, &head, 0, 0);
-                spin_unlock(&inode->i_lock);
-                ret = nfs_commit_list(inode, &head, how);
-                spin_lock(&inode->i_lock);
-
-        } while (ret >= 0);
-        spin_unlock(&inode->i_lock);
-        return ret;
-}
-
 /*
  * flush the inode to disk.
  */
@@ -1531,45 +1453,49 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
         return ret;
 }
 
-static int nfs_wb_page_priority(struct inode *inode, struct page *page,
-                                int how)
+/*
+ * Write back all requests on one page - we do this before reading it.
+ */
+int nfs_wb_page(struct inode *inode, struct page *page)
 {
         loff_t range_start = page_offset(page);
         loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
         struct writeback_control wbc = {
-                .bdi = page->mapping->backing_dev_info,
                 .sync_mode = WB_SYNC_ALL,
-                .nr_to_write = LONG_MAX,
+                .nr_to_write = 0,
                 .range_start = range_start,
                 .range_end = range_end,
         };
+        struct nfs_page *req;
+        int need_commit;
         int ret;
 
-        do {
+        while(PagePrivate(page)) {
                 if (clear_page_dirty_for_io(page)) {
                         ret = nfs_writepage_locked(page, &wbc);
                         if (ret < 0)
                                 goto out_error;
-                } else if (!PagePrivate(page))
+                }
+                req = nfs_find_and_lock_request(page);
+                if (!req)
                         break;
-                ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
-                if (ret < 0)
-                        goto out_error;
-        } while (PagePrivate(page));
+                if (IS_ERR(req)) {
+                        ret = PTR_ERR(req);
+                        goto out_error;
+                }
+                need_commit = test_bit(PG_CLEAN, &req->wb_flags);
+                nfs_clear_page_tag_locked(req);
+                if (need_commit) {
+                        ret = nfs_commit_inode(inode, FLUSH_SYNC);
+                        if (ret < 0)
+                                goto out_error;
+                }
+        }
         return 0;
 out_error:
-        __mark_inode_dirty(inode, I_DIRTY_PAGES);
         return ret;
 }
 
-/*
- * Write back all requests on one page - we do this before reading it.
- */
-int nfs_wb_page(struct inode *inode, struct page* page)
-{
-        return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
-}
-
 #ifdef CONFIG_MIGRATION
 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
                 struct page *page)