Diffstat (limited to 'fs/nfs/write.c')
-rw-r--r--	fs/nfs/write.c	250
1 file changed, 77 insertions(+), 173 deletions(-)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 7b54b8bb101f..53ff70e23993 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -438,6 +438,7 @@ nfs_mark_request_commit(struct nfs_page *req)
 	radix_tree_tag_set(&nfsi->nfs_page_tree,
 			req->wb_index,
 			NFS_PAGE_TAG_COMMIT);
+	nfsi->ncommit++;
 	spin_unlock(&inode->i_lock);
 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
 	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
@@ -501,57 +502,6 @@ int nfs_reschedule_unstable_write(struct nfs_page *req)
 }
 #endif
 
-/*
- * Wait for a request to complete.
- *
- * Interruptible by fatal signals only.
- */
-static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
-{
-	struct nfs_inode *nfsi = NFS_I(inode);
-	struct nfs_page *req;
-	pgoff_t idx_end, next;
-	unsigned int res = 0;
-	int error;
-
-	if (npages == 0)
-		idx_end = ~0;
-	else
-		idx_end = idx_start + npages - 1;
-
-	next = idx_start;
-	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_LOCKED)) {
-		if (req->wb_index > idx_end)
-			break;
-
-		next = req->wb_index + 1;
-		BUG_ON(!NFS_WBACK_BUSY(req));
-
-		kref_get(&req->wb_kref);
-		spin_unlock(&inode->i_lock);
-		error = nfs_wait_on_request(req);
-		nfs_release_request(req);
-		spin_lock(&inode->i_lock);
-		if (error < 0)
-			return error;
-		res++;
-	}
-	return res;
-}
-
-static void nfs_cancel_commit_list(struct list_head *head)
-{
-	struct nfs_page *req;
-
-	while(!list_empty(head)) {
-		req = nfs_list_entry(head->next);
-		nfs_list_remove_request(req);
-		nfs_clear_request_commit(req);
-		nfs_inode_remove_request(req);
-		nfs_unlock_request(req);
-	}
-}
-
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static int
 nfs_need_commit(struct nfs_inode *nfsi)
@@ -573,11 +523,17 @@ static int
 nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
 {
 	struct nfs_inode *nfsi = NFS_I(inode);
+	int ret;
 
 	if (!nfs_need_commit(nfsi))
 		return 0;
 
-	return nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
+	ret = nfs_scan_list(nfsi, dst, idx_start, npages, NFS_PAGE_TAG_COMMIT);
+	if (ret > 0)
+		nfsi->ncommit -= ret;
+	if (nfs_need_commit(NFS_I(inode)))
+		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+	return ret;
 }
 #else
 static inline int nfs_need_commit(struct nfs_inode *nfsi)
@@ -642,9 +598,10 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
 		spin_lock(&inode->i_lock);
 	}
 
-	if (nfs_clear_request_commit(req))
-		radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
-				req->wb_index, NFS_PAGE_TAG_COMMIT);
+	if (nfs_clear_request_commit(req) &&
+			radix_tree_tag_clear(&NFS_I(inode)->nfs_page_tree,
+				req->wb_index, NFS_PAGE_TAG_COMMIT) != NULL)
+		NFS_I(inode)->ncommit--;
 
 	/* Okay, the request matches. Update the region */
 	if (offset < req->wb_offset) {
@@ -1391,7 +1348,7 @@ static const struct rpc_call_ops nfs_commit_ops = {
 	.rpc_release = nfs_commit_release,
 };
 
-int nfs_commit_inode(struct inode *inode, int how)
+static int nfs_commit_inode(struct inode *inode, int how)
 {
 	LIST_HEAD(head);
 	int res;
@@ -1406,92 +1363,51 @@ int nfs_commit_inode(struct inode *inode, int how)
 	}
 	return res;
 }
-#else
-static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
-{
-	return 0;
-}
-#endif
 
-long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
+static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
 {
-	struct inode *inode = mapping->host;
-	pgoff_t idx_start, idx_end;
-	unsigned int npages = 0;
-	LIST_HEAD(head);
-	int nocommit = how & FLUSH_NOCOMMIT;
-	long pages, ret;
-
-	/* FIXME */
-	if (wbc->range_cyclic)
-		idx_start = 0;
-	else {
-		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
-		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
-		if (idx_end > idx_start) {
-			pgoff_t l_npages = 1 + idx_end - idx_start;
-			npages = l_npages;
-			if (sizeof(npages) != sizeof(l_npages) &&
-			    (pgoff_t)npages != l_npages)
-				npages = 0;
+	struct nfs_inode *nfsi = NFS_I(inode);
+	int flags = FLUSH_SYNC;
+	int ret = 0;
+
+	/* Don't commit yet if this is a non-blocking flush and there are
+	 * lots of outstanding writes for this mapping.
+	 */
+	if (wbc->sync_mode == WB_SYNC_NONE &&
+	    nfsi->ncommit <= (nfsi->npages >> 1))
+		goto out_mark_dirty;
+
+	if (wbc->nonblocking || wbc->for_background)
+		flags = 0;
+	ret = nfs_commit_inode(inode, flags);
+	if (ret >= 0) {
+		if (wbc->sync_mode == WB_SYNC_NONE) {
+			if (ret < wbc->nr_to_write)
+				wbc->nr_to_write -= ret;
+			else
+				wbc->nr_to_write = 0;
 		}
+		return 0;
 	}
-	how &= ~FLUSH_NOCOMMIT;
-	spin_lock(&inode->i_lock);
-	do {
-		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
-		if (ret != 0)
-			continue;
-		if (nocommit)
-			break;
-		pages = nfs_scan_commit(inode, &head, idx_start, npages);
-		if (pages == 0)
-			break;
-		if (how & FLUSH_INVALIDATE) {
-			spin_unlock(&inode->i_lock);
-			nfs_cancel_commit_list(&head);
-			ret = pages;
-			spin_lock(&inode->i_lock);
-			continue;
-		}
-		pages += nfs_scan_commit(inode, &head, 0, 0);
-		spin_unlock(&inode->i_lock);
-		ret = nfs_commit_list(inode, &head, how);
-		spin_lock(&inode->i_lock);
-
-	} while (ret >= 0);
-	spin_unlock(&inode->i_lock);
+out_mark_dirty:
+	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 	return ret;
 }
-
-static int __nfs_write_mapping(struct address_space *mapping, struct writeback_control *wbc, int how)
+#else
+static int nfs_commit_inode(struct inode *inode, int how)
 {
-	int ret;
-
-	ret = nfs_writepages(mapping, wbc);
-	if (ret < 0)
-		goto out;
-	ret = nfs_sync_mapping_wait(mapping, wbc, how);
-	if (ret < 0)
-		goto out;
 	return 0;
-out:
-	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
-	return ret;
 }
 
-/* Two pass sync: first using WB_SYNC_NONE, then WB_SYNC_ALL */
-static int nfs_write_mapping(struct address_space *mapping, int how)
+static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
 {
-	struct writeback_control wbc = {
-		.bdi = mapping->backing_dev_info,
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
-		.range_start = 0,
-		.range_end = LLONG_MAX,
-	};
+	return 0;
+}
+#endif
 
-	return __nfs_write_mapping(mapping, &wbc, how);
+int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+	return nfs_commit_unstable_pages(inode, wbc);
 }
 
 /*
@@ -1499,37 +1415,26 @@ static int nfs_write_mapping(struct address_space *mapping, int how)
  */
 int nfs_wb_all(struct inode *inode)
 {
-	return nfs_write_mapping(inode->i_mapping, 0);
-}
+	struct writeback_control wbc = {
+		.sync_mode = WB_SYNC_ALL,
+		.nr_to_write = LONG_MAX,
+		.range_start = 0,
+		.range_end = LLONG_MAX,
+	};
 
-int nfs_wb_nocommit(struct inode *inode)
-{
-	return nfs_write_mapping(inode->i_mapping, FLUSH_NOCOMMIT);
+	return sync_inode(inode, &wbc);
 }
 
 int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 {
 	struct nfs_page *req;
-	loff_t range_start = page_offset(page);
-	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
-	struct writeback_control wbc = {
-		.bdi = page->mapping->backing_dev_info,
-		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
-		.range_start = range_start,
-		.range_end = range_end,
-	};
 	int ret = 0;
 
 	BUG_ON(!PageLocked(page));
 	for (;;) {
 		req = nfs_page_find_request(page);
 		if (req == NULL)
-			goto out;
-		if (test_bit(PG_CLEAN, &req->wb_flags)) {
-			nfs_release_request(req);
 			break;
-		}
 		if (nfs_lock_request_dontget(req)) {
 			nfs_inode_remove_request(req);
 			/*
@@ -1543,54 +1448,54 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
 		ret = nfs_wait_on_request(req);
 		nfs_release_request(req);
 		if (ret < 0)
-			goto out;
+			break;
 	}
-	if (!PagePrivate(page))
-		return 0;
-	ret = nfs_sync_mapping_wait(page->mapping, &wbc, FLUSH_INVALIDATE);
-out:
 	return ret;
 }
 
-static int nfs_wb_page_priority(struct inode *inode, struct page *page,
-		int how)
+/*
+ * Write back all requests on one page - we do this before reading it.
+ */
+int nfs_wb_page(struct inode *inode, struct page *page)
 {
 	loff_t range_start = page_offset(page);
 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
 	struct writeback_control wbc = {
-		.bdi = page->mapping->backing_dev_info,
 		.sync_mode = WB_SYNC_ALL,
-		.nr_to_write = LONG_MAX,
+		.nr_to_write = 0,
 		.range_start = range_start,
 		.range_end = range_end,
 	};
+	struct nfs_page *req;
+	int need_commit;
 	int ret;
 
-	do {
+	while(PagePrivate(page)) {
 		if (clear_page_dirty_for_io(page)) {
 			ret = nfs_writepage_locked(page, &wbc);
 			if (ret < 0)
 				goto out_error;
-		} else if (!PagePrivate(page))
+		}
+		req = nfs_find_and_lock_request(page);
+		if (!req)
 			break;
-		ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
-		if (ret < 0)
+		if (IS_ERR(req)) {
+			ret = PTR_ERR(req);
 			goto out_error;
-	} while (PagePrivate(page));
+		}
+		need_commit = test_bit(PG_CLEAN, &req->wb_flags);
+		nfs_clear_page_tag_locked(req);
+		if (need_commit) {
+			ret = nfs_commit_inode(inode, FLUSH_SYNC);
+			if (ret < 0)
+				goto out_error;
+		}
+	}
 	return 0;
 out_error:
-	__mark_inode_dirty(inode, I_DIRTY_PAGES);
 	return ret;
 }
 
-/*
- * Write back all requests on one page - we do this before reading it.
- */
-int nfs_wb_page(struct inode *inode, struct page* page)
-{
-	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
-}
-
 #ifdef CONFIG_MIGRATION
 int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 		struct page *page)
@@ -1598,8 +1503,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 	struct nfs_page *req;
 	int ret;
 
-	if (PageFsCache(page))
-		nfs_fscache_release_page(page, GFP_KERNEL);
+	nfs_fscache_release_page(page, GFP_KERNEL);
 
 	req = nfs_find_and_lock_request(page);
 	ret = PTR_ERR(req);