 fs/xfs/linux-2.6/xfs_aops.c | 144
 1 file changed, 88 insertions(+), 56 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index e99d04d3fe82..e998009c0f52 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -40,6 +40,7 @@
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
 #include <linux/mpage.h>
+#include <linux/pagevec.h>
 #include <linux/writeback.h>
 
 STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
@@ -501,18 +502,13 @@ xfs_map_at_offset(
  */
 STATIC unsigned int
 xfs_probe_unmapped_page(
-	struct address_space	*mapping,
-	pgoff_t			index,
+	struct page		*page,
 	unsigned int		pg_offset)
 {
-	struct page		*page;
 	int			ret = 0;
 
-	page = find_trylock_page(mapping, index);
-	if (!page)
-		return 0;
 	if (PageWriteback(page))
-		goto out;
+		return 0;
 
 	if (page->mapping && PageDirty(page)) {
 		if (page_has_buffers(page)) {
@@ -530,8 +526,6 @@ xfs_probe_unmapped_page(
 			ret = PAGE_CACHE_SIZE;
 	}
 
-out:
-	unlock_page(page);
 	return ret;
 }
 
@@ -542,59 +536,75 @@ xfs_probe_unmapped_cluster(
 	struct buffer_head	*bh,
 	struct buffer_head	*head)
 {
-	size_t			len, total = 0;
+	struct pagevec		pvec;
 	pgoff_t			tindex, tlast, tloff;
-	unsigned int		pg_offset;
-	struct address_space	*mapping = inode->i_mapping;
+	size_t			total = 0;
+	int			done = 0, i;
 
 	/* First sum forwards in this page */
 	do {
 		if (buffer_mapped(bh))
-			break;
+			return total;
 		total += bh->b_size;
 	} while ((bh = bh->b_this_page) != head);
 
-	/* If we reached the end of the page, sum forwards in
-	 * following pages.
-	 */
-	if (bh == head) {
-		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-		/* Prune this back to avoid pathological behavior */
-		tloff = min(tlast, startpage->index + 64);
-		for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
-			len = xfs_probe_unmapped_page(mapping, tindex,
-							PAGE_CACHE_SIZE);
-			if (!len)
-				return total;
+	/* if we reached the end of the page, sum forwards in following pages */
+	tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+	tindex = startpage->index + 1;
+
+	/* Prune this back to avoid pathological behavior */
+	tloff = min(tlast, startpage->index + 64);
+
+	pagevec_init(&pvec, 0);
+	while (!done && tindex <= tloff) {
+		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
+			break;
+
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			struct page *page = pvec.pages[i];
+			size_t pg_offset, len = 0;
+
+			if (tindex == tlast) {
+				pg_offset =
+					i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
+				if (!pg_offset)
+					break;
+			} else
+				pg_offset = PAGE_CACHE_SIZE;
+
+			if (page->index == tindex && !TestSetPageLocked(page)) {
+				len = xfs_probe_unmapped_page(page, pg_offset);
+				unlock_page(page);
+			}
+
+			if (!len) {
+				done = 1;
+				break;
+			}
+
 			total += len;
 		}
-		if (tindex == tlast &&
-		    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
-			total += xfs_probe_unmapped_page(mapping,
-							tindex, pg_offset);
-		}
+
+		pagevec_release(&pvec);
+		cond_resched();
 	}
+
 	return total;
 }
 
 /*
- * Probe for a given page (index) in the inode and test if it is suitable
- * for writing as part of an unwritten or delayed allocate extent.
- * Returns page locked and with an extra reference count if so, else NULL.
+ * Test if a given page is suitable for writing as part of an unwritten
+ * or delayed allocate extent.
  */
-STATIC struct page *
-xfs_probe_delayed_page(
-	struct inode		*inode,
-	pgoff_t			index,
+STATIC int
+xfs_is_delayed_page(
+	struct page		*page,
 	unsigned int		type)
 {
-	struct page		*page;
-
-	page = find_trylock_page(inode->i_mapping, index);
-	if (!page)
-		return NULL;
 	if (PageWriteback(page))
-		goto out;
+		return 0;
 
 	if (page->mapping && page_has_buffers(page)) {
 		struct buffer_head *bh, *head;
@@ -611,12 +621,10 @@ xfs_probe_delayed_page(
 		} while ((bh = bh->b_this_page) != head);
 
 		if (acceptable)
-			return page;
+			return 1;
 	}
 
-out:
-	unlock_page(page);
-	return NULL;
+	return 0;
 }
 
 /*
@@ -629,10 +637,10 @@ STATIC int
 xfs_convert_page(
 	struct inode		*inode,
 	struct page		*page,
+	loff_t			tindex,
 	xfs_iomap_t		*iomapp,
 	xfs_ioend_t		**ioendp,
 	struct writeback_control *wbc,
-	void			*private,
 	int			startio,
 	int			all_bh)
 {
@@ -644,6 +652,17 @@ xfs_convert_page(
 	int			len, page_dirty;
 	int			count = 0, done = 0, uptodate = 1;
 
+	if (page->index != tindex)
+		goto fail;
+	if (TestSetPageLocked(page))
+		goto fail;
+	if (PageWriteback(page))
+		goto fail_unlock_page;
+	if (page->mapping != inode->i_mapping)
+		goto fail_unlock_page;
+	if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
+		goto fail_unlock_page;
+
 	end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
 
 	/*
@@ -715,6 +734,10 @@ xfs_convert_page(
 	}
 
 	return done;
+ fail_unlock_page:
+	unlock_page(page);
+ fail:
+	return 1;
 }
 
 /*
@@ -732,16 +755,25 @@ xfs_cluster_write(
 	int			all_bh,
 	pgoff_t			tlast)
 {
-	struct page		*page;
-	unsigned int		type = (*ioendp)->io_type;
-	int			done;
+	struct pagevec		pvec;
+	int			done = 0, i;
 
-	for (done = 0; tindex <= tlast && !done; tindex++) {
-		page = xfs_probe_delayed_page(inode, tindex, type);
-		if (!page)
+	pagevec_init(&pvec, 0);
+	while (!done && tindex <= tlast) {
+		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+		if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
 			break;
-		done = xfs_convert_page(inode, page, iomapp, ioendp,
-				wbc, NULL, startio, all_bh);
+
+		for (i = 0; i < pagevec_count(&pvec); i++) {
+			done = xfs_convert_page(inode, pvec.pages[i], tindex++,
+					iomapp, ioendp, wbc, startio, all_bh);
+			if (done)
+				break;
+		}
+
+		pagevec_release(&pvec);
+		cond_resched();
 	}
 }
 
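For reference, both rewritten loops above follow the same gang-lookup pattern from <linux/pagevec.h>. The sketch below is illustrative only and is not part of the patch: the function name probe_pages_example() and the per-page work are hypothetical placeholders, and it assumes the 2.6-era page-cache API used in this file (pagevec_lookup, TestSetPageLocked, PAGEVEC_SIZE).

/*
 * Illustrative sketch only: walk a mapping's page cache in PAGEVEC_SIZE
 * batches, the same shape as the loops introduced in
 * xfs_probe_unmapped_cluster() and xfs_cluster_write().
 */
#include <linux/kernel.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

static int probe_pages_example(struct address_space *mapping,
			       pgoff_t tindex, pgoff_t tlast)
{
	struct pagevec	pvec;
	int		done = 0, i;

	pagevec_init(&pvec, 0);
	while (!done && tindex <= tlast) {
		/* never ask for more pages than remain in the range */
		unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);

		/* one radix-tree walk fills up to 'len' page pointers */
		if (!pagevec_lookup(&pvec, mapping, tindex, len))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/*
			 * The lookup skips holes, so verify the index is the
			 * one expected, and only trylock so the scan never
			 * blocks on a page someone else holds locked.
			 */
			if (page->index != tindex || TestSetPageLocked(page)) {
				done = 1;
				break;
			}
			/* ... per-page work goes here ... */
			unlock_page(page);
			tindex++;
		}

		/* drop the references pagevec_lookup() took on each page */
		pagevec_release(&pvec);
		cond_resched();
	}
	return done;
}

Because pagevec_lookup() takes a reference on every page it returns, pagevec_release() has to run once per batch; the cond_resched() keeps long scans preemptible, and the trylock preserves the old find_trylock_page() behaviour of never sleeping on a locked page.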