author    Christoph Hellwig <hch@sgi.com>  2006-01-11 04:49:28 -0500
committer Nathan Scott <nathans@sgi.com>   2006-01-11 04:49:28 -0500
commit    6c4fe19f66a839bce68fcb7b99cdcb0f31c7a59e
tree      c3f1bd213af0ccc5dfed6ce68f83f126a03fa48f
parent    7336cea8c2737bbaf0296d67782f760828301d56
[XFS] cluster rewrites

We can cluster mapped pages as well.  This improves performance on
rewrites, since it reduces the number of allocator calls.

SGI-PV: 947118
SGI-Modid: xfs-linux-melb:xfs-kern:203829a
Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 63 +++++++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 47 insertions(+), 16 deletions(-)
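
The heart of the change shows in the probe helpers below: they now take a
mapped flag and sum contiguous up-to-date buffers whose mapped state matches
it, instead of stopping at the first mapped buffer.  Here is a minimal
userspace model of that walk, as a sketch only (struct buf, probe_page and
the flat array are illustrative stand-ins; the kernel code walks a circular
buffer_head list, as the first two hunks show):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for one buffer_head of a page. */
struct buf {
	size_t	size;		/* bytes covered by this buffer */
	int	uptodate;
	int	mapped;
};

/*
 * Model of the xfs_probe_page() walk after this patch: sum leading
 * up-to-date buffers whose mapped state matches 'mapped', capped at
 * pg_offset bytes.  The old xfs_probe_unmapped_page() bailed out on
 * any mapped buffer, so rewrites of allocated blocks never clustered.
 */
static size_t probe_page(const struct buf *bufs, int nbufs,
			 size_t pg_offset, int mapped)
{
	size_t ret = 0;
	int i;

	for (i = 0; i < nbufs; i++) {
		if (!bufs[i].uptodate)
			break;
		if (mapped != bufs[i].mapped)
			break;
		ret += bufs[i].size;
		if (ret >= pg_offset)
			break;
	}
	return ret;
}

int main(void)
{
	const struct buf bufs[] = {
		{ 1024, 1, 1 },	/* mapped: a rewrite */
		{ 1024, 1, 1 },	/* mapped: a rewrite */
		{ 1024, 1, 0 },	/* unmapped: ends the mapped run */
	};

	/* mapped=1 measures the rewrite cluster: prints 2048 */
	printf("%zu\n", probe_page(bufs, 3, 4096, 1));
	/* mapped=0 finds no unmapped run at the front: prints 0 */
	printf("%zu\n", probe_page(bufs, 3, 4096, 0));
	return 0;
}

Called with mapped set, the same walk measures how far a rewrite extends,
which is what allows one mapping request to cover a run of already-allocated
blocks.
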
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 562867316639..9ea33ea6a225 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -470,13 +470,13 @@ xfs_map_at_offset(
 }
 
 /*
- * Look for a page at index which is unlocked and not mapped
- * yet - clustering for mmap write case.
+ * Look for a page at index that is suitable for clustering.
  */
 STATIC unsigned int
-xfs_probe_unmapped_page(
+xfs_probe_page(
 	struct page		*page,
-	unsigned int		pg_offset)
+	unsigned int		pg_offset,
+	int			mapped)
 {
 	int			ret = 0;
 
@@ -489,25 +489,28 @@ xfs_probe_unmapped_page(
 
 			bh = head = page_buffers(page);
 			do {
-				if (buffer_mapped(bh) || !buffer_uptodate(bh))
+				if (!buffer_uptodate(bh))
+					break;
+				if (mapped != buffer_mapped(bh))
 					break;
 				ret += bh->b_size;
 				if (ret >= pg_offset)
 					break;
 			} while ((bh = bh->b_this_page) != head);
 		} else
-			ret = PAGE_CACHE_SIZE;
+			ret = mapped ? 0 : PAGE_CACHE_SIZE;
 	}
 
 	return ret;
 }
 
 STATIC size_t
-xfs_probe_unmapped_cluster(
+xfs_probe_cluster(
 	struct inode		*inode,
 	struct page		*startpage,
 	struct buffer_head	*bh,
-	struct buffer_head	*head)
+	struct buffer_head	*head,
+	int			mapped)
 {
 	struct pagevec		pvec;
 	pgoff_t			tindex, tlast, tloff;
@@ -516,7 +519,7 @@ xfs_probe_unmapped_cluster(
 
 	/* First sum forwards in this page */
 	do {
-		if (buffer_mapped(bh))
+		if (mapped != buffer_mapped(bh))
 			return total;
 		total += bh->b_size;
 	} while ((bh = bh->b_this_page) != head);
@@ -550,7 +553,7 @@ xfs_probe_unmapped_cluster(
 				pg_offset = PAGE_CACHE_SIZE;
 
 			if (page->index == tindex && !TestSetPageLocked(page)) {
-				len = xfs_probe_unmapped_page(page, pg_offset);
+				len = xfs_probe_page(page, pg_offset, mapped);
 				unlock_page(page);
 			}
 
@@ -592,6 +595,8 @@ xfs_is_delayed_page(
 				acceptable = (type == IOMAP_UNWRITTEN);
 			else if (buffer_delay(bh))
 				acceptable = (type == IOMAP_DELAY);
+			else if (buffer_mapped(bh))
+				acceptable = (type == 0);
 			else
 				break;
 		} while ((bh = bh->b_this_page) != head);
@@ -804,6 +809,7 @@ xfs_page_state_convert(
 	ssize_t			size, len;
 	int			flags, err, iomap_valid = 0, uptodate = 1;
 	int			page_dirty, count = 0, trylock_flag = 0;
+	int			all_bh = unmapped;
 
 	/* wait for other IO threads? */
 	if (startio && wbc->sync_mode != WB_SYNC_NONE)
@@ -845,6 +851,8 @@ xfs_page_state_convert(
 
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
+	flags = -1;
+	type = 0;
 
 	/* TODO: cleanup count and page_dirty */
 
@@ -878,6 +886,12 @@ xfs_page_state_convert(
 		if (buffer_unwritten(bh) || buffer_delay(bh) ||
 		    ((buffer_uptodate(bh) || PageUptodate(page)) &&
 		     !buffer_mapped(bh) && (unmapped || startio))) {
+			/*
+			 * Make sure we don't use a read-only iomap
+			 */
+			if (flags == BMAPI_READ)
+				iomap_valid = 0;
+
 			if (buffer_unwritten(bh)) {
 				type = IOMAP_UNWRITTEN;
 				flags = BMAPI_WRITE|BMAPI_IGNSTATE;
@@ -887,14 +901,14 @@ xfs_page_state_convert(
 				if (!startio)
 					flags |= trylock_flag;
 			} else {
-				type = 0;
+				type = IOMAP_NEW;
 				flags = BMAPI_WRITE|BMAPI_MMAP;
 			}
 
 			if (!iomap_valid) {
-				if (type == 0) {
-					size = xfs_probe_unmapped_cluster(inode,
-							page, bh, head);
+				if (type == IOMAP_NEW) {
+					size = xfs_probe_cluster(inode,
+							page, bh, head, 0);
 				} else {
 					size = len;
 				}
@@ -921,10 +935,27 @@ xfs_page_state_convert(
 				count++;
 			}
 		} else if (buffer_uptodate(bh) && startio) {
-			type = 0;
+			/*
+			 * we got here because the buffer is already mapped.
+			 * That means it must already have extents allocated
+			 * underneath it. Map the extent by reading it.
+			 */
+			if (!iomap_valid || type != 0) {
+				flags = BMAPI_READ;
+				size = xfs_probe_cluster(inode, page, bh,
+								head, 1);
+				err = xfs_map_blocks(inode, offset, size,
+						&iomap, flags);
+				if (err)
+					goto error;
+				iomap_valid = xfs_iomap_valid(&iomap, offset);
+			}
 
+			type = 0;
 			if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
 				ASSERT(buffer_mapped(bh));
+				if (iomap_valid)
+					all_bh = 1;
 				xfs_add_to_ioend(inode, bh, offset, type,
 						&ioend, !iomap_valid);
 				page_dirty--;
@@ -953,7 +984,7 @@ xfs_page_state_convert(
953 PAGE_CACHE_SHIFT; 984 PAGE_CACHE_SHIFT;
954 tlast = min_t(pgoff_t, offset, last_index); 985 tlast = min_t(pgoff_t, offset, last_index);
955 xfs_cluster_write(inode, page->index + 1, &iomap, &ioend, 986 xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
956 wbc, startio, unmapped, tlast); 987 wbc, startio, all_bh, tlast);
957 } 988 }
958 989
959 if (iohead) 990 if (iohead)
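
To summarize the new rewrite branch in xfs_page_state_convert() above: an
already-mapped buffer now obtains one read-mode mapping (BMAPI_READ) sized
by xfs_probe_cluster(..., 1), and that iomap is reused for every buffer it
covers rather than handling each buffer on its own.  A rough self-contained
sketch of the control flow, with hypothetical stub helpers standing in for
xfs_map_blocks() and xfs_iomap_valid():

#include <stdio.h>

enum { BMAPI_READ = 1 };

/* Hypothetical stand-ins for the kernel structures and helpers. */
struct iomap_stub {
	long	offset;
	long	size;
};

/* Models xfs_map_blocks() with BMAPI_READ: look up the existing
 * extent covering [offset, offset + size); nothing is allocated. */
static int map_blocks_stub(long offset, long size, int flags,
			   struct iomap_stub *map)
{
	(void)flags;
	map->offset = offset;
	map->size = size;
	return 0;
}

/* Models xfs_iomap_valid(): does the cached mapping cover offset? */
static int iomap_valid_stub(const struct iomap_stub *map, long offset)
{
	return offset >= map->offset && offset < map->offset + map->size;
}

int main(void)
{
	const long bsize = 1024;
	const long cluster = 4 * bsize;	/* sized by xfs_probe_cluster(..., 1) */
	struct iomap_stub iomap = { 0, 0 };
	int iomap_valid = 0;
	long offset;

	for (offset = 0; offset < cluster; offset += bsize) {
		if (!iomap_valid) {
			/* One BMAPI_READ mapping for the whole cluster. */
			map_blocks_stub(offset, cluster, BMAPI_READ, &iomap);
			iomap_valid = iomap_valid_stub(&iomap, offset);
		}
		if (iomap_valid)
			printf("buffer at %ld: added to ioend, no allocator call\n",
			       offset);
	}
	return 0;
}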