path: root/fs/xfs
author	Christoph Hellwig <hch@sgi.com>	2006-01-11 04:48:33 -0500
committer	Nathan Scott <nathans@sgi.com>	2006-01-11 04:48:33 -0500
commit	1defeac9d4fffa3eabc4fba887e8ff5b1da7f361 (patch)
tree	2943a4d8f5fda0dc4e5722af95ced19fe5d4bdb1 /fs/xfs
parent	10ce444428c2329eb2aaf84850b5c7c09cecc58c (diff)
[XFS] clean up the xfs_offset_to_map interface

Currently we pass a struct page and a relative offset into that page
around, and xfs_offset_to_map returns the current xfs_iomap_t if the block
at the specified offset fits into it, or a NULL pointer otherwise. This
patch passes the full 64-bit offset into the inode, which all callers have
anyway, and changes the return value to a simple boolean. The function
also gets a more descriptive name: xfs_iomap_valid.

SGI-PV: 947118
SGI-Modid: xfs-linux-melb:xfs-kern:203825a

Signed-off-by: Christoph Hellwig <hch@sgi.com>
Signed-off-by: Nathan Scott <nathans@sgi.com>
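For illustration only (not part of the patch): a minimal, self-contained
userspace sketch of the new calling convention. The struct below is a
simplified stand-in that mirrors only the two xfs_iomap_t fields the check
consults, and the integer types stand in for the kernel's loff_t/xfs_off_t.

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's xfs_iomap_t: just the file offset
 * covered by the mapping and its length in bytes. */
typedef struct {
	int64_t iomap_offset;
	int64_t iomap_bsize;
} xfs_iomap_t;

/* Same shape as the new xfs_iomap_valid(): the caller passes the full
 * 64-bit file offset and gets back a boolean, instead of passing a
 * page/offset pair and getting an xfs_iomap_t pointer or NULL. */
static inline int xfs_iomap_valid(xfs_iomap_t *iomapp, int64_t offset)
{
	return offset >= iomapp->iomap_offset &&
	       offset < iomapp->iomap_offset + iomapp->iomap_bsize;
}

int main(void)
{
	xfs_iomap_t map = { .iomap_offset = 4096, .iomap_bsize = 8192 };

	/* Offsets in [4096, 12288) fall inside the mapping. */
	printf("%d %d %d\n",
	       xfs_iomap_valid(&map, 4096),	/* 1 */
	       xfs_iomap_valid(&map, 12287),	/* 1 */
	       xfs_iomap_valid(&map, 12288));	/* 0 */
	return 0;
}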
Diffstat (limited to 'fs/xfs')
-rw-r--r--	fs/xfs/linux-2.6/xfs_aops.c	136
1 file changed, 53 insertions(+), 83 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index e998009c0f52..b306e25f0f07 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -228,29 +228,13 @@ xfs_map_blocks(
 		return -error;
 }
 
-/*
- * Finds the corresponding mapping in block @map array of the
- * given @offset within a @page.
- */
-STATIC xfs_iomap_t *
-xfs_offset_to_map(
-	struct page		*page,
+STATIC inline int
+xfs_iomap_valid(
 	xfs_iomap_t		*iomapp,
-	unsigned long		offset)
+	loff_t			offset)
 {
-	xfs_off_t		full_offset;	/* offset from start of file */
-
-	ASSERT(offset < PAGE_CACHE_SIZE);
-
-	full_offset = page->index;		/* NB: using 64bit number */
-	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
-	full_offset += offset;			/* offset from page start */
-
-	if (full_offset < iomapp->iomap_offset)
-		return NULL;
-	if (iomapp->iomap_offset + (iomapp->iomap_bsize -1) >= full_offset)
-		return iomapp;
-	return NULL;
+	return offset >= iomapp->iomap_offset &&
+		offset < iomapp->iomap_offset + iomapp->iomap_bsize;
 }
 
 /*
@@ -461,31 +445,23 @@ xfs_add_to_ioend(
 
 STATIC void
 xfs_map_at_offset(
-	struct page		*page,
 	struct buffer_head	*bh,
-	unsigned long		offset,
+	loff_t			offset,
 	int			block_bits,
-	xfs_iomap_t		*iomapp,
-	xfs_ioend_t		*ioend)
+	xfs_iomap_t		*iomapp)
 {
 	xfs_daddr_t		bn;
-	xfs_off_t		delta;
 	int			sector_shift;
 
 	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
 	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
 	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);
 
-	delta = page->index;
-	delta <<= PAGE_CACHE_SHIFT;
-	delta += offset;
-	delta -= iomapp->iomap_offset;
-	delta >>= block_bits;
-
 	sector_shift = block_bits - BBSHIFT;
-	bn = iomapp->iomap_bn >> sector_shift;
-	bn += delta;
-	BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
+	bn = (iomapp->iomap_bn >> sector_shift) +
+	      ((offset - iomapp->iomap_offset) >> block_bits);
+
+	ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
 	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);
 
 	lock_buffer(bh);
@@ -569,8 +545,10 @@ xfs_probe_unmapped_cluster(
 		if (tindex == tlast) {
 			pg_offset =
 				i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
-			if (!pg_offset)
+			if (!pg_offset) {
+				done = 1;
 				break;
+			}
 		} else
 			pg_offset = PAGE_CACHE_SIZE;
 
@@ -585,6 +563,7 @@ xfs_probe_unmapped_cluster(
 			}
 
 			total += len;
+			tindex++;
 		}
 
 		pagevec_release(&pvec);
@@ -638,19 +617,19 @@ xfs_convert_page(
 	struct inode		*inode,
 	struct page		*page,
 	loff_t			tindex,
-	xfs_iomap_t		*iomapp,
+	xfs_iomap_t		*mp,
 	xfs_ioend_t		**ioendp,
 	struct writeback_control *wbc,
 	int			startio,
 	int			all_bh)
 {
 	struct buffer_head	*bh, *head;
-	xfs_iomap_t		*mp = iomapp, *tmp;
 	unsigned long		p_offset, end_offset;
 	unsigned int		type;
 	int			bbits = inode->i_blkbits;
 	int			len, page_dirty;
 	int			count = 0, done = 0, uptodate = 1;
+	xfs_off_t		f_offset = page_offset(page);
 
 	if (page->index != tindex)
 		goto fail;
@@ -703,15 +682,15 @@ xfs_convert_page(
 			}
 			continue;
 		}
-		tmp = xfs_offset_to_map(page, mp, p_offset);
-		if (!tmp) {
+
+		if (!xfs_iomap_valid(mp, f_offset + p_offset)) {
 			done = 1;
 			continue;
 		}
-		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
-		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));
+		ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
+		ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
 
-		xfs_map_at_offset(page, bh, p_offset, bbits, tmp, *ioendp);
+		xfs_map_at_offset(bh, f_offset + p_offset, bbits, mp);
 		if (startio) {
 			xfs_add_to_ioend(inode, bh, p_offset,
 					type, ioendp, done);
@@ -805,15 +784,14 @@ xfs_page_state_convert(
 	int		unmapped) /* also implies page uptodate */
 {
 	struct buffer_head	*bh, *head;
-	xfs_iomap_t		*iomp, iomap;
+	xfs_iomap_t		iomap;
 	xfs_ioend_t		*ioend = NULL, *iohead = NULL;
 	loff_t			offset;
 	unsigned long		p_offset = 0;
 	unsigned int		type;
 	__uint64_t		end_offset;
 	pgoff_t			end_index, last_index, tlast;
-	int			flags, len, err, done = 1;
-	int			uptodate = 1;
+	int			flags, len, err, iomap_valid = 0, uptodate = 1;
 	int			page_dirty, count = 0, trylock_flag = 0;
 
 	/* wait for other IO threads? */
@@ -854,11 +832,9 @@ xfs_page_state_convert(
 	p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
 	page_dirty = p_offset / len;
 
-	iomp = NULL;
 	bh = head = page_buffers(page);
 	offset = page_offset(page);
 
-	/* TODO: fix up "done" variable and iomap pointer (boolean) */
 	/* TODO: cleanup count and page_dirty */
 
 	do {
@@ -867,14 +843,16 @@ xfs_page_state_convert(
 		if (!buffer_uptodate(bh))
 			uptodate = 0;
 		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
-			done = 1;
+			/*
+			 * the iomap is actually still valid, but the ioend
+			 * isn't.  shouldn't happen too often.
+			 */
+			iomap_valid = 0;
 			continue;
 		}
 
-		if (iomp) {
-			iomp = xfs_offset_to_map(page, &iomap, p_offset);
-			done = (iomp == NULL);
-		}
+		if (iomap_valid)
+			iomap_valid = xfs_iomap_valid(&iomap, offset);
 
 		/*
 		 * First case, map an unwritten extent and prepare for
@@ -894,22 +872,20 @@ xfs_page_state_convert(
 				flags |= trylock_flag;
 			}
 
-			if (!iomp) {
-				done = 1;
+			if (!iomap_valid) {
 				err = xfs_map_blocks(inode, offset, len, &iomap,
 						flags);
 				if (err)
 					goto error;
-				iomp = xfs_offset_to_map(page, &iomap,
-								p_offset);
-				done = (iomp == NULL);
+				iomap_valid = xfs_iomap_valid(&iomap, offset);
 			}
-			if (iomp) {
-				xfs_map_at_offset(page, bh, p_offset,
-						inode->i_blkbits, iomp, ioend);
+			if (iomap_valid) {
+				xfs_map_at_offset(bh, offset,
+						inode->i_blkbits, &iomap);
 				if (startio) {
 					xfs_add_to_ioend(inode, bh, p_offset,
-							type, &ioend, done);
+							type, &ioend,
+							!iomap_valid);
 				} else {
 					set_buffer_dirty(bh);
 					unlock_buffer(bh);
@@ -917,8 +893,6 @@ xfs_page_state_convert(
 				}
 				page_dirty--;
 				count++;
-			} else {
-				done = 1;
 			}
 		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
 			   (unmapped || startio)) {
@@ -931,7 +905,7 @@ xfs_page_state_convert(
 			 * was found, and we are in a path where we
 			 * need to write the whole page out.
 			 */
-			if (!iomp) {
+			if (!iomap_valid) {
 				int	size;
 
 				size = xfs_probe_unmapped_cluster(
@@ -939,21 +913,19 @@ xfs_page_state_convert(
 				err = xfs_map_blocks(inode, offset,
 						size, &iomap,
 						BMAPI_WRITE|BMAPI_MMAP);
-				if (err) {
+				if (err)
 					goto error;
-				}
-				iomp = xfs_offset_to_map(page, &iomap,
-								p_offset);
-				done = (iomp == NULL);
+				iomap_valid = xfs_iomap_valid(&iomap,
+						offset);
 			}
-			if (iomp) {
-				xfs_map_at_offset(page, bh, p_offset,
-						inode->i_blkbits, iomp,
-						ioend);
+			if (iomap_valid) {
+				xfs_map_at_offset(bh, offset,
+						inode->i_blkbits,
+						&iomap);
 				if (startio) {
 					xfs_add_to_ioend(inode,
 						bh, p_offset, type,
-						&ioend, done);
+						&ioend, !iomap_valid);
 				} else {
 					set_buffer_dirty(bh);
 					unlock_buffer(bh);
@@ -961,8 +933,6 @@ xfs_page_state_convert(
 					}
 					page_dirty--;
 					count++;
-				} else {
-					done = 1;
 				}
 			} else if (startio) {
 				if (buffer_uptodate(bh) &&
@@ -970,14 +940,14 @@ xfs_page_state_convert(
 					ASSERT(buffer_mapped(bh));
 					xfs_add_to_ioend(inode,
 							bh, p_offset, type,
-							&ioend, done);
+							&ioend, !iomap_valid);
 					page_dirty--;
 					count++;
 				} else {
-					done = 1;
+					iomap_valid = 0;
 				}
 			} else {
-				done = 1;
+				iomap_valid = 0;
 			}
 		}
 
@@ -992,11 +962,11 @@ xfs_page_state_convert(
 	if (startio)
 		xfs_start_page_writeback(page, wbc, 1, count);
 
-	if (ioend && iomp && !done) {
-		offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
+	if (ioend && iomap_valid) {
+		offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
 				PAGE_CACHE_SHIFT;
 		tlast = min_t(pgoff_t, offset, last_index);
-		xfs_cluster_write(inode, page->index + 1, iomp, &ioend,
+		xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
 					wbc, startio, unmapped, tlast);
 	}
 