author     Linus Torvalds <torvalds@linux-foundation.org>   2011-10-28 13:44:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-10-28 13:44:50 -0400
commit     f793f2961170c0b49c1650e69e7825484159ce62 (patch)
tree       06d27973f9db1080c1460f32155ce2baf610c3d4 /fs
parent     dabcbb1bae0f55378060b285062b20f6ec648c6a (diff)
parent     b99b98dc2673a123a73068f16720232d7be7e669 (diff)
Merge http://sucs.org/~rohan/git/gfs2-3.0-nmw

* http://sucs.org/~rohan/git/gfs2-3.0-nmw: (24 commits)
GFS2: Move readahead of metadata during deallocation into its own function
GFS2: Remove two unused variables
GFS2: Misc fixes
GFS2: rewrite fallocate code to write blocks directly
GFS2: speed up delete/unlink performance for large files
GFS2: Fix off-by-one in gfs2_blk2rgrpd
GFS2: Clean up ->page_mkwrite
GFS2: Correctly set goal block after allocation
GFS2: Fix AIL flush issue during fsync
GFS2: Use cached rgrp in gfs2_rlist_add()
GFS2: Call do_strip() directly from recursive_scan()
GFS2: Remove obsolete assert
GFS2: Cache the most recently used resource group in the inode
GFS2: Make resource groups "append only" during life of fs
GFS2: Use rbtree for resource groups and clean up bitmap buffer ref count scheme
GFS2: Fix lseek after SEEK_DATA, SEEK_HOLE have been added
GFS2: Clean up gfs2_create
GFS2: Use ->dirty_inode()
GFS2: Fix bug trap and journaled data fsync
GFS2: Fix inode allocation error path
...
Diffstat (limited to 'fs')
-rw-r--r--  fs/gfs2/acl.c         |   5
-rw-r--r--  fs/gfs2/aops.c        |   8
-rw-r--r--  fs/gfs2/bmap.c        | 199
-rw-r--r--  fs/gfs2/dir.c         |  50
-rw-r--r--  fs/gfs2/file.c        | 295
-rw-r--r--  fs/gfs2/glops.c       |  89
-rw-r--r--  fs/gfs2/glops.h       |   2
-rw-r--r--  fs/gfs2/incore.h      |  23
-rw-r--r--  fs/gfs2/inode.c       | 112
-rw-r--r--  fs/gfs2/inode.h       |   2
-rw-r--r--  fs/gfs2/lops.c        |  66
-rw-r--r--  fs/gfs2/ops_fstype.c  |   6
-rw-r--r--  fs/gfs2/quota.c       |  28
-rw-r--r--  fs/gfs2/rgrp.c        | 573
-rw-r--r--  fs/gfs2/rgrp.h        |  31
-rw-r--r--  fs/gfs2/super.c       | 134
-rw-r--r--  fs/gfs2/trans.c       |   5
-rw-r--r--  fs/gfs2/trans.h       |  22
-rw-r--r--  fs/gfs2/xattr.c       |  28
19 files changed, 666 insertions, 1012 deletions
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index 34501b64bc4..65978d7885c 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -82,7 +82,7 @@ static int gfs2_set_mode(struct inode *inode, umode_t mode)
82 | iattr.ia_valid = ATTR_MODE; | 82 | iattr.ia_valid = ATTR_MODE; |
83 | iattr.ia_mode = mode; | 83 | iattr.ia_mode = mode; |
84 | 84 | ||
85 | error = gfs2_setattr_simple(GFS2_I(inode), &iattr); | 85 | error = gfs2_setattr_simple(inode, &iattr); |
86 | } | 86 | } |
87 | 87 | ||
88 | return error; | 88 | return error; |
@@ -160,6 +160,7 @@ out:
160 | 160 | ||
161 | int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr) | 161 | int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr) |
162 | { | 162 | { |
163 | struct inode *inode = &ip->i_inode; | ||
163 | struct posix_acl *acl; | 164 | struct posix_acl *acl; |
164 | char *data; | 165 | char *data; |
165 | unsigned int len; | 166 | unsigned int len; |
@@ -169,7 +170,7 @@ int gfs2_acl_chmod(struct gfs2_inode *ip, struct iattr *attr)
169 | if (IS_ERR(acl)) | 170 | if (IS_ERR(acl)) |
170 | return PTR_ERR(acl); | 171 | return PTR_ERR(acl); |
171 | if (!acl) | 172 | if (!acl) |
172 | return gfs2_setattr_simple(ip, attr); | 173 | return gfs2_setattr_simple(inode, attr); |
173 | 174 | ||
174 | error = posix_acl_chmod(&acl, GFP_NOFS, attr->ia_mode); | 175 | error = posix_acl_chmod(&acl, GFP_NOFS, attr->ia_mode); |
175 | if (error) | 176 | if (error) |
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index f9fbbe96c22..4858e1fed8b 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -663,7 +663,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
663 | if (&ip->i_inode == sdp->sd_rindex) | 663 | if (&ip->i_inode == sdp->sd_rindex) |
664 | rblocks += 2 * RES_STATFS; | 664 | rblocks += 2 * RES_STATFS; |
665 | if (alloc_required) | 665 | if (alloc_required) |
666 | rblocks += gfs2_rg_blocks(al); | 666 | rblocks += gfs2_rg_blocks(ip); |
667 | 667 | ||
668 | error = gfs2_trans_begin(sdp, rblocks, | 668 | error = gfs2_trans_begin(sdp, rblocks, |
669 | PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); | 669 | PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); |
@@ -787,7 +787,6 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
787 | u64 to = pos + copied; | 787 | u64 to = pos + copied; |
788 | void *kaddr; | 788 | void *kaddr; |
789 | unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode); | 789 | unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode); |
790 | struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data; | ||
791 | 790 | ||
792 | BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode))); | 791 | BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode))); |
793 | kaddr = kmap_atomic(page, KM_USER0); | 792 | kaddr = kmap_atomic(page, KM_USER0); |
@@ -804,7 +803,6 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
804 | if (copied) { | 803 | if (copied) { |
805 | if (inode->i_size < to) | 804 | if (inode->i_size < to) |
806 | i_size_write(inode, to); | 805 | i_size_write(inode, to); |
807 | gfs2_dinode_out(ip, di); | ||
808 | mark_inode_dirty(inode); | 806 | mark_inode_dirty(inode); |
809 | } | 807 | } |
810 | 808 | ||
@@ -873,10 +871,6 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
873 | gfs2_page_add_databufs(ip, page, from, to); | 871 | gfs2_page_add_databufs(ip, page, from, to); |
874 | 872 | ||
875 | ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); | 873 | ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); |
876 | if (ret > 0) { | ||
877 | gfs2_dinode_out(ip, dibh->b_data); | ||
878 | mark_inode_dirty(inode); | ||
879 | } | ||
880 | 874 | ||
881 | if (inode == sdp->sd_rindex) { | 875 | if (inode == sdp->sd_rindex) { |
882 | adjust_fs_space(inode); | 876 | adjust_fs_space(inode); |
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 7878c473ae6..41d494d7970 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -10,6 +10,7 @@
10 | #include <linux/spinlock.h> | 10 | #include <linux/spinlock.h> |
11 | #include <linux/completion.h> | 11 | #include <linux/completion.h> |
12 | #include <linux/buffer_head.h> | 12 | #include <linux/buffer_head.h> |
13 | #include <linux/blkdev.h> | ||
13 | #include <linux/gfs2_ondisk.h> | 14 | #include <linux/gfs2_ondisk.h> |
14 | #include <linux/crc32.h> | 15 | #include <linux/crc32.h> |
15 | 16 | ||
@@ -36,11 +37,6 @@ struct metapath {
36 | __u16 mp_list[GFS2_MAX_META_HEIGHT]; | 37 | __u16 mp_list[GFS2_MAX_META_HEIGHT]; |
37 | }; | 38 | }; |
38 | 39 | ||
39 | typedef int (*block_call_t) (struct gfs2_inode *ip, struct buffer_head *dibh, | ||
40 | struct buffer_head *bh, __be64 *top, | ||
41 | __be64 *bottom, unsigned int height, | ||
42 | void *data); | ||
43 | |||
44 | struct strip_mine { | 40 | struct strip_mine { |
45 | int sm_first; | 41 | int sm_first; |
46 | unsigned int sm_height; | 42 | unsigned int sm_height; |
@@ -273,6 +269,30 @@ static inline __be64 *metapointer(unsigned int height, const struct metapath *mp
273 | return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height]; | 269 | return ((__be64 *)(bh->b_data + head_size)) + mp->mp_list[height]; |
274 | } | 270 | } |
275 | 271 | ||
272 | static void gfs2_metapath_ra(struct gfs2_glock *gl, | ||
273 | const struct buffer_head *bh, const __be64 *pos) | ||
274 | { | ||
275 | struct buffer_head *rabh; | ||
276 | const __be64 *endp = (const __be64 *)(bh->b_data + bh->b_size); | ||
277 | const __be64 *t; | ||
278 | |||
279 | for (t = pos; t < endp; t++) { | ||
280 | if (!*t) | ||
281 | continue; | ||
282 | |||
283 | rabh = gfs2_getbuf(gl, be64_to_cpu(*t), CREATE); | ||
284 | if (trylock_buffer(rabh)) { | ||
285 | if (!buffer_uptodate(rabh)) { | ||
286 | rabh->b_end_io = end_buffer_read_sync; | ||
287 | submit_bh(READA | REQ_META, rabh); | ||
288 | continue; | ||
289 | } | ||
290 | unlock_buffer(rabh); | ||
291 | } | ||
292 | brelse(rabh); | ||
293 | } | ||
294 | } | ||
295 | |||
276 | /** | 296 | /** |
277 | * lookup_metapath - Walk the metadata tree to a specific point | 297 | * lookup_metapath - Walk the metadata tree to a specific point |
278 | * @ip: The inode | 298 | * @ip: The inode |
@@ -432,12 +452,14 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
432 | { | 452 | { |
433 | struct gfs2_inode *ip = GFS2_I(inode); | 453 | struct gfs2_inode *ip = GFS2_I(inode); |
434 | struct gfs2_sbd *sdp = GFS2_SB(inode); | 454 | struct gfs2_sbd *sdp = GFS2_SB(inode); |
455 | struct super_block *sb = sdp->sd_vfs; | ||
435 | struct buffer_head *dibh = mp->mp_bh[0]; | 456 | struct buffer_head *dibh = mp->mp_bh[0]; |
436 | u64 bn, dblock = 0; | 457 | u64 bn, dblock = 0; |
437 | unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0; | 458 | unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0; |
438 | unsigned dblks = 0; | 459 | unsigned dblks = 0; |
439 | unsigned ptrs_per_blk; | 460 | unsigned ptrs_per_blk; |
440 | const unsigned end_of_metadata = height - 1; | 461 | const unsigned end_of_metadata = height - 1; |
462 | int ret; | ||
441 | int eob = 0; | 463 | int eob = 0; |
442 | enum alloc_state state; | 464 | enum alloc_state state; |
443 | __be64 *ptr; | 465 | __be64 *ptr; |
@@ -540,6 +562,15 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
540 | dblock = bn; | 562 | dblock = bn; |
541 | while (n-- > 0) | 563 | while (n-- > 0) |
542 | *ptr++ = cpu_to_be64(bn++); | 564 | *ptr++ = cpu_to_be64(bn++); |
565 | if (buffer_zeronew(bh_map)) { | ||
566 | ret = sb_issue_zeroout(sb, dblock, dblks, | ||
567 | GFP_NOFS); | ||
568 | if (ret) { | ||
569 | fs_err(sdp, | ||
570 | "Failed to zero data buffers\n"); | ||
571 | clear_buffer_zeronew(bh_map); | ||
572 | } | ||
573 | } | ||
543 | break; | 574 | break; |
544 | } | 575 | } |
545 | } while ((state != ALLOC_DATA) || !dblock); | 576 | } while ((state != ALLOC_DATA) || !dblock); |
@@ -668,76 +699,6 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
668 | } | 699 | } |
669 | 700 | ||
670 | /** | 701 | /** |
671 | * recursive_scan - recursively scan through the end of a file | ||
672 | * @ip: the inode | ||
673 | * @dibh: the dinode buffer | ||
674 | * @mp: the path through the metadata to the point to start | ||
675 | * @height: the height the recursion is at | ||
676 | * @block: the indirect block to look at | ||
677 | * @first: 1 if this is the first block | ||
678 | * @bc: the call to make for each piece of metadata | ||
679 | * @data: data opaque to this function to pass to @bc | ||
680 | * | ||
681 | * When this is first called @height and @block should be zero and | ||
682 | * @first should be 1. | ||
683 | * | ||
684 | * Returns: errno | ||
685 | */ | ||
686 | |||
687 | static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh, | ||
688 | struct metapath *mp, unsigned int height, | ||
689 | u64 block, int first, block_call_t bc, | ||
690 | void *data) | ||
691 | { | ||
692 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | ||
693 | struct buffer_head *bh = NULL; | ||
694 | __be64 *top, *bottom; | ||
695 | u64 bn; | ||
696 | int error; | ||
697 | int mh_size = sizeof(struct gfs2_meta_header); | ||
698 | |||
699 | if (!height) { | ||
700 | error = gfs2_meta_inode_buffer(ip, &bh); | ||
701 | if (error) | ||
702 | return error; | ||
703 | dibh = bh; | ||
704 | |||
705 | top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0]; | ||
706 | bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs; | ||
707 | } else { | ||
708 | error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh); | ||
709 | if (error) | ||
710 | return error; | ||
711 | |||
712 | top = (__be64 *)(bh->b_data + mh_size) + | ||
713 | (first ? mp->mp_list[height] : 0); | ||
714 | |||
715 | bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs; | ||
716 | } | ||
717 | |||
718 | error = bc(ip, dibh, bh, top, bottom, height, data); | ||
719 | if (error) | ||
720 | goto out; | ||
721 | |||
722 | if (height < ip->i_height - 1) | ||
723 | for (; top < bottom; top++, first = 0) { | ||
724 | if (!*top) | ||
725 | continue; | ||
726 | |||
727 | bn = be64_to_cpu(*top); | ||
728 | |||
729 | error = recursive_scan(ip, dibh, mp, height + 1, bn, | ||
730 | first, bc, data); | ||
731 | if (error) | ||
732 | break; | ||
733 | } | ||
734 | |||
735 | out: | ||
736 | brelse(bh); | ||
737 | return error; | ||
738 | } | ||
739 | |||
740 | /** | ||
741 | * do_strip - Look for a layer a particular layer of the file and strip it off | 702 | * do_strip - Look for a layer a particular layer of the file and strip it off |
742 | * @ip: the inode | 703 | * @ip: the inode |
743 | * @dibh: the dinode buffer | 704 | * @dibh: the dinode buffer |
@@ -752,9 +713,8 @@ out:
752 | 713 | ||
753 | static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, | 714 | static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, |
754 | struct buffer_head *bh, __be64 *top, __be64 *bottom, | 715 | struct buffer_head *bh, __be64 *top, __be64 *bottom, |
755 | unsigned int height, void *data) | 716 | unsigned int height, struct strip_mine *sm) |
756 | { | 717 | { |
757 | struct strip_mine *sm = data; | ||
758 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 718 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
759 | struct gfs2_rgrp_list rlist; | 719 | struct gfs2_rgrp_list rlist; |
760 | u64 bn, bstart; | 720 | u64 bn, bstart; |
@@ -783,11 +743,6 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
783 | else if (ip->i_depth) | 743 | else if (ip->i_depth) |
784 | revokes = sdp->sd_inptrs; | 744 | revokes = sdp->sd_inptrs; |
785 | 745 | ||
786 | if (ip != GFS2_I(sdp->sd_rindex)) | ||
787 | error = gfs2_rindex_hold(sdp, &ip->i_alloc->al_ri_gh); | ||
788 | else if (!sdp->sd_rgrps) | ||
789 | error = gfs2_ri_update(ip); | ||
790 | |||
791 | if (error) | 746 | if (error) |
792 | return error; | 747 | return error; |
793 | 748 | ||
@@ -805,7 +760,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
805 | blen++; | 760 | blen++; |
806 | else { | 761 | else { |
807 | if (bstart) | 762 | if (bstart) |
808 | gfs2_rlist_add(sdp, &rlist, bstart); | 763 | gfs2_rlist_add(ip, &rlist, bstart); |
809 | 764 | ||
810 | bstart = bn; | 765 | bstart = bn; |
811 | blen = 1; | 766 | blen = 1; |
@@ -813,7 +768,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
813 | } | 768 | } |
814 | 769 | ||
815 | if (bstart) | 770 | if (bstart) |
816 | gfs2_rlist_add(sdp, &rlist, bstart); | 771 | gfs2_rlist_add(ip, &rlist, bstart); |
817 | else | 772 | else |
818 | goto out; /* Nothing to do */ | 773 | goto out; /* Nothing to do */ |
819 | 774 | ||
@@ -887,12 +842,82 @@ out_rg_gunlock:
887 | out_rlist: | 842 | out_rlist: |
888 | gfs2_rlist_free(&rlist); | 843 | gfs2_rlist_free(&rlist); |
889 | out: | 844 | out: |
890 | if (ip != GFS2_I(sdp->sd_rindex)) | ||
891 | gfs2_glock_dq_uninit(&ip->i_alloc->al_ri_gh); | ||
892 | return error; | 845 | return error; |
893 | } | 846 | } |
894 | 847 | ||
895 | /** | 848 | /** |
849 | * recursive_scan - recursively scan through the end of a file | ||
850 | * @ip: the inode | ||
851 | * @dibh: the dinode buffer | ||
852 | * @mp: the path through the metadata to the point to start | ||
853 | * @height: the height the recursion is at | ||
854 | * @block: the indirect block to look at | ||
855 | * @first: 1 if this is the first block | ||
856 | * @sm: data opaque to this function to pass to @bc | ||
857 | * | ||
858 | * When this is first called @height and @block should be zero and | ||
859 | * @first should be 1. | ||
860 | * | ||
861 | * Returns: errno | ||
862 | */ | ||
863 | |||
864 | static int recursive_scan(struct gfs2_inode *ip, struct buffer_head *dibh, | ||
865 | struct metapath *mp, unsigned int height, | ||
866 | u64 block, int first, struct strip_mine *sm) | ||
867 | { | ||
868 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | ||
869 | struct buffer_head *bh = NULL; | ||
870 | __be64 *top, *bottom; | ||
871 | u64 bn; | ||
872 | int error; | ||
873 | int mh_size = sizeof(struct gfs2_meta_header); | ||
874 | |||
875 | if (!height) { | ||
876 | error = gfs2_meta_inode_buffer(ip, &bh); | ||
877 | if (error) | ||
878 | return error; | ||
879 | dibh = bh; | ||
880 | |||
881 | top = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + mp->mp_list[0]; | ||
882 | bottom = (__be64 *)(bh->b_data + sizeof(struct gfs2_dinode)) + sdp->sd_diptrs; | ||
883 | } else { | ||
884 | error = gfs2_meta_indirect_buffer(ip, height, block, 0, &bh); | ||
885 | if (error) | ||
886 | return error; | ||
887 | |||
888 | top = (__be64 *)(bh->b_data + mh_size) + | ||
889 | (first ? mp->mp_list[height] : 0); | ||
890 | |||
891 | bottom = (__be64 *)(bh->b_data + mh_size) + sdp->sd_inptrs; | ||
892 | } | ||
893 | |||
894 | error = do_strip(ip, dibh, bh, top, bottom, height, sm); | ||
895 | if (error) | ||
896 | goto out; | ||
897 | |||
898 | if (height < ip->i_height - 1) { | ||
899 | |||
900 | gfs2_metapath_ra(ip->i_gl, bh, top); | ||
901 | |||
902 | for (; top < bottom; top++, first = 0) { | ||
903 | if (!*top) | ||
904 | continue; | ||
905 | |||
906 | bn = be64_to_cpu(*top); | ||
907 | |||
908 | error = recursive_scan(ip, dibh, mp, height + 1, bn, | ||
909 | first, sm); | ||
910 | if (error) | ||
911 | break; | ||
912 | } | ||
913 | } | ||
914 | out: | ||
915 | brelse(bh); | ||
916 | return error; | ||
917 | } | ||
918 | |||
919 | |||
920 | /** | ||
896 | * gfs2_block_truncate_page - Deal with zeroing out data for truncate | 921 | * gfs2_block_truncate_page - Deal with zeroing out data for truncate |
897 | * | 922 | * |
898 | * This is partly borrowed from ext3. | 923 | * This is partly borrowed from ext3. |
@@ -1031,7 +1056,7 @@ static int trunc_dealloc(struct gfs2_inode *ip, u64 size)
1031 | sm.sm_first = !!size; | 1056 | sm.sm_first = !!size; |
1032 | sm.sm_height = height; | 1057 | sm.sm_height = height; |
1033 | 1058 | ||
1034 | error = recursive_scan(ip, NULL, &mp, 0, 0, 1, do_strip, &sm); | 1059 | error = recursive_scan(ip, NULL, &mp, 0, 0, 1, &sm); |
1035 | if (error) | 1060 | if (error) |
1036 | break; | 1061 | break; |
1037 | } | 1062 | } |
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 1cc2f8ec52a..8ccad2467cb 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -240,16 +240,15 @@ fail:
240 | return error; | 240 | return error; |
241 | } | 241 | } |
242 | 242 | ||
243 | static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, char *buf, | 243 | static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, __be64 *buf, |
244 | u64 offset, unsigned int size) | 244 | unsigned int size) |
245 | { | 245 | { |
246 | struct buffer_head *dibh; | 246 | struct buffer_head *dibh; |
247 | int error; | 247 | int error; |
248 | 248 | ||
249 | error = gfs2_meta_inode_buffer(ip, &dibh); | 249 | error = gfs2_meta_inode_buffer(ip, &dibh); |
250 | if (!error) { | 250 | if (!error) { |
251 | offset += sizeof(struct gfs2_dinode); | 251 | memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode), size); |
252 | memcpy(buf, dibh->b_data + offset, size); | ||
253 | brelse(dibh); | 252 | brelse(dibh); |
254 | } | 253 | } |
255 | 254 | ||
@@ -261,13 +260,12 @@ static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, char *buf,
261 | * gfs2_dir_read_data - Read a data from a directory inode | 260 | * gfs2_dir_read_data - Read a data from a directory inode |
262 | * @ip: The GFS2 Inode | 261 | * @ip: The GFS2 Inode |
263 | * @buf: The buffer to place result into | 262 | * @buf: The buffer to place result into |
264 | * @offset: File offset to begin jdata_readng from | ||
265 | * @size: Amount of data to transfer | 263 | * @size: Amount of data to transfer |
266 | * | 264 | * |
267 | * Returns: The amount of data actually copied or the error | 265 | * Returns: The amount of data actually copied or the error |
268 | */ | 266 | */ |
269 | static int gfs2_dir_read_data(struct gfs2_inode *ip, char *buf, u64 offset, | 267 | static int gfs2_dir_read_data(struct gfs2_inode *ip, __be64 *buf, |
270 | unsigned int size, unsigned ra) | 268 | unsigned int size) |
271 | { | 269 | { |
272 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 270 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
273 | u64 lblock, dblock; | 271 | u64 lblock, dblock; |
@@ -275,24 +273,14 @@ static int gfs2_dir_read_data(struct gfs2_inode *ip, char *buf, u64 offset,
275 | unsigned int o; | 273 | unsigned int o; |
276 | int copied = 0; | 274 | int copied = 0; |
277 | int error = 0; | 275 | int error = 0; |
278 | u64 disksize = i_size_read(&ip->i_inode); | ||
279 | |||
280 | if (offset >= disksize) | ||
281 | return 0; | ||
282 | |||
283 | if (offset + size > disksize) | ||
284 | size = disksize - offset; | ||
285 | |||
286 | if (!size) | ||
287 | return 0; | ||
288 | 276 | ||
289 | if (gfs2_is_stuffed(ip)) | 277 | if (gfs2_is_stuffed(ip)) |
290 | return gfs2_dir_read_stuffed(ip, buf, offset, size); | 278 | return gfs2_dir_read_stuffed(ip, buf, size); |
291 | 279 | ||
292 | if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip))) | 280 | if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip))) |
293 | return -EINVAL; | 281 | return -EINVAL; |
294 | 282 | ||
295 | lblock = offset; | 283 | lblock = 0; |
296 | o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header); | 284 | o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header); |
297 | 285 | ||
298 | while (copied < size) { | 286 | while (copied < size) { |
@@ -311,8 +299,6 @@ static int gfs2_dir_read_data(struct gfs2_inode *ip, char *buf, u64 offset,
311 | if (error || !dblock) | 299 | if (error || !dblock) |
312 | goto fail; | 300 | goto fail; |
313 | BUG_ON(extlen < 1); | 301 | BUG_ON(extlen < 1); |
314 | if (!ra) | ||
315 | extlen = 1; | ||
316 | bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); | 302 | bh = gfs2_meta_ra(ip->i_gl, dblock, extlen); |
317 | } else { | 303 | } else { |
318 | error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, &bh); | 304 | error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, &bh); |
@@ -328,7 +314,7 @@ static int gfs2_dir_read_data(struct gfs2_inode *ip, char *buf, u64 offset,
328 | extlen--; | 314 | extlen--; |
329 | memcpy(buf, bh->b_data + o, amount); | 315 | memcpy(buf, bh->b_data + o, amount); |
330 | brelse(bh); | 316 | brelse(bh); |
331 | buf += amount; | 317 | buf += (amount/sizeof(__be64)); |
332 | copied += amount; | 318 | copied += amount; |
333 | lblock++; | 319 | lblock++; |
334 | o = sizeof(struct gfs2_meta_header); | 320 | o = sizeof(struct gfs2_meta_header); |
@@ -371,7 +357,7 @@ static __be64 *gfs2_dir_get_hash_table(struct gfs2_inode *ip)
371 | if (hc == NULL) | 357 | if (hc == NULL) |
372 | return ERR_PTR(-ENOMEM); | 358 | return ERR_PTR(-ENOMEM); |
373 | 359 | ||
374 | ret = gfs2_dir_read_data(ip, (char *)hc, 0, hsize, 1); | 360 | ret = gfs2_dir_read_data(ip, hc, hsize); |
375 | if (ret < 0) { | 361 | if (ret < 0) { |
376 | kfree(hc); | 362 | kfree(hc); |
377 | return ERR_PTR(ret); | 363 | return ERR_PTR(ret); |
@@ -1695,7 +1681,6 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
1695 | const struct qstr *name = &dentry->d_name; | 1681 | const struct qstr *name = &dentry->d_name; |
1696 | struct gfs2_dirent *dent, *prev = NULL; | 1682 | struct gfs2_dirent *dent, *prev = NULL; |
1697 | struct buffer_head *bh; | 1683 | struct buffer_head *bh; |
1698 | int error; | ||
1699 | 1684 | ||
1700 | /* Returns _either_ the entry (if its first in block) or the | 1685 | /* Returns _either_ the entry (if its first in block) or the |
1701 | previous entry otherwise */ | 1686 | previous entry otherwise */ |
@@ -1724,22 +1709,15 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
1724 | } | 1709 | } |
1725 | brelse(bh); | 1710 | brelse(bh); |
1726 | 1711 | ||
1727 | error = gfs2_meta_inode_buffer(dip, &bh); | ||
1728 | if (error) | ||
1729 | return error; | ||
1730 | |||
1731 | if (!dip->i_entries) | 1712 | if (!dip->i_entries) |
1732 | gfs2_consist_inode(dip); | 1713 | gfs2_consist_inode(dip); |
1733 | gfs2_trans_add_bh(dip->i_gl, bh, 1); | ||
1734 | dip->i_entries--; | 1714 | dip->i_entries--; |
1735 | dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME; | 1715 | dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME; |
1736 | if (S_ISDIR(dentry->d_inode->i_mode)) | 1716 | if (S_ISDIR(dentry->d_inode->i_mode)) |
1737 | drop_nlink(&dip->i_inode); | 1717 | drop_nlink(&dip->i_inode); |
1738 | gfs2_dinode_out(dip, bh->b_data); | ||
1739 | brelse(bh); | ||
1740 | mark_inode_dirty(&dip->i_inode); | 1718 | mark_inode_dirty(&dip->i_inode); |
1741 | 1719 | ||
1742 | return error; | 1720 | return 0; |
1743 | } | 1721 | } |
1744 | 1722 | ||
1745 | /** | 1723 | /** |
@@ -1829,10 +1807,6 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
1829 | if (error) | 1807 | if (error) |
1830 | goto out_put; | 1808 | goto out_put; |
1831 | 1809 | ||
1832 | error = gfs2_rindex_hold(sdp, &dip->i_alloc->al_ri_gh); | ||
1833 | if (error) | ||
1834 | goto out_qs; | ||
1835 | |||
1836 | /* Count the number of leaves */ | 1810 | /* Count the number of leaves */ |
1837 | bh = leaf_bh; | 1811 | bh = leaf_bh; |
1838 | 1812 | ||
@@ -1847,7 +1821,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
1847 | if (blk != leaf_no) | 1821 | if (blk != leaf_no) |
1848 | brelse(bh); | 1822 | brelse(bh); |
1849 | 1823 | ||
1850 | gfs2_rlist_add(sdp, &rlist, blk); | 1824 | gfs2_rlist_add(dip, &rlist, blk); |
1851 | l_blocks++; | 1825 | l_blocks++; |
1852 | } | 1826 | } |
1853 | 1827 | ||
@@ -1911,8 +1885,6 @@ out_rg_gunlock:
1911 | gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs); | 1885 | gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs); |
1912 | out_rlist: | 1886 | out_rlist: |
1913 | gfs2_rlist_free(&rlist); | 1887 | gfs2_rlist_free(&rlist); |
1914 | gfs2_glock_dq_uninit(&dip->i_alloc->al_ri_gh); | ||
1915 | out_qs: | ||
1916 | gfs2_quota_unhold(dip); | 1888 | gfs2_quota_unhold(dip); |
1917 | out_put: | 1889 | out_put: |
1918 | gfs2_alloc_put(dip); | 1890 | gfs2_alloc_put(dip); |
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index edeb9e80290..5002408dabe 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -59,15 +59,24 @@ static loff_t gfs2_llseek(struct file *file, loff_t offset, int origin)
59 | struct gfs2_holder i_gh; | 59 | struct gfs2_holder i_gh; |
60 | loff_t error; | 60 | loff_t error; |
61 | 61 | ||
62 | if (origin == 2) { | 62 | switch (origin) { |
63 | case SEEK_END: /* These reference inode->i_size */ | ||
64 | case SEEK_DATA: | ||
65 | case SEEK_HOLE: | ||
63 | error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, | 66 | error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, |
64 | &i_gh); | 67 | &i_gh); |
65 | if (!error) { | 68 | if (!error) { |
66 | error = generic_file_llseek_unlocked(file, offset, origin); | 69 | error = generic_file_llseek_unlocked(file, offset, origin); |
67 | gfs2_glock_dq_uninit(&i_gh); | 70 | gfs2_glock_dq_uninit(&i_gh); |
68 | } | 71 | } |
69 | } else | 72 | break; |
73 | case SEEK_CUR: | ||
74 | case SEEK_SET: | ||
70 | error = generic_file_llseek_unlocked(file, offset, origin); | 75 | error = generic_file_llseek_unlocked(file, offset, origin); |
76 | break; | ||
77 | default: | ||
78 | error = -EINVAL; | ||
79 | } | ||
71 | 80 | ||
72 | return error; | 81 | return error; |
73 | } | 82 | } |
@@ -357,8 +366,15 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
357 | unsigned int data_blocks, ind_blocks, rblocks; | 366 | unsigned int data_blocks, ind_blocks, rblocks; |
358 | struct gfs2_holder gh; | 367 | struct gfs2_holder gh; |
359 | struct gfs2_alloc *al; | 368 | struct gfs2_alloc *al; |
369 | loff_t size; | ||
360 | int ret; | 370 | int ret; |
361 | 371 | ||
372 | /* Wait if fs is frozen. This is racy so we check again later on | ||
373 | * and retry if the fs has been frozen after the page lock has | ||
374 | * been acquired | ||
375 | */ | ||
376 | vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); | ||
377 | |||
362 | gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); | 378 | gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); |
363 | ret = gfs2_glock_nq(&gh); | 379 | ret = gfs2_glock_nq(&gh); |
364 | if (ret) | 380 | if (ret) |
@@ -367,8 +383,15 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
367 | set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); | 383 | set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); |
368 | set_bit(GIF_SW_PAGED, &ip->i_flags); | 384 | set_bit(GIF_SW_PAGED, &ip->i_flags); |
369 | 385 | ||
370 | if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) | 386 | if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) { |
387 | lock_page(page); | ||
388 | if (!PageUptodate(page) || page->mapping != inode->i_mapping) { | ||
389 | ret = -EAGAIN; | ||
390 | unlock_page(page); | ||
391 | } | ||
371 | goto out_unlock; | 392 | goto out_unlock; |
393 | } | ||
394 | |||
372 | ret = -ENOMEM; | 395 | ret = -ENOMEM; |
373 | al = gfs2_alloc_get(ip); | 396 | al = gfs2_alloc_get(ip); |
374 | if (al == NULL) | 397 | if (al == NULL) |
@@ -388,7 +411,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
388 | rblocks += data_blocks ? data_blocks : 1; | 411 | rblocks += data_blocks ? data_blocks : 1; |
389 | if (ind_blocks || data_blocks) { | 412 | if (ind_blocks || data_blocks) { |
390 | rblocks += RES_STATFS + RES_QUOTA; | 413 | rblocks += RES_STATFS + RES_QUOTA; |
391 | rblocks += gfs2_rg_blocks(al); | 414 | rblocks += gfs2_rg_blocks(ip); |
392 | } | 415 | } |
393 | ret = gfs2_trans_begin(sdp, rblocks, 0); | 416 | ret = gfs2_trans_begin(sdp, rblocks, 0); |
394 | if (ret) | 417 | if (ret) |
@@ -396,21 +419,29 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
396 | 419 | ||
397 | lock_page(page); | 420 | lock_page(page); |
398 | ret = -EINVAL; | 421 | ret = -EINVAL; |
399 | last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT; | 422 | size = i_size_read(inode); |
400 | if (page->index > last_index) | 423 | last_index = (size - 1) >> PAGE_CACHE_SHIFT; |
401 | goto out_unlock_page; | 424 | /* Check page index against inode size */ |
425 | if (size == 0 || (page->index > last_index)) | ||
426 | goto out_trans_end; | ||
427 | |||
428 | ret = -EAGAIN; | ||
429 | /* If truncated, we must retry the operation, we may have raced | ||
430 | * with the glock demotion code. | ||
431 | */ | ||
432 | if (!PageUptodate(page) || page->mapping != inode->i_mapping) | ||
433 | goto out_trans_end; | ||
434 | |||
435 | /* Unstuff, if required, and allocate backing blocks for page */ | ||
402 | ret = 0; | 436 | ret = 0; |
403 | if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping) | 437 | if (gfs2_is_stuffed(ip)) |
404 | goto out_unlock_page; | ||
405 | if (gfs2_is_stuffed(ip)) { | ||
406 | ret = gfs2_unstuff_dinode(ip, page); | 438 | ret = gfs2_unstuff_dinode(ip, page); |
407 | if (ret) | 439 | if (ret == 0) |
408 | goto out_unlock_page; | 440 | ret = gfs2_allocate_page_backing(page); |
409 | } | ||
410 | ret = gfs2_allocate_page_backing(page); | ||
411 | 441 | ||
412 | out_unlock_page: | 442 | out_trans_end: |
413 | unlock_page(page); | 443 | if (ret) |
444 | unlock_page(page); | ||
414 | gfs2_trans_end(sdp); | 445 | gfs2_trans_end(sdp); |
415 | out_trans_fail: | 446 | out_trans_fail: |
416 | gfs2_inplace_release(ip); | 447 | gfs2_inplace_release(ip); |
@@ -422,11 +453,17 @@ out_unlock:
422 | gfs2_glock_dq(&gh); | 453 | gfs2_glock_dq(&gh); |
423 | out: | 454 | out: |
424 | gfs2_holder_uninit(&gh); | 455 | gfs2_holder_uninit(&gh); |
425 | if (ret == -ENOMEM) | 456 | if (ret == 0) { |
426 | ret = VM_FAULT_OOM; | 457 | set_page_dirty(page); |
427 | else if (ret) | 458 | /* This check must be post dropping of transaction lock */ |
428 | ret = VM_FAULT_SIGBUS; | 459 | if (inode->i_sb->s_frozen == SB_UNFROZEN) { |
429 | return ret; | 460 | wait_on_page_writeback(page); |
461 | } else { | ||
462 | ret = -EAGAIN; | ||
463 | unlock_page(page); | ||
464 | } | ||
465 | } | ||
466 | return block_page_mkwrite_return(ret); | ||
430 | } | 467 | } |
431 | 468 | ||
432 | static const struct vm_operations_struct gfs2_vm_ops = { | 469 | static const struct vm_operations_struct gfs2_vm_ops = { |
@@ -551,8 +588,16 @@ static int gfs2_close(struct inode *inode, struct file *file)
551 | * @end: the end position in the file to sync | 588 | * @end: the end position in the file to sync |
552 | * @datasync: set if we can ignore timestamp changes | 589 | * @datasync: set if we can ignore timestamp changes |
553 | * | 590 | * |
554 | * The VFS will flush data for us. We only need to worry | 591 | * We split the data flushing here so that we don't wait for the data |
555 | * about metadata here. | 592 | * until after we've also sent the metadata to disk. Note that for |
593 | * data=ordered, we will write & wait for the data at the log flush | ||
594 | * stage anyway, so this is unlikely to make much of a difference | ||
595 | * except in the data=writeback case. | ||
596 | * | ||
597 | * If the fdatawrite fails due to any reason except -EIO, we will | ||
598 | * continue the remainder of the fsync, although we'll still report | ||
599 | * the error at the end. This is to match filemap_write_and_wait_range() | ||
600 | * behaviour. | ||
556 | * | 601 | * |
557 | * Returns: errno | 602 | * Returns: errno |
558 | */ | 603 | */ |
@@ -560,30 +605,34 @@ static int gfs2_close(struct inode *inode, struct file *file)
560 | static int gfs2_fsync(struct file *file, loff_t start, loff_t end, | 605 | static int gfs2_fsync(struct file *file, loff_t start, loff_t end, |
561 | int datasync) | 606 | int datasync) |
562 | { | 607 | { |
563 | struct inode *inode = file->f_mapping->host; | 608 | struct address_space *mapping = file->f_mapping; |
609 | struct inode *inode = mapping->host; | ||
564 | int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC); | 610 | int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC); |
565 | struct gfs2_inode *ip = GFS2_I(inode); | 611 | struct gfs2_inode *ip = GFS2_I(inode); |
566 | int ret; | 612 | int ret, ret1 = 0; |
567 | 613 | ||
568 | ret = filemap_write_and_wait_range(inode->i_mapping, start, end); | 614 | if (mapping->nrpages) { |
569 | if (ret) | 615 | ret1 = filemap_fdatawrite_range(mapping, start, end); |
570 | return ret; | 616 | if (ret1 == -EIO) |
571 | mutex_lock(&inode->i_mutex); | 617 | return ret1; |
618 | } | ||
572 | 619 | ||
573 | if (datasync) | 620 | if (datasync) |
574 | sync_state &= ~I_DIRTY_SYNC; | 621 | sync_state &= ~I_DIRTY_SYNC; |
575 | 622 | ||
576 | if (sync_state) { | 623 | if (sync_state) { |
577 | ret = sync_inode_metadata(inode, 1); | 624 | ret = sync_inode_metadata(inode, 1); |
578 | if (ret) { | 625 | if (ret) |
579 | mutex_unlock(&inode->i_mutex); | ||
580 | return ret; | 626 | return ret; |
581 | } | 627 | if (gfs2_is_jdata(ip)) |
582 | gfs2_ail_flush(ip->i_gl); | 628 | filemap_write_and_wait(mapping); |
629 | gfs2_ail_flush(ip->i_gl, 1); | ||
583 | } | 630 | } |
584 | 631 | ||
585 | mutex_unlock(&inode->i_mutex); | 632 | if (mapping->nrpages) |
586 | return 0; | 633 | ret = filemap_fdatawait_range(mapping, start, end); |
634 | |||
635 | return ret ? ret : ret1; | ||
587 | } | 636 | } |
588 | 637 | ||
589 | /** | 638 | /** |
@@ -620,135 +669,18 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
620 | return generic_file_aio_write(iocb, iov, nr_segs, pos); | 669 | return generic_file_aio_write(iocb, iov, nr_segs, pos); |
621 | } | 670 | } |
622 | 671 | ||
623 | static int empty_write_end(struct page *page, unsigned from, | ||
624 | unsigned to, int mode) | ||
625 | { | ||
626 | struct inode *inode = page->mapping->host; | ||
627 | struct gfs2_inode *ip = GFS2_I(inode); | ||
628 | struct buffer_head *bh; | ||
629 | unsigned offset, blksize = 1 << inode->i_blkbits; | ||
630 | pgoff_t end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT; | ||
631 | |||
632 | zero_user(page, from, to-from); | ||
633 | mark_page_accessed(page); | ||
634 | |||
635 | if (page->index < end_index || !(mode & FALLOC_FL_KEEP_SIZE)) { | ||
636 | if (!gfs2_is_writeback(ip)) | ||
637 | gfs2_page_add_databufs(ip, page, from, to); | ||
638 | |||
639 | block_commit_write(page, from, to); | ||
640 | return 0; | ||
641 | } | ||
642 | |||
643 | offset = 0; | ||
644 | bh = page_buffers(page); | ||
645 | while (offset < to) { | ||
646 | if (offset >= from) { | ||
647 | set_buffer_uptodate(bh); | ||
648 | mark_buffer_dirty(bh); | ||
649 | clear_buffer_new(bh); | ||
650 | write_dirty_buffer(bh, WRITE); | ||
651 | } | ||
652 | offset += blksize; | ||
653 | bh = bh->b_this_page; | ||
654 | } | ||
655 | |||
656 | offset = 0; | ||
657 | bh = page_buffers(page); | ||
658 | while (offset < to) { | ||
659 | if (offset >= from) { | ||
660 | wait_on_buffer(bh); | ||
661 | if (!buffer_uptodate(bh)) | ||
662 | return -EIO; | ||
663 | } | ||
664 | offset += blksize; | ||
665 | bh = bh->b_this_page; | ||
666 | } | ||
667 | return 0; | ||
668 | } | ||
669 | |||
670 | static int needs_empty_write(sector_t block, struct inode *inode) | ||
671 | { | ||
672 | int error; | ||
673 | struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 }; | ||
674 | |||
675 | bh_map.b_size = 1 << inode->i_blkbits; | ||
676 | error = gfs2_block_map(inode, block, &bh_map, 0); | ||
677 | if (unlikely(error)) | ||
678 | return error; | ||
679 | return !buffer_mapped(&bh_map); | ||
680 | } | ||
681 | |||
682 | static int write_empty_blocks(struct page *page, unsigned from, unsigned to, | ||
683 | int mode) | ||
684 | { | ||
685 | struct inode *inode = page->mapping->host; | ||
686 | unsigned start, end, next, blksize; | ||
687 | sector_t block = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); | ||
688 | int ret; | ||
689 | |||
690 | blksize = 1 << inode->i_blkbits; | ||
691 | next = end = 0; | ||
692 | while (next < from) { | ||
693 | next += blksize; | ||
694 | block++; | ||
695 | } | ||
696 | start = next; | ||
697 | do { | ||
698 | next += blksize; | ||
699 | ret = needs_empty_write(block, inode); | ||
700 | if (unlikely(ret < 0)) | ||
701 | return ret; | ||
702 | if (ret == 0) { | ||
703 | if (end) { | ||
704 | ret = __block_write_begin(page, start, end - start, | ||
705 | gfs2_block_map); | ||
706 | if (unlikely(ret)) | ||
707 | return ret; | ||
708 | ret = empty_write_end(page, start, end, mode); | ||
709 | if (unlikely(ret)) | ||
710 | return ret; | ||
711 | end = 0; | ||
712 | } | ||
713 | start = next; | ||
714 | } | ||
715 | else | ||
716 | end = next; | ||
717 | block++; | ||
718 | } while (next < to); | ||
719 | |||
720 | if (end) { | ||
721 | ret = __block_write_begin(page, start, end - start, gfs2_block_map); | ||
722 | if (unlikely(ret)) | ||
723 | return ret; | ||
724 | ret = empty_write_end(page, start, end, mode); | ||
725 | if (unlikely(ret)) | ||
726 | return ret; | ||
727 | } | ||
728 | |||
729 | return 0; | ||
730 | } | ||
731 | |||
732 | static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len, | 672 | static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len, |
733 | int mode) | 673 | int mode) |
734 | { | 674 | { |
735 | struct gfs2_inode *ip = GFS2_I(inode); | 675 | struct gfs2_inode *ip = GFS2_I(inode); |
736 | struct buffer_head *dibh; | 676 | struct buffer_head *dibh; |
737 | int error; | 677 | int error; |
738 | u64 start = offset >> PAGE_CACHE_SHIFT; | 678 | unsigned int nr_blks; |
739 | unsigned int start_offset = offset & ~PAGE_CACHE_MASK; | 679 | sector_t lblock = offset >> inode->i_blkbits; |
740 | u64 end = (offset + len - 1) >> PAGE_CACHE_SHIFT; | ||
741 | pgoff_t curr; | ||
742 | struct page *page; | ||
743 | unsigned int end_offset = (offset + len) & ~PAGE_CACHE_MASK; | ||
744 | unsigned int from, to; | ||
745 | |||
746 | if (!end_offset) | ||
747 | end_offset = PAGE_CACHE_SIZE; | ||
748 | 680 | ||
749 | error = gfs2_meta_inode_buffer(ip, &dibh); | 681 | error = gfs2_meta_inode_buffer(ip, &dibh); |
750 | if (unlikely(error)) | 682 | if (unlikely(error)) |
751 | goto out; | 683 | return error; |
752 | 684 | ||
753 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 685 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); |
754 | 686 | ||
@@ -758,40 +690,31 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
758 | goto out; | 690 | goto out; |
759 | } | 691 | } |
760 | 692 | ||
761 | curr = start; | 693 | while (len) { |
762 | offset = start << PAGE_CACHE_SHIFT; | 694 | struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 }; |
763 | from = start_offset; | 695 | bh_map.b_size = len; |
764 | to = PAGE_CACHE_SIZE; | 696 | set_buffer_zeronew(&bh_map); |
765 | while (curr <= end) { | ||
766 | page = grab_cache_page_write_begin(inode->i_mapping, curr, | ||
767 | AOP_FLAG_NOFS); | ||
768 | if (unlikely(!page)) { | ||
769 | error = -ENOMEM; | ||
770 | goto out; | ||
771 | } | ||
772 | 697 | ||
773 | if (curr == end) | 698 | error = gfs2_block_map(inode, lblock, &bh_map, 1); |
774 | to = end_offset; | 699 | if (unlikely(error)) |
775 | error = write_empty_blocks(page, from, to, mode); | ||
776 | if (!error && offset + to > inode->i_size && | ||
777 | !(mode & FALLOC_FL_KEEP_SIZE)) { | ||
778 | i_size_write(inode, offset + to); | ||
779 | } | ||
780 | unlock_page(page); | ||
781 | page_cache_release(page); | ||
782 | if (error) | ||
783 | goto out; | 700 | goto out; |
784 | curr++; | 701 | len -= bh_map.b_size; |
785 | offset += PAGE_CACHE_SIZE; | 702 | nr_blks = bh_map.b_size >> inode->i_blkbits; |
786 | from = 0; | 703 | lblock += nr_blks; |
704 | if (!buffer_new(&bh_map)) | ||
705 | continue; | ||
706 | if (unlikely(!buffer_zeronew(&bh_map))) { | ||
707 | error = -EIO; | ||
708 | goto out; | ||
709 | } | ||
787 | } | 710 | } |
711 | if (offset + len > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE)) | ||
712 | i_size_write(inode, offset + len); | ||
788 | 713 | ||
789 | gfs2_dinode_out(ip, dibh->b_data); | ||
790 | mark_inode_dirty(inode); | 714 | mark_inode_dirty(inode); |
791 | 715 | ||
792 | brelse(dibh); | ||
793 | |||
794 | out: | 716 | out: |
717 | brelse(dibh); | ||
795 | return error; | 718 | return error; |
796 | } | 719 | } |
797 | 720 | ||
@@ -799,7 +722,7 @@ static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
799 | unsigned int *data_blocks, unsigned int *ind_blocks) | 722 | unsigned int *data_blocks, unsigned int *ind_blocks) |
800 | { | 723 | { |
801 | const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 724 | const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
802 | unsigned int max_blocks = ip->i_alloc->al_rgd->rd_free_clone; | 725 | unsigned int max_blocks = ip->i_rgd->rd_free_clone; |
803 | unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1); | 726 | unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1); |
804 | 727 | ||
805 | for (tmp = max_data; tmp > sdp->sd_diptrs;) { | 728 | for (tmp = max_data; tmp > sdp->sd_diptrs;) { |
@@ -831,6 +754,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
831 | int error; | 754 | int error; |
832 | loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1); | 755 | loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1); |
833 | loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift; | 756 | loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift; |
757 | loff_t max_chunk_size = UINT_MAX & bsize_mask; | ||
834 | next = (next + 1) << sdp->sd_sb.sb_bsize_shift; | 758 | next = (next + 1) << sdp->sd_sb.sb_bsize_shift; |
835 | 759 | ||
836 | /* We only support the FALLOC_FL_KEEP_SIZE mode */ | 760 | /* We only support the FALLOC_FL_KEEP_SIZE mode */ |
@@ -884,11 +808,12 @@ retry:
884 | goto out_qunlock; | 808 | goto out_qunlock; |
885 | } | 809 | } |
886 | max_bytes = bytes; | 810 | max_bytes = bytes; |
887 | calc_max_reserv(ip, len, &max_bytes, &data_blocks, &ind_blocks); | 811 | calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len, |
812 | &max_bytes, &data_blocks, &ind_blocks); | ||
888 | al->al_requested = data_blocks + ind_blocks; | 813 | al->al_requested = data_blocks + ind_blocks; |
889 | 814 | ||
890 | rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA + | 815 | rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA + |
891 | RES_RG_HDR + gfs2_rg_blocks(al); | 816 | RES_RG_HDR + gfs2_rg_blocks(ip); |
892 | if (gfs2_is_jdata(ip)) | 817 | if (gfs2_is_jdata(ip)) |
893 | rblocks += data_blocks ? data_blocks : 1; | 818 | rblocks += data_blocks ? data_blocks : 1; |
894 | 819 | ||
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index da21ecaafcc..78418b4fa85 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -28,40 +28,55 @@
28 | #include "trans.h" | 28 | #include "trans.h" |
29 | #include "dir.h" | 29 | #include "dir.h" |
30 | 30 | ||
31 | static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh) | ||
32 | { | ||
33 | fs_err(gl->gl_sbd, "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page state 0x%lx\n", | ||
34 | bh, (unsigned long long)bh->b_blocknr, bh->b_state, | ||
35 | bh->b_page->mapping, bh->b_page->flags); | ||
36 | fs_err(gl->gl_sbd, "AIL glock %u:%llu mapping %p\n", | ||
37 | gl->gl_name.ln_type, gl->gl_name.ln_number, | ||
38 | gfs2_glock2aspace(gl)); | ||
39 | gfs2_lm_withdraw(gl->gl_sbd, "AIL error\n"); | ||
40 | } | ||
41 | |||
31 | /** | 42 | /** |
32 | * __gfs2_ail_flush - remove all buffers for a given lock from the AIL | 43 | * __gfs2_ail_flush - remove all buffers for a given lock from the AIL |
33 | * @gl: the glock | 44 | * @gl: the glock |
45 | * @fsync: set when called from fsync (not all buffers will be clean) | ||
34 | * | 46 | * |
35 | * None of the buffers should be dirty, locked, or pinned. | 47 | * None of the buffers should be dirty, locked, or pinned. |
36 | */ | 48 | */ |
37 | 49 | ||
38 | static void __gfs2_ail_flush(struct gfs2_glock *gl) | 50 | static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) |
39 | { | 51 | { |
40 | struct gfs2_sbd *sdp = gl->gl_sbd; | 52 | struct gfs2_sbd *sdp = gl->gl_sbd; |
41 | struct list_head *head = &gl->gl_ail_list; | 53 | struct list_head *head = &gl->gl_ail_list; |
42 | struct gfs2_bufdata *bd; | 54 | struct gfs2_bufdata *bd, *tmp; |
43 | struct buffer_head *bh; | 55 | struct buffer_head *bh; |
56 | const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock); | ||
57 | sector_t blocknr; | ||
44 | 58 | ||
59 | gfs2_log_lock(sdp); | ||
45 | spin_lock(&sdp->sd_ail_lock); | 60 | spin_lock(&sdp->sd_ail_lock); |
46 | while (!list_empty(head)) { | 61 | list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) { |
47 | bd = list_entry(head->next, struct gfs2_bufdata, | ||
48 | bd_ail_gl_list); | ||
49 | bh = bd->bd_bh; | 62 | bh = bd->bd_bh; |
50 | gfs2_remove_from_ail(bd); | 63 | if (bh->b_state & b_state) { |
51 | bd->bd_bh = NULL; | 64 | if (fsync) |
65 | continue; | ||
66 | gfs2_ail_error(gl, bh); | ||
67 | } | ||
68 | blocknr = bh->b_blocknr; | ||
52 | bh->b_private = NULL; | 69 | bh->b_private = NULL; |
53 | spin_unlock(&sdp->sd_ail_lock); | 70 | gfs2_remove_from_ail(bd); /* drops ref on bh */ |
54 | 71 | ||
55 | bd->bd_blkno = bh->b_blocknr; | 72 | bd->bd_bh = NULL; |
56 | gfs2_log_lock(sdp); | 73 | bd->bd_blkno = blocknr; |
57 | gfs2_assert_withdraw(sdp, !buffer_busy(bh)); | ||
58 | gfs2_trans_add_revoke(sdp, bd); | ||
59 | gfs2_log_unlock(sdp); | ||
60 | 74 | ||
61 | spin_lock(&sdp->sd_ail_lock); | 75 | gfs2_trans_add_revoke(sdp, bd); |
62 | } | 76 | } |
63 | gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count)); | 77 | BUG_ON(!fsync && atomic_read(&gl->gl_ail_count)); |
64 | spin_unlock(&sdp->sd_ail_lock); | 78 | spin_unlock(&sdp->sd_ail_lock); |
79 | gfs2_log_unlock(sdp); | ||
65 | } | 80 | } |
66 | 81 | ||
67 | 82 | ||
@@ -84,13 +99,13 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
84 | BUG_ON(current->journal_info); | 99 | BUG_ON(current->journal_info); |
85 | current->journal_info = &tr; | 100 | current->journal_info = &tr; |
86 | 101 | ||
87 | __gfs2_ail_flush(gl); | 102 | __gfs2_ail_flush(gl, 0); |
88 | 103 | ||
89 | gfs2_trans_end(sdp); | 104 | gfs2_trans_end(sdp); |
90 | gfs2_log_flush(sdp, NULL); | 105 | gfs2_log_flush(sdp, NULL); |
91 | } | 106 | } |
92 | 107 | ||
93 | void gfs2_ail_flush(struct gfs2_glock *gl) | 108 | void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) |
94 | { | 109 | { |
95 | struct gfs2_sbd *sdp = gl->gl_sbd; | 110 | struct gfs2_sbd *sdp = gl->gl_sbd; |
96 | unsigned int revokes = atomic_read(&gl->gl_ail_count); | 111 | unsigned int revokes = atomic_read(&gl->gl_ail_count); |
@@ -102,7 +117,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl)
102 | ret = gfs2_trans_begin(sdp, 0, revokes); | 117 | ret = gfs2_trans_begin(sdp, 0, revokes); |
103 | if (ret) | 118 | if (ret) |
104 | return; | 119 | return; |
105 | __gfs2_ail_flush(gl); | 120 | __gfs2_ail_flush(gl, fsync); |
106 | gfs2_trans_end(sdp); | 121 | gfs2_trans_end(sdp); |
107 | gfs2_log_flush(sdp, NULL); | 122 | gfs2_log_flush(sdp, NULL); |
108 | } | 123 | } |
@@ -119,6 +134,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl)
119 | static void rgrp_go_sync(struct gfs2_glock *gl) | 134 | static void rgrp_go_sync(struct gfs2_glock *gl) |
120 | { | 135 | { |
121 | struct address_space *metamapping = gfs2_glock2aspace(gl); | 136 | struct address_space *metamapping = gfs2_glock2aspace(gl); |
137 | struct gfs2_rgrpd *rgd; | ||
122 | int error; | 138 | int error; |
123 | 139 | ||
124 | if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) | 140 | if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) |
@@ -130,6 +146,12 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
130 | error = filemap_fdatawait(metamapping); | 146 | error = filemap_fdatawait(metamapping); |
131 | mapping_set_error(metamapping, error); | 147 | mapping_set_error(metamapping, error); |
132 | gfs2_ail_empty_gl(gl); | 148 | gfs2_ail_empty_gl(gl); |
149 | |||
150 | spin_lock(&gl->gl_spin); | ||
151 | rgd = gl->gl_object; | ||
152 | if (rgd) | ||
153 | gfs2_free_clones(rgd); | ||
154 | spin_unlock(&gl->gl_spin); | ||
133 | } | 155 | } |
134 | 156 | ||
135 | /** | 157 | /** |
@@ -430,33 +452,6 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
430 | } | 452 | } |
431 | 453 | ||
432 | /** | 454 | /** |
433 | * rgrp_go_lock - operation done after an rgrp lock is locked by | ||
434 | * a first holder on this node. | ||
435 | * @gl: the glock | ||
436 | * @flags: | ||
437 | * | ||
438 | * Returns: errno | ||
439 | */ | ||
440 | |||
441 | static int rgrp_go_lock(struct gfs2_holder *gh) | ||
442 | { | ||
443 | return gfs2_rgrp_bh_get(gh->gh_gl->gl_object); | ||
444 | } | ||
445 | |||
446 | /** | ||
447 | * rgrp_go_unlock - operation done before an rgrp lock is unlocked by | ||
448 | * a last holder on this node. | ||
449 | * @gl: the glock | ||
450 | * @flags: | ||
451 | * | ||
452 | */ | ||
453 | |||
454 | static void rgrp_go_unlock(struct gfs2_holder *gh) | ||
455 | { | ||
456 | gfs2_rgrp_bh_put(gh->gh_gl->gl_object); | ||
457 | } | ||
458 | |||
459 | /** | ||
460 | * trans_go_sync - promote/demote the transaction glock | 455 | * trans_go_sync - promote/demote the transaction glock |
461 | * @gl: the glock | 456 | * @gl: the glock |
462 | * @state: the requested state | 457 | * @state: the requested state |
@@ -558,8 +553,8 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
558 | const struct gfs2_glock_operations gfs2_rgrp_glops = { | 553 | const struct gfs2_glock_operations gfs2_rgrp_glops = { |
559 | .go_xmote_th = rgrp_go_sync, | 554 | .go_xmote_th = rgrp_go_sync, |
560 | .go_inval = rgrp_go_inval, | 555 | .go_inval = rgrp_go_inval, |
561 | .go_lock = rgrp_go_lock, | 556 | .go_lock = gfs2_rgrp_go_lock, |
562 | .go_unlock = rgrp_go_unlock, | 557 | .go_unlock = gfs2_rgrp_go_unlock, |
563 | .go_dump = gfs2_rgrp_dump, | 558 | .go_dump = gfs2_rgrp_dump, |
564 | .go_type = LM_TYPE_RGRP, | 559 | .go_type = LM_TYPE_RGRP, |
565 | .go_flags = GLOF_ASPACE, | 560 | .go_flags = GLOF_ASPACE, |
diff --git a/fs/gfs2/glops.h b/fs/gfs2/glops.h
index 6fce409b5a5..bf95a2dc166 100644
--- a/fs/gfs2/glops.h
+++ b/fs/gfs2/glops.h
@@ -23,6 +23,6 @@ extern const struct gfs2_glock_operations gfs2_quota_glops;
23 | extern const struct gfs2_glock_operations gfs2_journal_glops; | 23 | extern const struct gfs2_glock_operations gfs2_journal_glops; |
24 | extern const struct gfs2_glock_operations *gfs2_glops_list[]; | 24 | extern const struct gfs2_glock_operations *gfs2_glops_list[]; |
25 | 25 | ||
26 | extern void gfs2_ail_flush(struct gfs2_glock *gl); | 26 | extern void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync); |
27 | 27 | ||
28 | #endif /* __GLOPS_DOT_H__ */ | 28 | #endif /* __GLOPS_DOT_H__ */ |
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 892ac37de8a..7389dfdcc9e 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -18,6 +18,7 @@
18 | #include <linux/rcupdate.h> | 18 | #include <linux/rcupdate.h> |
19 | #include <linux/rculist_bl.h> | 19 | #include <linux/rculist_bl.h> |
20 | #include <linux/completion.h> | 20 | #include <linux/completion.h> |
21 | #include <linux/rbtree.h> | ||
21 | 22 | ||
22 | #define DIO_WAIT 0x00000010 | 23 | #define DIO_WAIT 0x00000010 |
23 | #define DIO_METADATA 0x00000020 | 24 | #define DIO_METADATA 0x00000020 |
@@ -78,8 +79,7 @@ struct gfs2_bitmap {
78 | }; | 79 | }; |
79 | 80 | ||
80 | struct gfs2_rgrpd { | 81 | struct gfs2_rgrpd { |
81 | struct list_head rd_list; /* Link with superblock */ | 82 | struct rb_node rd_node; /* Link with superblock */ |
82 | struct list_head rd_list_mru; | ||
83 | struct gfs2_glock *rd_gl; /* Glock for this rgrp */ | 83 | struct gfs2_glock *rd_gl; /* Glock for this rgrp */ |
84 | u64 rd_addr; /* grp block disk address */ | 84 | u64 rd_addr; /* grp block disk address */ |
85 | u64 rd_data0; /* first data location */ | 85 | u64 rd_data0; /* first data location */ |
@@ -91,10 +91,7 @@ struct gfs2_rgrpd {
91 | u32 rd_dinodes; | 91 | u32 rd_dinodes; |
92 | u64 rd_igeneration; | 92 | u64 rd_igeneration; |
93 | struct gfs2_bitmap *rd_bits; | 93 | struct gfs2_bitmap *rd_bits; |
94 | struct mutex rd_mutex; | ||
95 | struct gfs2_log_element rd_le; | ||
96 | struct gfs2_sbd *rd_sbd; | 94 | struct gfs2_sbd *rd_sbd; |
97 | unsigned int rd_bh_count; | ||
98 | u32 rd_last_alloc; | 95 | u32 rd_last_alloc; |
99 | u32 rd_flags; | 96 | u32 rd_flags; |
100 | #define GFS2_RDF_CHECK 0x10000000 /* check for unlinked inodes */ | 97 | #define GFS2_RDF_CHECK 0x10000000 /* check for unlinked inodes */ |
@@ -106,12 +103,15 @@ struct gfs2_rgrpd {
106 | enum gfs2_state_bits { | 103 | enum gfs2_state_bits { |
107 | BH_Pinned = BH_PrivateStart, | 104 | BH_Pinned = BH_PrivateStart, |
108 | BH_Escaped = BH_PrivateStart + 1, | 105 | BH_Escaped = BH_PrivateStart + 1, |
106 | BH_Zeronew = BH_PrivateStart + 2, | ||
109 | }; | 107 | }; |
110 | 108 | ||
111 | BUFFER_FNS(Pinned, pinned) | 109 | BUFFER_FNS(Pinned, pinned) |
112 | TAS_BUFFER_FNS(Pinned, pinned) | 110 | TAS_BUFFER_FNS(Pinned, pinned) |
113 | BUFFER_FNS(Escaped, escaped) | 111 | BUFFER_FNS(Escaped, escaped) |
114 | TAS_BUFFER_FNS(Escaped, escaped) | 112 | TAS_BUFFER_FNS(Escaped, escaped) |
113 | BUFFER_FNS(Zeronew, zeronew) | ||
114 | TAS_BUFFER_FNS(Zeronew, zeronew) | ||
115 | 115 | ||
116 | struct gfs2_bufdata { | 116 | struct gfs2_bufdata { |
117 | struct buffer_head *bd_bh; | 117 | struct buffer_head *bd_bh; |
@@ -246,7 +246,6 @@ struct gfs2_glock {
246 | 246 | ||
247 | struct gfs2_alloc { | 247 | struct gfs2_alloc { |
248 | /* Quota stuff */ | 248 | /* Quota stuff */ |
249 | |||
250 | struct gfs2_quota_data *al_qd[2*MAXQUOTAS]; | 249 | struct gfs2_quota_data *al_qd[2*MAXQUOTAS]; |
251 | struct gfs2_holder al_qd_ghs[2*MAXQUOTAS]; | 250 | struct gfs2_holder al_qd_ghs[2*MAXQUOTAS]; |
252 | unsigned int al_qd_num; | 251 | unsigned int al_qd_num; |
@@ -255,18 +254,13 @@ struct gfs2_alloc {
255 | u32 al_alloced; /* Filled in by gfs2_alloc_*() */ | 254 | u32 al_alloced; /* Filled in by gfs2_alloc_*() */ |
256 | 255 | ||
257 | /* Filled in by gfs2_inplace_reserve() */ | 256 | /* Filled in by gfs2_inplace_reserve() */ |
258 | |||
259 | unsigned int al_line; | ||
260 | char *al_file; | ||
261 | struct gfs2_holder al_ri_gh; | ||
262 | struct gfs2_holder al_rgd_gh; | 257 | struct gfs2_holder al_rgd_gh; |
263 | struct gfs2_rgrpd *al_rgd; | ||
264 | |||
265 | }; | 258 | }; |
266 | 259 | ||
267 | enum { | 260 | enum { |
268 | GIF_INVALID = 0, | 261 | GIF_INVALID = 0, |
269 | GIF_QD_LOCKED = 1, | 262 | GIF_QD_LOCKED = 1, |
263 | GIF_ALLOC_FAILED = 2, | ||
270 | GIF_SW_PAGED = 3, | 264 | GIF_SW_PAGED = 3, |
271 | }; | 265 | }; |
272 | 266 | ||
@@ -282,6 +276,7 @@ struct gfs2_inode { | |||
282 | struct gfs2_holder i_iopen_gh; | 276 | struct gfs2_holder i_iopen_gh; |
283 | struct gfs2_holder i_gh; /* for prepare/commit_write only */ | 277 | struct gfs2_holder i_gh; /* for prepare/commit_write only */ |
284 | struct gfs2_alloc *i_alloc; | 278 | struct gfs2_alloc *i_alloc; |
279 | struct gfs2_rgrpd *i_rgd; | ||
285 | u64 i_goal; /* goal block for allocations */ | 280 | u64 i_goal; /* goal block for allocations */ |
286 | struct rw_semaphore i_rw_mutex; | 281 | struct rw_semaphore i_rw_mutex; |
287 | struct list_head i_trunc_list; | 282 | struct list_head i_trunc_list; |
@@ -574,9 +569,7 @@ struct gfs2_sbd { | |||
574 | int sd_rindex_uptodate; | 569 | int sd_rindex_uptodate; |
575 | spinlock_t sd_rindex_spin; | 570 | spinlock_t sd_rindex_spin; |
576 | struct mutex sd_rindex_mutex; | 571 | struct mutex sd_rindex_mutex; |
577 | struct list_head sd_rindex_list; | 572 | struct rb_root sd_rindex_tree; |
578 | struct list_head sd_rindex_mru_list; | ||
579 | struct gfs2_rgrpd *sd_rindex_forward; | ||
580 | unsigned int sd_rgrps; | 573 | unsigned int sd_rgrps; |
581 | unsigned int sd_max_rg_data; | 574 | unsigned int sd_max_rg_data; |
582 | 575 | ||
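The incore.h hunks above swap the rd_list/rd_list_mru list heads for a single rb_node keyed by rd_addr, and the superblock's two rindex lists for one sd_rindex_tree. A minimal sketch of how those fields are walked when mapping a block number to its resource group (illustrative only, not part of this patch; the real, spin-locked version is the gfs2_blk2rgrpd() hunk in fs/gfs2/rgrp.c further down, and the helper name here is made up):

#include <linux/rbtree.h>

/* Caller is assumed to hold sd_rindex_spin, as gfs2_blk2rgrpd() does. */
static struct gfs2_rgrpd *rgrp_lookup_sketch(struct gfs2_sbd *sdp, u64 blk)
{
	struct rb_node *n = sdp->sd_rindex_tree.rb_node;

	while (n) {
		struct gfs2_rgrpd *rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);

		if (blk < rgd->rd_addr)
			n = n->rb_left;		/* before this rgrp on disk */
		else if (blk >= rgd->rd_data0 + rgd->rd_data)
			n = n->rb_right;	/* past this rgrp's data blocks */
		else
			return rgd;		/* blk falls inside this rgrp */
	}
	return NULL;				/* no rgrp covers blk */
}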
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 6525b804d5e..cfd4959b218 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c | |||
@@ -583,7 +583,7 @@ static int link_dinode(struct gfs2_inode *dip, const struct qstr *name, | |||
583 | goto fail_quota_locks; | 583 | goto fail_quota_locks; |
584 | 584 | ||
585 | error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + | 585 | error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + |
586 | al->al_rgd->rd_length + | 586 | dip->i_rgd->rd_length + |
587 | 2 * RES_DINODE + | 587 | 2 * RES_DINODE + |
588 | RES_STATFS + RES_QUOTA, 0); | 588 | RES_STATFS + RES_QUOTA, 0); |
589 | if (error) | 589 | if (error) |
@@ -613,8 +613,7 @@ fail_end_trans: | |||
613 | gfs2_trans_end(sdp); | 613 | gfs2_trans_end(sdp); |
614 | 614 | ||
615 | fail_ipreserv: | 615 | fail_ipreserv: |
616 | if (dip->i_alloc->al_rgd) | 616 | gfs2_inplace_release(dip); |
617 | gfs2_inplace_release(dip); | ||
618 | 617 | ||
619 | fail_quota_locks: | 618 | fail_quota_locks: |
620 | gfs2_quota_unlock(dip); | 619 | gfs2_quota_unlock(dip); |
@@ -661,7 +660,7 @@ static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip, | |||
661 | 660 | ||
662 | static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, | 661 | static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, |
663 | unsigned int mode, dev_t dev, const char *symname, | 662 | unsigned int mode, dev_t dev, const char *symname, |
664 | unsigned int size) | 663 | unsigned int size, int excl) |
665 | { | 664 | { |
666 | const struct qstr *name = &dentry->d_name; | 665 | const struct qstr *name = &dentry->d_name; |
667 | struct gfs2_holder ghs[2]; | 666 | struct gfs2_holder ghs[2]; |
@@ -681,6 +680,12 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, | |||
681 | goto fail; | 680 | goto fail; |
682 | 681 | ||
683 | error = create_ok(dip, name, mode); | 682 | error = create_ok(dip, name, mode); |
683 | if ((error == -EEXIST) && S_ISREG(mode) && !excl) { | ||
684 | inode = gfs2_lookupi(dir, &dentry->d_name, 0); | ||
685 | gfs2_glock_dq_uninit(ghs); | ||
686 | d_instantiate(dentry, inode); | ||
687 | return IS_ERR(inode) ? PTR_ERR(inode) : 0; | ||
688 | } | ||
684 | if (error) | 689 | if (error) |
685 | goto fail_gunlock; | 690 | goto fail_gunlock; |
686 | 691 | ||
@@ -723,21 +728,22 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, | |||
723 | brelse(bh); | 728 | brelse(bh); |
724 | 729 | ||
725 | gfs2_trans_end(sdp); | 730 | gfs2_trans_end(sdp); |
726 | if (dip->i_alloc->al_rgd) | 731 | gfs2_inplace_release(dip); |
727 | gfs2_inplace_release(dip); | ||
728 | gfs2_quota_unlock(dip); | 732 | gfs2_quota_unlock(dip); |
729 | gfs2_alloc_put(dip); | 733 | gfs2_alloc_put(dip); |
730 | gfs2_glock_dq_uninit_m(2, ghs); | ||
731 | mark_inode_dirty(inode); | 734 | mark_inode_dirty(inode); |
735 | gfs2_glock_dq_uninit_m(2, ghs); | ||
732 | d_instantiate(dentry, inode); | 736 | d_instantiate(dentry, inode); |
733 | return 0; | 737 | return 0; |
734 | 738 | ||
735 | fail_gunlock2: | 739 | fail_gunlock2: |
736 | gfs2_glock_dq_uninit(ghs + 1); | 740 | gfs2_glock_dq_uninit(ghs + 1); |
737 | if (inode && !IS_ERR(inode)) | ||
738 | iput(inode); | ||
739 | fail_gunlock: | 741 | fail_gunlock: |
740 | gfs2_glock_dq_uninit(ghs); | 742 | gfs2_glock_dq_uninit(ghs); |
743 | if (inode && !IS_ERR(inode)) { | ||
744 | set_bit(GIF_ALLOC_FAILED, &GFS2_I(inode)->i_flags); | ||
745 | iput(inode); | ||
746 | } | ||
741 | fail: | 747 | fail: |
742 | if (bh) | 748 | if (bh) |
743 | brelse(bh); | 749 | brelse(bh); |
@@ -756,24 +762,10 @@ fail: | |||
756 | static int gfs2_create(struct inode *dir, struct dentry *dentry, | 762 | static int gfs2_create(struct inode *dir, struct dentry *dentry, |
757 | int mode, struct nameidata *nd) | 763 | int mode, struct nameidata *nd) |
758 | { | 764 | { |
759 | struct inode *inode; | 765 | int excl = 0; |
760 | int ret; | 766 | if (nd && (nd->flags & LOOKUP_EXCL)) |
761 | 767 | excl = 1; | |
762 | for (;;) { | 768 | return gfs2_create_inode(dir, dentry, S_IFREG | mode, 0, NULL, 0, excl); |
763 | ret = gfs2_create_inode(dir, dentry, S_IFREG | mode, 0, NULL, 0); | ||
764 | if (ret != -EEXIST || (nd && (nd->flags & LOOKUP_EXCL))) | ||
765 | return ret; | ||
766 | |||
767 | inode = gfs2_lookupi(dir, &dentry->d_name, 0); | ||
768 | if (inode) { | ||
769 | if (!IS_ERR(inode)) | ||
770 | break; | ||
771 | return PTR_ERR(inode); | ||
772 | } | ||
773 | } | ||
774 | |||
775 | d_instantiate(dentry, inode); | ||
776 | return 0; | ||
777 | } | 769 | } |
778 | 770 | ||
779 | /** | 771 | /** |
@@ -900,7 +892,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, | |||
900 | goto out_gunlock_q; | 892 | goto out_gunlock_q; |
901 | 893 | ||
902 | error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + | 894 | error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + |
903 | gfs2_rg_blocks(al) + | 895 | gfs2_rg_blocks(dip) + |
904 | 2 * RES_DINODE + RES_STATFS + | 896 | 2 * RES_DINODE + RES_STATFS + |
905 | RES_QUOTA, 0); | 897 | RES_QUOTA, 0); |
906 | if (error) | 898 | if (error) |
@@ -922,8 +914,9 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, | |||
922 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | 914 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); |
923 | inc_nlink(&ip->i_inode); | 915 | inc_nlink(&ip->i_inode); |
924 | ip->i_inode.i_ctime = CURRENT_TIME; | 916 | ip->i_inode.i_ctime = CURRENT_TIME; |
925 | gfs2_dinode_out(ip, dibh->b_data); | 917 | ihold(inode); |
926 | mark_inode_dirty(&ip->i_inode); | 918 | d_instantiate(dentry, inode); |
919 | mark_inode_dirty(inode); | ||
927 | 920 | ||
928 | out_brelse: | 921 | out_brelse: |
929 | brelse(dibh); | 922 | brelse(dibh); |
@@ -945,11 +938,6 @@ out_child: | |||
945 | out_parent: | 938 | out_parent: |
946 | gfs2_holder_uninit(ghs); | 939 | gfs2_holder_uninit(ghs); |
947 | gfs2_holder_uninit(ghs + 1); | 940 | gfs2_holder_uninit(ghs + 1); |
948 | if (!error) { | ||
949 | ihold(inode); | ||
950 | d_instantiate(dentry, inode); | ||
951 | mark_inode_dirty(inode); | ||
952 | } | ||
953 | return error; | 941 | return error; |
954 | } | 942 | } |
955 | 943 | ||
@@ -1022,8 +1010,6 @@ static int gfs2_unlink_inode(struct gfs2_inode *dip, | |||
1022 | clear_nlink(inode); | 1010 | clear_nlink(inode); |
1023 | else | 1011 | else |
1024 | drop_nlink(inode); | 1012 | drop_nlink(inode); |
1025 | gfs2_trans_add_bh(ip->i_gl, bh, 1); | ||
1026 | gfs2_dinode_out(ip, bh->b_data); | ||
1027 | mark_inode_dirty(inode); | 1013 | mark_inode_dirty(inode); |
1028 | if (inode->i_nlink == 0) | 1014 | if (inode->i_nlink == 0) |
1029 | gfs2_unlink_di(inode); | 1015 | gfs2_unlink_di(inode); |
@@ -1051,13 +1037,8 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry) | |||
1051 | struct buffer_head *bh; | 1037 | struct buffer_head *bh; |
1052 | struct gfs2_holder ghs[3]; | 1038 | struct gfs2_holder ghs[3]; |
1053 | struct gfs2_rgrpd *rgd; | 1039 | struct gfs2_rgrpd *rgd; |
1054 | struct gfs2_holder ri_gh; | ||
1055 | int error; | 1040 | int error; |
1056 | 1041 | ||
1057 | error = gfs2_rindex_hold(sdp, &ri_gh); | ||
1058 | if (error) | ||
1059 | return error; | ||
1060 | |||
1061 | gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); | 1042 | gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); |
1062 | gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); | 1043 | gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); |
1063 | 1044 | ||
@@ -1114,7 +1095,6 @@ out_child: | |||
1114 | gfs2_glock_dq(ghs); | 1095 | gfs2_glock_dq(ghs); |
1115 | out_parent: | 1096 | out_parent: |
1116 | gfs2_holder_uninit(ghs); | 1097 | gfs2_holder_uninit(ghs); |
1117 | gfs2_glock_dq_uninit(&ri_gh); | ||
1118 | return error; | 1098 | return error; |
1119 | } | 1099 | } |
1120 | 1100 | ||
@@ -1137,7 +1117,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry, | |||
1137 | if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode) - 1) | 1117 | if (size > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode) - 1) |
1138 | return -ENAMETOOLONG; | 1118 | return -ENAMETOOLONG; |
1139 | 1119 | ||
1140 | return gfs2_create_inode(dir, dentry, S_IFLNK | S_IRWXUGO, 0, symname, size); | 1120 | return gfs2_create_inode(dir, dentry, S_IFLNK | S_IRWXUGO, 0, symname, size, 0); |
1141 | } | 1121 | } |
1142 | 1122 | ||
1143 | /** | 1123 | /** |
@@ -1151,7 +1131,7 @@ static int gfs2_symlink(struct inode *dir, struct dentry *dentry, | |||
1151 | 1131 | ||
1152 | static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) | 1132 | static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) |
1153 | { | 1133 | { |
1154 | return gfs2_create_inode(dir, dentry, S_IFDIR | mode, 0, NULL, 0); | 1134 | return gfs2_create_inode(dir, dentry, S_IFDIR | mode, 0, NULL, 0, 0); |
1155 | } | 1135 | } |
1156 | 1136 | ||
1157 | /** | 1137 | /** |
@@ -1166,7 +1146,7 @@ static int gfs2_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
1166 | static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode, | 1146 | static int gfs2_mknod(struct inode *dir, struct dentry *dentry, int mode, |
1167 | dev_t dev) | 1147 | dev_t dev) |
1168 | { | 1148 | { |
1169 | return gfs2_create_inode(dir, dentry, mode, dev, NULL, 0); | 1149 | return gfs2_create_inode(dir, dentry, mode, dev, NULL, 0, 0); |
1170 | } | 1150 | } |
1171 | 1151 | ||
1172 | /* | 1152 | /* |
@@ -1232,7 +1212,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, | |||
1232 | struct gfs2_inode *ip = GFS2_I(odentry->d_inode); | 1212 | struct gfs2_inode *ip = GFS2_I(odentry->d_inode); |
1233 | struct gfs2_inode *nip = NULL; | 1213 | struct gfs2_inode *nip = NULL; |
1234 | struct gfs2_sbd *sdp = GFS2_SB(odir); | 1214 | struct gfs2_sbd *sdp = GFS2_SB(odir); |
1235 | struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, }, ri_gh; | 1215 | struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, }; |
1236 | struct gfs2_rgrpd *nrgd; | 1216 | struct gfs2_rgrpd *nrgd; |
1237 | unsigned int num_gh; | 1217 | unsigned int num_gh; |
1238 | int dir_rename = 0; | 1218 | int dir_rename = 0; |
@@ -1246,10 +1226,6 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, | |||
1246 | return 0; | 1226 | return 0; |
1247 | } | 1227 | } |
1248 | 1228 | ||
1249 | error = gfs2_rindex_hold(sdp, &ri_gh); | ||
1250 | if (error) | ||
1251 | return error; | ||
1252 | |||
1253 | if (odip != ndip) { | 1229 | if (odip != ndip) { |
1254 | error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, | 1230 | error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, |
1255 | 0, &r_gh); | 1231 | 0, &r_gh); |
@@ -1386,12 +1362,12 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, | |||
1386 | 1362 | ||
1387 | al->al_requested = sdp->sd_max_dirres; | 1363 | al->al_requested = sdp->sd_max_dirres; |
1388 | 1364 | ||
1389 | error = gfs2_inplace_reserve_ri(ndip); | 1365 | error = gfs2_inplace_reserve(ndip); |
1390 | if (error) | 1366 | if (error) |
1391 | goto out_gunlock_q; | 1367 | goto out_gunlock_q; |
1392 | 1368 | ||
1393 | error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + | 1369 | error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + |
1394 | gfs2_rg_blocks(al) + | 1370 | gfs2_rg_blocks(ndip) + |
1395 | 4 * RES_DINODE + 4 * RES_LEAF + | 1371 | 4 * RES_DINODE + 4 * RES_LEAF + |
1396 | RES_STATFS + RES_QUOTA + 4, 0); | 1372 | RES_STATFS + RES_QUOTA + 4, 0); |
1397 | if (error) | 1373 | if (error) |
@@ -1457,7 +1433,6 @@ out_gunlock_r: | |||
1457 | if (r_gh.gh_gl) | 1433 | if (r_gh.gh_gl) |
1458 | gfs2_glock_dq_uninit(&r_gh); | 1434 | gfs2_glock_dq_uninit(&r_gh); |
1459 | out: | 1435 | out: |
1460 | gfs2_glock_dq_uninit(&ri_gh); | ||
1461 | return error; | 1436 | return error; |
1462 | } | 1437 | } |
1463 | 1438 | ||
@@ -1561,21 +1536,10 @@ int gfs2_permission(struct inode *inode, int mask) | |||
1561 | return error; | 1536 | return error; |
1562 | } | 1537 | } |
1563 | 1538 | ||
1564 | static int __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) | 1539 | static int __gfs2_setattr_simple(struct inode *inode, struct iattr *attr) |
1565 | { | 1540 | { |
1566 | struct inode *inode = &ip->i_inode; | ||
1567 | struct buffer_head *dibh; | ||
1568 | int error; | ||
1569 | |||
1570 | error = gfs2_meta_inode_buffer(ip, &dibh); | ||
1571 | if (error) | ||
1572 | return error; | ||
1573 | |||
1574 | setattr_copy(inode, attr); | 1541 | setattr_copy(inode, attr); |
1575 | mark_inode_dirty(inode); | 1542 | mark_inode_dirty(inode); |
1576 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | ||
1577 | gfs2_dinode_out(ip, dibh->b_data); | ||
1578 | brelse(dibh); | ||
1579 | return 0; | 1543 | return 0; |
1580 | } | 1544 | } |
1581 | 1545 | ||
@@ -1587,19 +1551,19 @@ static int __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) | |||
1587 | * Returns: errno | 1551 | * Returns: errno |
1588 | */ | 1552 | */ |
1589 | 1553 | ||
1590 | int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) | 1554 | int gfs2_setattr_simple(struct inode *inode, struct iattr *attr) |
1591 | { | 1555 | { |
1592 | int error; | 1556 | int error; |
1593 | 1557 | ||
1594 | if (current->journal_info) | 1558 | if (current->journal_info) |
1595 | return __gfs2_setattr_simple(ip, attr); | 1559 | return __gfs2_setattr_simple(inode, attr); |
1596 | 1560 | ||
1597 | error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0); | 1561 | error = gfs2_trans_begin(GFS2_SB(inode), RES_DINODE, 0); |
1598 | if (error) | 1562 | if (error) |
1599 | return error; | 1563 | return error; |
1600 | 1564 | ||
1601 | error = __gfs2_setattr_simple(ip, attr); | 1565 | error = __gfs2_setattr_simple(inode, attr); |
1602 | gfs2_trans_end(GFS2_SB(&ip->i_inode)); | 1566 | gfs2_trans_end(GFS2_SB(inode)); |
1603 | return error; | 1567 | return error; |
1604 | } | 1568 | } |
1605 | 1569 | ||
@@ -1637,7 +1601,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr) | |||
1637 | if (error) | 1601 | if (error) |
1638 | goto out_gunlock_q; | 1602 | goto out_gunlock_q; |
1639 | 1603 | ||
1640 | error = gfs2_setattr_simple(ip, attr); | 1604 | error = gfs2_setattr_simple(inode, attr); |
1641 | if (error) | 1605 | if (error) |
1642 | goto out_end_trans; | 1606 | goto out_end_trans; |
1643 | 1607 | ||
@@ -1693,12 +1657,12 @@ static int gfs2_setattr(struct dentry *dentry, struct iattr *attr) | |||
1693 | else if ((attr->ia_valid & ATTR_MODE) && IS_POSIXACL(inode)) | 1657 | else if ((attr->ia_valid & ATTR_MODE) && IS_POSIXACL(inode)) |
1694 | error = gfs2_acl_chmod(ip, attr); | 1658 | error = gfs2_acl_chmod(ip, attr); |
1695 | else | 1659 | else |
1696 | error = gfs2_setattr_simple(ip, attr); | 1660 | error = gfs2_setattr_simple(inode, attr); |
1697 | 1661 | ||
1698 | out: | 1662 | out: |
1699 | gfs2_glock_dq_uninit(&i_gh); | ||
1700 | if (!error) | 1663 | if (!error) |
1701 | mark_inode_dirty(inode); | 1664 | mark_inode_dirty(inode); |
1665 | gfs2_glock_dq_uninit(&i_gh); | ||
1702 | return error; | 1666 | return error; |
1703 | } | 1667 | } |
1704 | 1668 | ||
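The gfs2_create()/gfs2_create_inode() hunks above fold the old lookup-and-retry loop into gfs2_create_inode() itself, keyed on the new excl argument taken from LOOKUP_EXCL. The semantics visible from user space are unchanged; a small userspace sketch of the two cases (illustrative only; the mount point and file name are hypothetical):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* O_CREAT without O_EXCL on an existing file: excl == 0, so the new
	 * code looks the inode up and instantiates the dentry instead of
	 * failing with -EEXIST. */
	int fd = open("/mnt/gfs2/existing", O_CREAT | O_WRONLY, 0644);
	if (fd >= 0)
		close(fd);

	/* O_CREAT | O_EXCL sets LOOKUP_EXCL, so excl == 1 and the existing
	 * file is still reported as an error. */
	fd = open("/mnt/gfs2/existing", O_CREAT | O_EXCL | O_WRONLY, 0644);
	if (fd < 0 && errno == EEXIST)
		fprintf(stderr, "exclusive create refused, as expected\n");
	return 0;
}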
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h index 8d90e0c0767..276e7b52b65 100644 --- a/fs/gfs2/inode.h +++ b/fs/gfs2/inode.h | |||
@@ -109,7 +109,7 @@ extern int gfs2_inode_refresh(struct gfs2_inode *ip); | |||
109 | extern struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, | 109 | extern struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, |
110 | int is_root); | 110 | int is_root); |
111 | extern int gfs2_permission(struct inode *inode, int mask); | 111 | extern int gfs2_permission(struct inode *inode, int mask); |
112 | extern int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr); | 112 | extern int gfs2_setattr_simple(struct inode *inode, struct iattr *attr); |
113 | extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); | 113 | extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name); |
114 | extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf); | 114 | extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf); |
115 | 115 | ||
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 05bbb124699..0301be655b1 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c | |||
@@ -60,6 +60,29 @@ static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh) | |||
60 | trace_gfs2_pin(bd, 1); | 60 | trace_gfs2_pin(bd, 1); |
61 | } | 61 | } |
62 | 62 | ||
63 | static bool buffer_is_rgrp(const struct gfs2_bufdata *bd) | ||
64 | { | ||
65 | return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP; | ||
66 | } | ||
67 | |||
68 | static void maybe_release_space(struct gfs2_bufdata *bd) | ||
69 | { | ||
70 | struct gfs2_glock *gl = bd->bd_gl; | ||
71 | struct gfs2_sbd *sdp = gl->gl_sbd; | ||
72 | struct gfs2_rgrpd *rgd = gl->gl_object; | ||
73 | unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number; | ||
74 | struct gfs2_bitmap *bi = rgd->rd_bits + index; | ||
75 | |||
76 | if (bi->bi_clone == 0) | ||
77 | return; | ||
78 | if (sdp->sd_args.ar_discard) | ||
79 | gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi); | ||
80 | memcpy(bi->bi_clone + bi->bi_offset, | ||
81 | bd->bd_bh->b_data + bi->bi_offset, bi->bi_len); | ||
82 | clear_bit(GBF_FULL, &bi->bi_flags); | ||
83 | rgd->rd_free_clone = rgd->rd_free; | ||
84 | } | ||
85 | |||
63 | /** | 86 | /** |
64 | * gfs2_unpin - Unpin a buffer | 87 | * gfs2_unpin - Unpin a buffer |
65 | * @sdp: the filesystem the buffer belongs to | 88 | * @sdp: the filesystem the buffer belongs to |
@@ -81,6 +104,9 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh, | |||
81 | mark_buffer_dirty(bh); | 104 | mark_buffer_dirty(bh); |
82 | clear_buffer_pinned(bh); | 105 | clear_buffer_pinned(bh); |
83 | 106 | ||
107 | if (buffer_is_rgrp(bd)) | ||
108 | maybe_release_space(bd); | ||
109 | |||
84 | spin_lock(&sdp->sd_ail_lock); | 110 | spin_lock(&sdp->sd_ail_lock); |
85 | if (bd->bd_ail) { | 111 | if (bd->bd_ail) { |
86 | list_del(&bd->bd_ail_st_list); | 112 | list_del(&bd->bd_ail_st_list); |
@@ -469,42 +495,6 @@ static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) | |||
469 | gfs2_revoke_clean(sdp); | 495 | gfs2_revoke_clean(sdp); |
470 | } | 496 | } |
471 | 497 | ||
472 | static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) | ||
473 | { | ||
474 | struct gfs2_rgrpd *rgd; | ||
475 | struct gfs2_trans *tr = current->journal_info; | ||
476 | |||
477 | tr->tr_touched = 1; | ||
478 | |||
479 | rgd = container_of(le, struct gfs2_rgrpd, rd_le); | ||
480 | |||
481 | gfs2_log_lock(sdp); | ||
482 | if (!list_empty(&le->le_list)){ | ||
483 | gfs2_log_unlock(sdp); | ||
484 | return; | ||
485 | } | ||
486 | gfs2_rgrp_bh_hold(rgd); | ||
487 | sdp->sd_log_num_rg++; | ||
488 | list_add(&le->le_list, &sdp->sd_log_le_rg); | ||
489 | gfs2_log_unlock(sdp); | ||
490 | } | ||
491 | |||
492 | static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai) | ||
493 | { | ||
494 | struct list_head *head = &sdp->sd_log_le_rg; | ||
495 | struct gfs2_rgrpd *rgd; | ||
496 | |||
497 | while (!list_empty(head)) { | ||
498 | rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list); | ||
499 | list_del_init(&rgd->rd_le.le_list); | ||
500 | sdp->sd_log_num_rg--; | ||
501 | |||
502 | gfs2_rgrp_repolish_clones(rgd); | ||
503 | gfs2_rgrp_bh_put(rgd); | ||
504 | } | ||
505 | gfs2_assert_warn(sdp, !sdp->sd_log_num_rg); | ||
506 | } | ||
507 | |||
508 | /** | 498 | /** |
509 | * databuf_lo_add - Add a databuf to the transaction. | 499 | * databuf_lo_add - Add a databuf to the transaction. |
510 | * | 500 | * |
@@ -705,8 +695,6 @@ static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start, | |||
705 | 695 | ||
706 | brelse(bh_log); | 696 | brelse(bh_log); |
707 | brelse(bh_ip); | 697 | brelse(bh_ip); |
708 | if (error) | ||
709 | break; | ||
710 | 698 | ||
711 | sdp->sd_replayed_blocks++; | 699 | sdp->sd_replayed_blocks++; |
712 | } | 700 | } |
@@ -771,8 +759,6 @@ const struct gfs2_log_operations gfs2_revoke_lops = { | |||
771 | }; | 759 | }; |
772 | 760 | ||
773 | const struct gfs2_log_operations gfs2_rg_lops = { | 761 | const struct gfs2_log_operations gfs2_rg_lops = { |
774 | .lo_add = rg_lo_add, | ||
775 | .lo_after_commit = rg_lo_after_commit, | ||
776 | .lo_name = "rg", | 762 | .lo_name = "rg", |
777 | }; | 763 | }; |
778 | 764 | ||
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 079587e5384..7e823bbd245 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -77,8 +77,7 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) | |||
77 | 77 | ||
78 | spin_lock_init(&sdp->sd_rindex_spin); | 78 | spin_lock_init(&sdp->sd_rindex_spin); |
79 | mutex_init(&sdp->sd_rindex_mutex); | 79 | mutex_init(&sdp->sd_rindex_mutex); |
80 | INIT_LIST_HEAD(&sdp->sd_rindex_list); | 80 | sdp->sd_rindex_tree.rb_node = NULL; |
81 | INIT_LIST_HEAD(&sdp->sd_rindex_mru_list); | ||
82 | 81 | ||
83 | INIT_LIST_HEAD(&sdp->sd_jindex_list); | 82 | INIT_LIST_HEAD(&sdp->sd_jindex_list); |
84 | spin_lock_init(&sdp->sd_jindex_spin); | 83 | spin_lock_init(&sdp->sd_jindex_spin); |
@@ -652,7 +651,6 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) | |||
652 | fs_err(sdp, "can't lookup journal index: %d\n", error); | 651 | fs_err(sdp, "can't lookup journal index: %d\n", error); |
653 | return PTR_ERR(sdp->sd_jindex); | 652 | return PTR_ERR(sdp->sd_jindex); |
654 | } | 653 | } |
655 | ip = GFS2_I(sdp->sd_jindex); | ||
656 | 654 | ||
657 | /* Load in the journal index special file */ | 655 | /* Load in the journal index special file */ |
658 | 656 | ||
@@ -764,7 +762,6 @@ fail: | |||
764 | static int init_inodes(struct gfs2_sbd *sdp, int undo) | 762 | static int init_inodes(struct gfs2_sbd *sdp, int undo) |
765 | { | 763 | { |
766 | int error = 0; | 764 | int error = 0; |
767 | struct gfs2_inode *ip; | ||
768 | struct inode *master = sdp->sd_master_dir->d_inode; | 765 | struct inode *master = sdp->sd_master_dir->d_inode; |
769 | 766 | ||
770 | if (undo) | 767 | if (undo) |
@@ -789,7 +786,6 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo) | |||
789 | fs_err(sdp, "can't get resource index inode: %d\n", error); | 786 | fs_err(sdp, "can't get resource index inode: %d\n", error); |
790 | goto fail_statfs; | 787 | goto fail_statfs; |
791 | } | 788 | } |
792 | ip = GFS2_I(sdp->sd_rindex); | ||
793 | sdp->sd_rindex_uptodate = 0; | 789 | sdp->sd_rindex_uptodate = 0; |
794 | 790 | ||
795 | /* Read in the quota inode */ | 791 | /* Read in the quota inode */ |
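The init_sbd() hunk above seeds the new tree with sdp->sd_rindex_tree.rb_node = NULL; the rbtree API's RB_ROOT initializer would be an equivalent, arguably more conventional, spelling:

	sdp->sd_rindex_tree = RB_ROOT;	/* same effect as zeroing rb_node */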
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 0e8bb13381e..7e528dc14f8 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c | |||
@@ -638,15 +638,18 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc, | |||
638 | unsigned long index = loc >> PAGE_CACHE_SHIFT; | 638 | unsigned long index = loc >> PAGE_CACHE_SHIFT; |
639 | unsigned offset = loc & (PAGE_CACHE_SIZE - 1); | 639 | unsigned offset = loc & (PAGE_CACHE_SIZE - 1); |
640 | unsigned blocksize, iblock, pos; | 640 | unsigned blocksize, iblock, pos; |
641 | struct buffer_head *bh, *dibh; | 641 | struct buffer_head *bh; |
642 | struct page *page; | 642 | struct page *page; |
643 | void *kaddr, *ptr; | 643 | void *kaddr, *ptr; |
644 | struct gfs2_quota q, *qp; | 644 | struct gfs2_quota q, *qp; |
645 | int err, nbytes; | 645 | int err, nbytes; |
646 | u64 size; | 646 | u64 size; |
647 | 647 | ||
648 | if (gfs2_is_stuffed(ip)) | 648 | if (gfs2_is_stuffed(ip)) { |
649 | gfs2_unstuff_dinode(ip, NULL); | 649 | err = gfs2_unstuff_dinode(ip, NULL); |
650 | if (err) | ||
651 | return err; | ||
652 | } | ||
650 | 653 | ||
651 | memset(&q, 0, sizeof(struct gfs2_quota)); | 654 | memset(&q, 0, sizeof(struct gfs2_quota)); |
652 | err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q)); | 655 | err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q)); |
@@ -736,22 +739,13 @@ get_a_page: | |||
736 | goto get_a_page; | 739 | goto get_a_page; |
737 | } | 740 | } |
738 | 741 | ||
739 | /* Update the disk inode timestamp and size (if extended) */ | ||
740 | err = gfs2_meta_inode_buffer(ip, &dibh); | ||
741 | if (err) | ||
742 | goto out; | ||
743 | |||
744 | size = loc + sizeof(struct gfs2_quota); | 742 | size = loc + sizeof(struct gfs2_quota); |
745 | if (size > inode->i_size) | 743 | if (size > inode->i_size) |
746 | i_size_write(inode, size); | 744 | i_size_write(inode, size); |
747 | inode->i_mtime = inode->i_atime = CURRENT_TIME; | 745 | inode->i_mtime = inode->i_atime = CURRENT_TIME; |
748 | gfs2_trans_add_bh(ip->i_gl, dibh, 1); | ||
749 | gfs2_dinode_out(ip, dibh->b_data); | ||
750 | brelse(dibh); | ||
751 | mark_inode_dirty(inode); | 746 | mark_inode_dirty(inode); |
752 | |||
753 | out: | ||
754 | return err; | 747 | return err; |
748 | |||
755 | unlock_out: | 749 | unlock_out: |
756 | unlock_page(page); | 750 | unlock_page(page); |
757 | page_cache_release(page); | 751 | page_cache_release(page); |
@@ -822,7 +816,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) | |||
822 | goto out_alloc; | 816 | goto out_alloc; |
823 | 817 | ||
824 | if (nalloc) | 818 | if (nalloc) |
825 | blocks += gfs2_rg_blocks(al) + nalloc * ind_blocks + RES_STATFS; | 819 | blocks += gfs2_rg_blocks(ip) + nalloc * ind_blocks + RES_STATFS; |
826 | 820 | ||
827 | error = gfs2_trans_begin(sdp, blocks, 0); | 821 | error = gfs2_trans_begin(sdp, blocks, 0); |
828 | if (error) | 822 | if (error) |
@@ -936,7 +930,9 @@ int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid) | |||
936 | unsigned int x; | 930 | unsigned int x; |
937 | int error = 0; | 931 | int error = 0; |
938 | 932 | ||
939 | gfs2_quota_hold(ip, uid, gid); | 933 | error = gfs2_quota_hold(ip, uid, gid); |
934 | if (error) | ||
935 | return error; | ||
940 | 936 | ||
941 | if (capable(CAP_SYS_RESOURCE) || | 937 | if (capable(CAP_SYS_RESOURCE) || |
942 | sdp->sd_args.ar_quota != GFS2_QUOTA_ON) | 938 | sdp->sd_args.ar_quota != GFS2_QUOTA_ON) |
@@ -1607,7 +1603,7 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id, | |||
1607 | error = gfs2_inplace_reserve(ip); | 1603 | error = gfs2_inplace_reserve(ip); |
1608 | if (error) | 1604 | if (error) |
1609 | goto out_alloc; | 1605 | goto out_alloc; |
1610 | blocks += gfs2_rg_blocks(al); | 1606 | blocks += gfs2_rg_blocks(ip); |
1611 | } | 1607 | } |
1612 | 1608 | ||
1613 | /* Some quotas span block boundaries and can update two blocks, | 1609 | /* Some quotas span block boundaries and can update two blocks, |
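The quota.c call sites above switch gfs2_rg_blocks() from taking the struct gfs2_alloc to taking the inode. The matching fs/gfs2/trans.h hunk is not shown in this section; what these callers imply is that the reservation size now comes from the resource group cached in the inode. A hypothetical sketch of such a helper (the real definition may differ):

static inline unsigned int gfs2_rg_blocks_sketch(const struct gfs2_inode *ip)
{
	/* bitmap blocks of the rgrp cached by gfs2_inplace_reserve() */
	return ip->i_rgd->rd_length;
}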
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 7f8af1eb02d..96bd6d759f2 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/gfs2_ondisk.h> | 15 | #include <linux/gfs2_ondisk.h> |
16 | #include <linux/prefetch.h> | 16 | #include <linux/prefetch.h> |
17 | #include <linux/blkdev.h> | 17 | #include <linux/blkdev.h> |
18 | #include <linux/rbtree.h> | ||
18 | 19 | ||
19 | #include "gfs2.h" | 20 | #include "gfs2.h" |
20 | #include "incore.h" | 21 | #include "incore.h" |
@@ -328,18 +329,22 @@ static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block) | |||
328 | 329 | ||
329 | struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk) | 330 | struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk) |
330 | { | 331 | { |
331 | struct gfs2_rgrpd *rgd; | 332 | struct rb_node **newn; |
333 | struct gfs2_rgrpd *cur; | ||
332 | 334 | ||
333 | spin_lock(&sdp->sd_rindex_spin); | 335 | spin_lock(&sdp->sd_rindex_spin); |
334 | 336 | newn = &sdp->sd_rindex_tree.rb_node; | |
335 | list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) { | 337 | while (*newn) { |
336 | if (rgrp_contains_block(rgd, blk)) { | 338 | cur = rb_entry(*newn, struct gfs2_rgrpd, rd_node); |
337 | list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list); | 339 | if (blk < cur->rd_addr) |
340 | newn = &((*newn)->rb_left); | ||
341 | else if (blk >= cur->rd_data0 + cur->rd_data) | ||
342 | newn = &((*newn)->rb_right); | ||
343 | else { | ||
338 | spin_unlock(&sdp->sd_rindex_spin); | 344 | spin_unlock(&sdp->sd_rindex_spin); |
339 | return rgd; | 345 | return cur; |
340 | } | 346 | } |
341 | } | 347 | } |
342 | |||
343 | spin_unlock(&sdp->sd_rindex_spin); | 348 | spin_unlock(&sdp->sd_rindex_spin); |
344 | 349 | ||
345 | return NULL; | 350 | return NULL; |
@@ -354,8 +359,15 @@ struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk) | |||
354 | 359 | ||
355 | struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp) | 360 | struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp) |
356 | { | 361 | { |
357 | gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list)); | 362 | const struct rb_node *n; |
358 | return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list); | 363 | struct gfs2_rgrpd *rgd; |
364 | |||
365 | spin_lock(&sdp->sd_rindex_spin); | ||
366 | n = rb_first(&sdp->sd_rindex_tree); | ||
367 | rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); | ||
368 | spin_unlock(&sdp->sd_rindex_spin); | ||
369 | |||
370 | return rgd; | ||
359 | } | 371 | } |
360 | 372 | ||
361 | /** | 373 | /** |
@@ -367,47 +379,60 @@ struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp) | |||
367 | 379 | ||
368 | struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd) | 380 | struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd) |
369 | { | 381 | { |
370 | if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list) | 382 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
383 | const struct rb_node *n; | ||
384 | |||
385 | spin_lock(&sdp->sd_rindex_spin); | ||
386 | n = rb_next(&rgd->rd_node); | ||
387 | if (n == NULL) | ||
388 | n = rb_first(&sdp->sd_rindex_tree); | ||
389 | |||
390 | if (unlikely(&rgd->rd_node == n)) { | ||
391 | spin_unlock(&sdp->sd_rindex_spin); | ||
371 | return NULL; | 392 | return NULL; |
372 | return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list); | 393 | } |
394 | rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); | ||
395 | spin_unlock(&sdp->sd_rindex_spin); | ||
396 | return rgd; | ||
373 | } | 397 | } |
374 | 398 | ||
375 | static void clear_rgrpdi(struct gfs2_sbd *sdp) | 399 | void gfs2_free_clones(struct gfs2_rgrpd *rgd) |
376 | { | 400 | { |
377 | struct list_head *head; | 401 | int x; |
402 | |||
403 | for (x = 0; x < rgd->rd_length; x++) { | ||
404 | struct gfs2_bitmap *bi = rgd->rd_bits + x; | ||
405 | kfree(bi->bi_clone); | ||
406 | bi->bi_clone = NULL; | ||
407 | } | ||
408 | } | ||
409 | |||
410 | void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) | ||
411 | { | ||
412 | struct rb_node *n; | ||
378 | struct gfs2_rgrpd *rgd; | 413 | struct gfs2_rgrpd *rgd; |
379 | struct gfs2_glock *gl; | 414 | struct gfs2_glock *gl; |
380 | 415 | ||
381 | spin_lock(&sdp->sd_rindex_spin); | 416 | while ((n = rb_first(&sdp->sd_rindex_tree))) { |
382 | sdp->sd_rindex_forward = NULL; | 417 | rgd = rb_entry(n, struct gfs2_rgrpd, rd_node); |
383 | spin_unlock(&sdp->sd_rindex_spin); | ||
384 | |||
385 | head = &sdp->sd_rindex_list; | ||
386 | while (!list_empty(head)) { | ||
387 | rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list); | ||
388 | gl = rgd->rd_gl; | 418 | gl = rgd->rd_gl; |
389 | 419 | ||
390 | list_del(&rgd->rd_list); | 420 | rb_erase(n, &sdp->sd_rindex_tree); |
391 | list_del(&rgd->rd_list_mru); | ||
392 | 421 | ||
393 | if (gl) { | 422 | if (gl) { |
423 | spin_lock(&gl->gl_spin); | ||
394 | gl->gl_object = NULL; | 424 | gl->gl_object = NULL; |
425 | spin_unlock(&gl->gl_spin); | ||
395 | gfs2_glock_add_to_lru(gl); | 426 | gfs2_glock_add_to_lru(gl); |
396 | gfs2_glock_put(gl); | 427 | gfs2_glock_put(gl); |
397 | } | 428 | } |
398 | 429 | ||
430 | gfs2_free_clones(rgd); | ||
399 | kfree(rgd->rd_bits); | 431 | kfree(rgd->rd_bits); |
400 | kmem_cache_free(gfs2_rgrpd_cachep, rgd); | 432 | kmem_cache_free(gfs2_rgrpd_cachep, rgd); |
401 | } | 433 | } |
402 | } | 434 | } |
403 | 435 | ||
404 | void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) | ||
405 | { | ||
406 | mutex_lock(&sdp->sd_rindex_mutex); | ||
407 | clear_rgrpdi(sdp); | ||
408 | mutex_unlock(&sdp->sd_rindex_mutex); | ||
409 | } | ||
410 | |||
411 | static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd) | 436 | static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd) |
412 | { | 437 | { |
413 | printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr); | 438 | printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr); |
@@ -524,22 +549,34 @@ u64 gfs2_ri_total(struct gfs2_sbd *sdp) | |||
524 | return total_data; | 549 | return total_data; |
525 | } | 550 | } |
526 | 551 | ||
527 | static void gfs2_rindex_in(struct gfs2_rgrpd *rgd, const void *buf) | 552 | static void rgd_insert(struct gfs2_rgrpd *rgd) |
528 | { | 553 | { |
529 | const struct gfs2_rindex *str = buf; | 554 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
555 | struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL; | ||
556 | |||
557 | /* Figure out where to put new node */ | ||
558 | while (*newn) { | ||
559 | struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd, | ||
560 | rd_node); | ||
561 | |||
562 | parent = *newn; | ||
563 | if (rgd->rd_addr < cur->rd_addr) | ||
564 | newn = &((*newn)->rb_left); | ||
565 | else if (rgd->rd_addr > cur->rd_addr) | ||
566 | newn = &((*newn)->rb_right); | ||
567 | else | ||
568 | return; | ||
569 | } | ||
530 | 570 | ||
531 | rgd->rd_addr = be64_to_cpu(str->ri_addr); | 571 | rb_link_node(&rgd->rd_node, parent, newn); |
532 | rgd->rd_length = be32_to_cpu(str->ri_length); | 572 | rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree); |
533 | rgd->rd_data0 = be64_to_cpu(str->ri_data0); | ||
534 | rgd->rd_data = be32_to_cpu(str->ri_data); | ||
535 | rgd->rd_bitbytes = be32_to_cpu(str->ri_bitbytes); | ||
536 | } | 573 | } |
537 | 574 | ||
538 | /** | 575 | /** |
539 | * read_rindex_entry - Pull in a new resource index entry from the disk | 576 | * read_rindex_entry - Pull in a new resource index entry from the disk |
540 | * @gl: The glock covering the rindex inode | 577 | * @gl: The glock covering the rindex inode |
541 | * | 578 | * |
542 | * Returns: 0 on success, error code otherwise | 579 | * Returns: 0 on success, > 0 on EOF, error code otherwise |
543 | */ | 580 | */ |
544 | 581 | ||
545 | static int read_rindex_entry(struct gfs2_inode *ip, | 582 | static int read_rindex_entry(struct gfs2_inode *ip, |
@@ -547,44 +584,53 @@ static int read_rindex_entry(struct gfs2_inode *ip, | |||
547 | { | 584 | { |
548 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 585 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
549 | loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex); | 586 | loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex); |
550 | char buf[sizeof(struct gfs2_rindex)]; | 587 | struct gfs2_rindex buf; |
551 | int error; | 588 | int error; |
552 | struct gfs2_rgrpd *rgd; | 589 | struct gfs2_rgrpd *rgd; |
553 | 590 | ||
554 | error = gfs2_internal_read(ip, ra_state, buf, &pos, | 591 | if (pos >= i_size_read(&ip->i_inode)) |
592 | return 1; | ||
593 | |||
594 | error = gfs2_internal_read(ip, ra_state, (char *)&buf, &pos, | ||
555 | sizeof(struct gfs2_rindex)); | 595 | sizeof(struct gfs2_rindex)); |
556 | if (!error) | 596 | |
557 | return 0; | 597 | if (error != sizeof(struct gfs2_rindex)) |
558 | if (error != sizeof(struct gfs2_rindex)) { | 598 | return (error == 0) ? 1 : error; |
559 | if (error > 0) | ||
560 | error = -EIO; | ||
561 | return error; | ||
562 | } | ||
563 | 599 | ||
564 | rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS); | 600 | rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS); |
565 | error = -ENOMEM; | 601 | error = -ENOMEM; |
566 | if (!rgd) | 602 | if (!rgd) |
567 | return error; | 603 | return error; |
568 | 604 | ||
569 | mutex_init(&rgd->rd_mutex); | ||
570 | lops_init_le(&rgd->rd_le, &gfs2_rg_lops); | ||
571 | rgd->rd_sbd = sdp; | 605 | rgd->rd_sbd = sdp; |
606 | rgd->rd_addr = be64_to_cpu(buf.ri_addr); | ||
607 | rgd->rd_length = be32_to_cpu(buf.ri_length); | ||
608 | rgd->rd_data0 = be64_to_cpu(buf.ri_data0); | ||
609 | rgd->rd_data = be32_to_cpu(buf.ri_data); | ||
610 | rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes); | ||
572 | 611 | ||
573 | list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list); | ||
574 | list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list); | ||
575 | |||
576 | gfs2_rindex_in(rgd, buf); | ||
577 | error = compute_bitstructs(rgd); | 612 | error = compute_bitstructs(rgd); |
578 | if (error) | 613 | if (error) |
579 | return error; | 614 | goto fail; |
580 | 615 | ||
581 | error = gfs2_glock_get(sdp, rgd->rd_addr, | 616 | error = gfs2_glock_get(sdp, rgd->rd_addr, |
582 | &gfs2_rgrp_glops, CREATE, &rgd->rd_gl); | 617 | &gfs2_rgrp_glops, CREATE, &rgd->rd_gl); |
583 | if (error) | 618 | if (error) |
584 | return error; | 619 | goto fail; |
585 | 620 | ||
586 | rgd->rd_gl->gl_object = rgd; | 621 | rgd->rd_gl->gl_object = rgd; |
587 | rgd->rd_flags &= ~GFS2_RDF_UPTODATE; | 622 | rgd->rd_flags &= ~GFS2_RDF_UPTODATE; |
623 | if (rgd->rd_data > sdp->sd_max_rg_data) | ||
624 | sdp->sd_max_rg_data = rgd->rd_data; | ||
625 | spin_lock(&sdp->sd_rindex_spin); | ||
626 | rgd_insert(rgd); | ||
627 | sdp->sd_rgrps++; | ||
628 | spin_unlock(&sdp->sd_rindex_spin); | ||
629 | return error; | ||
630 | |||
631 | fail: | ||
632 | kfree(rgd->rd_bits); | ||
633 | kmem_cache_free(gfs2_rgrpd_cachep, rgd); | ||
588 | return error; | 634 | return error; |
589 | } | 635 | } |
590 | 636 | ||
@@ -595,40 +641,28 @@ static int read_rindex_entry(struct gfs2_inode *ip, | |||
595 | * Returns: 0 on successful update, error code otherwise | 641 | * Returns: 0 on successful update, error code otherwise |
596 | */ | 642 | */ |
597 | 643 | ||
598 | int gfs2_ri_update(struct gfs2_inode *ip) | 644 | static int gfs2_ri_update(struct gfs2_inode *ip) |
599 | { | 645 | { |
600 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 646 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
601 | struct inode *inode = &ip->i_inode; | 647 | struct inode *inode = &ip->i_inode; |
602 | struct file_ra_state ra_state; | 648 | struct file_ra_state ra_state; |
603 | u64 rgrp_count = i_size_read(inode); | ||
604 | struct gfs2_rgrpd *rgd; | ||
605 | unsigned int max_data = 0; | ||
606 | int error; | 649 | int error; |
607 | 650 | ||
608 | do_div(rgrp_count, sizeof(struct gfs2_rindex)); | ||
609 | clear_rgrpdi(sdp); | ||
610 | |||
611 | file_ra_state_init(&ra_state, inode->i_mapping); | 651 | file_ra_state_init(&ra_state, inode->i_mapping); |
612 | for (sdp->sd_rgrps = 0; sdp->sd_rgrps < rgrp_count; sdp->sd_rgrps++) { | 652 | do { |
613 | error = read_rindex_entry(ip, &ra_state); | 653 | error = read_rindex_entry(ip, &ra_state); |
614 | if (error) { | 654 | } while (error == 0); |
615 | clear_rgrpdi(sdp); | 655 | |
616 | return error; | 656 | if (error < 0) |
617 | } | 657 | return error; |
618 | } | ||
619 | 658 | ||
620 | list_for_each_entry(rgd, &sdp->sd_rindex_list, rd_list) | ||
621 | if (rgd->rd_data > max_data) | ||
622 | max_data = rgd->rd_data; | ||
623 | sdp->sd_max_rg_data = max_data; | ||
624 | sdp->sd_rindex_uptodate = 1; | 659 | sdp->sd_rindex_uptodate = 1; |
625 | return 0; | 660 | return 0; |
626 | } | 661 | } |
627 | 662 | ||
628 | /** | 663 | /** |
629 | * gfs2_rindex_hold - Grab a lock on the rindex | 664 | * gfs2_rindex_update - Update the rindex if required |
630 | * @sdp: The GFS2 superblock | 665 | * @sdp: The GFS2 superblock |
631 | * @ri_gh: the glock holder | ||
632 | * | 666 | * |
633 | * We grab a lock on the rindex inode to make sure that it doesn't | 667 | * We grab a lock on the rindex inode to make sure that it doesn't |
634 | * change whilst we are performing an operation. We keep this lock | 668 | * change whilst we are performing an operation. We keep this lock |
@@ -640,30 +674,29 @@ int gfs2_ri_update(struct gfs2_inode *ip) | |||
640 | * special file, which might have been updated if someone expanded the | 674 | * special file, which might have been updated if someone expanded the |
641 | * filesystem (via gfs2_grow utility), which adds new resource groups. | 675 | * filesystem (via gfs2_grow utility), which adds new resource groups. |
642 | * | 676 | * |
643 | * Returns: 0 on success, error code otherwise | 677 | * Returns: 0 on success, error code otherwise |
644 | */ | 678 | */ |
645 | 679 | ||
646 | int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh) | 680 | int gfs2_rindex_update(struct gfs2_sbd *sdp) |
647 | { | 681 | { |
648 | struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex); | 682 | struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex); |
649 | struct gfs2_glock *gl = ip->i_gl; | 683 | struct gfs2_glock *gl = ip->i_gl; |
650 | int error; | 684 | struct gfs2_holder ri_gh; |
651 | 685 | int error = 0; | |
652 | error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh); | ||
653 | if (error) | ||
654 | return error; | ||
655 | 686 | ||
656 | /* Read new copy from disk if we don't have the latest */ | 687 | /* Read new copy from disk if we don't have the latest */ |
657 | if (!sdp->sd_rindex_uptodate) { | 688 | if (!sdp->sd_rindex_uptodate) { |
658 | mutex_lock(&sdp->sd_rindex_mutex); | 689 | mutex_lock(&sdp->sd_rindex_mutex); |
659 | if (!sdp->sd_rindex_uptodate) { | 690 | error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh); |
691 | if (error) | ||
692 | return error; | ||
693 | if (!sdp->sd_rindex_uptodate) | ||
660 | error = gfs2_ri_update(ip); | 694 | error = gfs2_ri_update(ip); |
661 | if (error) | 695 | gfs2_glock_dq_uninit(&ri_gh); |
662 | gfs2_glock_dq_uninit(ri_gh); | ||
663 | } | ||
664 | mutex_unlock(&sdp->sd_rindex_mutex); | 696 | mutex_unlock(&sdp->sd_rindex_mutex); |
665 | } | 697 | } |
666 | 698 | ||
699 | |||
667 | return error; | 700 | return error; |
668 | } | 701 | } |
669 | 702 | ||
@@ -694,7 +727,7 @@ static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf) | |||
694 | } | 727 | } |
695 | 728 | ||
696 | /** | 729 | /** |
697 | * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps | 730 | * gfs2_rgrp_go_lock - Read in a RG's header and bitmaps |
698 | * @rgd: the struct gfs2_rgrpd describing the RG to read in | 731 | * @rgd: the struct gfs2_rgrpd describing the RG to read in |
699 | * | 732 | * |
700 | * Read in all of a Resource Group's header and bitmap blocks. | 733 | * Read in all of a Resource Group's header and bitmap blocks. |
@@ -703,8 +736,9 @@ static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf) | |||
703 | * Returns: errno | 736 | * Returns: errno |
704 | */ | 737 | */ |
705 | 738 | ||
706 | int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd) | 739 | int gfs2_rgrp_go_lock(struct gfs2_holder *gh) |
707 | { | 740 | { |
741 | struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object; | ||
708 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 742 | struct gfs2_sbd *sdp = rgd->rd_sbd; |
709 | struct gfs2_glock *gl = rgd->rd_gl; | 743 | struct gfs2_glock *gl = rgd->rd_gl; |
710 | unsigned int length = rgd->rd_length; | 744 | unsigned int length = rgd->rd_length; |
@@ -712,17 +746,6 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd) | |||
712 | unsigned int x, y; | 746 | unsigned int x, y; |
713 | int error; | 747 | int error; |
714 | 748 | ||
715 | mutex_lock(&rgd->rd_mutex); | ||
716 | |||
717 | spin_lock(&sdp->sd_rindex_spin); | ||
718 | if (rgd->rd_bh_count) { | ||
719 | rgd->rd_bh_count++; | ||
720 | spin_unlock(&sdp->sd_rindex_spin); | ||
721 | mutex_unlock(&rgd->rd_mutex); | ||
722 | return 0; | ||
723 | } | ||
724 | spin_unlock(&sdp->sd_rindex_spin); | ||
725 | |||
726 | for (x = 0; x < length; x++) { | 749 | for (x = 0; x < length; x++) { |
727 | bi = rgd->rd_bits + x; | 750 | bi = rgd->rd_bits + x; |
728 | error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh); | 751 | error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh); |
@@ -747,15 +770,9 @@ int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd) | |||
747 | clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags); | 770 | clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags); |
748 | gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data); | 771 | gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data); |
749 | rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK); | 772 | rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK); |
773 | rgd->rd_free_clone = rgd->rd_free; | ||
750 | } | 774 | } |
751 | 775 | ||
752 | spin_lock(&sdp->sd_rindex_spin); | ||
753 | rgd->rd_free_clone = rgd->rd_free; | ||
754 | rgd->rd_bh_count++; | ||
755 | spin_unlock(&sdp->sd_rindex_spin); | ||
756 | |||
757 | mutex_unlock(&rgd->rd_mutex); | ||
758 | |||
759 | return 0; | 776 | return 0; |
760 | 777 | ||
761 | fail: | 778 | fail: |
@@ -765,52 +782,32 @@ fail: | |||
765 | bi->bi_bh = NULL; | 782 | bi->bi_bh = NULL; |
766 | gfs2_assert_warn(sdp, !bi->bi_clone); | 783 | gfs2_assert_warn(sdp, !bi->bi_clone); |
767 | } | 784 | } |
768 | mutex_unlock(&rgd->rd_mutex); | ||
769 | 785 | ||
770 | return error; | 786 | return error; |
771 | } | 787 | } |
772 | 788 | ||
773 | void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd) | ||
774 | { | ||
775 | struct gfs2_sbd *sdp = rgd->rd_sbd; | ||
776 | |||
777 | spin_lock(&sdp->sd_rindex_spin); | ||
778 | gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count); | ||
779 | rgd->rd_bh_count++; | ||
780 | spin_unlock(&sdp->sd_rindex_spin); | ||
781 | } | ||
782 | |||
783 | /** | 789 | /** |
784 | * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get() | 790 | * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_go_lock() |
785 | * @rgd: the struct gfs2_rgrpd describing the RG to read in | 791 | * @rgd: the struct gfs2_rgrpd describing the RG to read in |
786 | * | 792 | * |
787 | */ | 793 | */ |
788 | 794 | ||
789 | void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd) | 795 | void gfs2_rgrp_go_unlock(struct gfs2_holder *gh) |
790 | { | 796 | { |
791 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 797 | struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object; |
792 | int x, length = rgd->rd_length; | 798 | int x, length = rgd->rd_length; |
793 | 799 | ||
794 | spin_lock(&sdp->sd_rindex_spin); | ||
795 | gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count); | ||
796 | if (--rgd->rd_bh_count) { | ||
797 | spin_unlock(&sdp->sd_rindex_spin); | ||
798 | return; | ||
799 | } | ||
800 | |||
801 | for (x = 0; x < length; x++) { | 800 | for (x = 0; x < length; x++) { |
802 | struct gfs2_bitmap *bi = rgd->rd_bits + x; | 801 | struct gfs2_bitmap *bi = rgd->rd_bits + x; |
803 | kfree(bi->bi_clone); | ||
804 | bi->bi_clone = NULL; | ||
805 | brelse(bi->bi_bh); | 802 | brelse(bi->bi_bh); |
806 | bi->bi_bh = NULL; | 803 | bi->bi_bh = NULL; |
807 | } | 804 | } |
808 | 805 | ||
809 | spin_unlock(&sdp->sd_rindex_spin); | ||
810 | } | 806 | } |
811 | 807 | ||
812 | static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, | 808 | void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, |
813 | const struct gfs2_bitmap *bi) | 809 | struct buffer_head *bh, |
810 | const struct gfs2_bitmap *bi) | ||
814 | { | 811 | { |
815 | struct super_block *sb = sdp->sd_vfs; | 812 | struct super_block *sb = sdp->sd_vfs; |
816 | struct block_device *bdev = sb->s_bdev; | 813 | struct block_device *bdev = sb->s_bdev; |
@@ -823,7 +820,7 @@ static void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, | |||
823 | unsigned int x; | 820 | unsigned int x; |
824 | 821 | ||
825 | for (x = 0; x < bi->bi_len; x++) { | 822 | for (x = 0; x < bi->bi_len; x++) { |
826 | const u8 *orig = bi->bi_bh->b_data + bi->bi_offset + x; | 823 | const u8 *orig = bh->b_data + bi->bi_offset + x; |
827 | const u8 *clone = bi->bi_clone + bi->bi_offset + x; | 824 | const u8 *clone = bi->bi_clone + bi->bi_offset + x; |
828 | u8 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1)); | 825 | u8 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1)); |
829 | diff &= 0x55; | 826 | diff &= 0x55; |
@@ -862,28 +859,6 @@ fail: | |||
862 | sdp->sd_args.ar_discard = 0; | 859 | sdp->sd_args.ar_discard = 0; |
863 | } | 860 | } |
864 | 861 | ||
865 | void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd) | ||
866 | { | ||
867 | struct gfs2_sbd *sdp = rgd->rd_sbd; | ||
868 | unsigned int length = rgd->rd_length; | ||
869 | unsigned int x; | ||
870 | |||
871 | for (x = 0; x < length; x++) { | ||
872 | struct gfs2_bitmap *bi = rgd->rd_bits + x; | ||
873 | if (!bi->bi_clone) | ||
874 | continue; | ||
875 | if (sdp->sd_args.ar_discard) | ||
876 | gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bi); | ||
877 | clear_bit(GBF_FULL, &bi->bi_flags); | ||
878 | memcpy(bi->bi_clone + bi->bi_offset, | ||
879 | bi->bi_bh->b_data + bi->bi_offset, bi->bi_len); | ||
880 | } | ||
881 | |||
882 | spin_lock(&sdp->sd_rindex_spin); | ||
883 | rgd->rd_free_clone = rgd->rd_free; | ||
884 | spin_unlock(&sdp->sd_rindex_spin); | ||
885 | } | ||
886 | |||
887 | /** | 862 | /** |
888 | * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode | 863 | * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode |
889 | * @ip: the incore GFS2 inode structure | 864 | * @ip: the incore GFS2 inode structure |
@@ -893,38 +868,35 @@ void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd) | |||
893 | 868 | ||
894 | struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip) | 869 | struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip) |
895 | { | 870 | { |
871 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | ||
872 | int error; | ||
896 | BUG_ON(ip->i_alloc != NULL); | 873 | BUG_ON(ip->i_alloc != NULL); |
897 | ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_NOFS); | 874 | ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_NOFS); |
875 | error = gfs2_rindex_update(sdp); | ||
876 | if (error) | ||
877 | fs_warn(sdp, "rindex update returns %d\n", error); | ||
898 | return ip->i_alloc; | 878 | return ip->i_alloc; |
899 | } | 879 | } |
900 | 880 | ||
901 | /** | 881 | /** |
902 | * try_rgrp_fit - See if a given reservation will fit in a given RG | 882 | * try_rgrp_fit - See if a given reservation will fit in a given RG |
903 | * @rgd: the RG data | 883 | * @rgd: the RG data |
904 | * @al: the struct gfs2_alloc structure describing the reservation | 884 | * @ip: the inode |
905 | * | 885 | * |
906 | * If there's room for the requested blocks to be allocated from the RG: | 886 | * If there's room for the requested blocks to be allocated from the RG: |
907 | * Sets the $al_rgd field in @al. | ||
908 | * | 887 | * |
909 | * Returns: 1 on success (it fits), 0 on failure (it doesn't fit) | 888 | * Returns: 1 on success (it fits), 0 on failure (it doesn't fit) |
910 | */ | 889 | */ |
911 | 890 | ||
912 | static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al) | 891 | static int try_rgrp_fit(const struct gfs2_rgrpd *rgd, const struct gfs2_inode *ip) |
913 | { | 892 | { |
914 | struct gfs2_sbd *sdp = rgd->rd_sbd; | 893 | const struct gfs2_alloc *al = ip->i_alloc; |
915 | int ret = 0; | ||
916 | 894 | ||
917 | if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR)) | 895 | if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR)) |
918 | return 0; | 896 | return 0; |
919 | 897 | if (rgd->rd_free_clone >= al->al_requested) | |
920 | spin_lock(&sdp->sd_rindex_spin); | 898 | return 1; |
921 | if (rgd->rd_free_clone >= al->al_requested) { | 899 | return 0; |
922 | al->al_rgd = rgd; | ||
923 | ret = 1; | ||
924 | } | ||
925 | spin_unlock(&sdp->sd_rindex_spin); | ||
926 | |||
927 | return ret; | ||
928 | } | 900 | } |
929 | 901 | ||
930 | /** | 902 | /** |
@@ -992,76 +964,6 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip | |||
992 | } | 964 | } |
993 | 965 | ||
994 | /** | 966 | /** |
995 | * recent_rgrp_next - get next RG from "recent" list | ||
996 | * @cur_rgd: current rgrp | ||
997 | * | ||
998 | * Returns: The next rgrp in the recent list | ||
999 | */ | ||
1000 | |||
1001 | static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd) | ||
1002 | { | ||
1003 | struct gfs2_sbd *sdp = cur_rgd->rd_sbd; | ||
1004 | struct list_head *head; | ||
1005 | struct gfs2_rgrpd *rgd; | ||
1006 | |||
1007 | spin_lock(&sdp->sd_rindex_spin); | ||
1008 | head = &sdp->sd_rindex_mru_list; | ||
1009 | if (unlikely(cur_rgd->rd_list_mru.next == head)) { | ||
1010 | spin_unlock(&sdp->sd_rindex_spin); | ||
1011 | return NULL; | ||
1012 | } | ||
1013 | rgd = list_entry(cur_rgd->rd_list_mru.next, struct gfs2_rgrpd, rd_list_mru); | ||
1014 | spin_unlock(&sdp->sd_rindex_spin); | ||
1015 | return rgd; | ||
1016 | } | ||
1017 | |||
1018 | /** | ||
1019 | * forward_rgrp_get - get an rgrp to try next from full list | ||
1020 | * @sdp: The GFS2 superblock | ||
1021 | * | ||
1022 | * Returns: The rgrp to try next | ||
1023 | */ | ||
1024 | |||
1025 | static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp) | ||
1026 | { | ||
1027 | struct gfs2_rgrpd *rgd; | ||
1028 | unsigned int journals = gfs2_jindex_size(sdp); | ||
1029 | unsigned int rg = 0, x; | ||
1030 | |||
1031 | spin_lock(&sdp->sd_rindex_spin); | ||
1032 | |||
1033 | rgd = sdp->sd_rindex_forward; | ||
1034 | if (!rgd) { | ||
1035 | if (sdp->sd_rgrps >= journals) | ||
1036 | rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals; | ||
1037 | |||
1038 | for (x = 0, rgd = gfs2_rgrpd_get_first(sdp); x < rg; | ||
1039 | x++, rgd = gfs2_rgrpd_get_next(rgd)) | ||
1040 | /* Do Nothing */; | ||
1041 | |||
1042 | sdp->sd_rindex_forward = rgd; | ||
1043 | } | ||
1044 | |||
1045 | spin_unlock(&sdp->sd_rindex_spin); | ||
1046 | |||
1047 | return rgd; | ||
1048 | } | ||
1049 | |||
1050 | /** | ||
1051 | * forward_rgrp_set - set the forward rgrp pointer | ||
1052 | * @sdp: the filesystem | ||
1053 | * @rgd: The new forward rgrp | ||
1054 | * | ||
1055 | */ | ||
1056 | |||
1057 | static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd) | ||
1058 | { | ||
1059 | spin_lock(&sdp->sd_rindex_spin); | ||
1060 | sdp->sd_rindex_forward = rgd; | ||
1061 | spin_unlock(&sdp->sd_rindex_spin); | ||
1062 | } | ||
1063 | |||
1064 | /** | ||
1065 | * get_local_rgrp - Choose and lock a rgrp for allocation | 967 | * get_local_rgrp - Choose and lock a rgrp for allocation |
1066 | * @ip: the inode to reserve space for | 968 | * @ip: the inode to reserve space for |
1067 | * @rgp: the chosen and locked rgrp | 969 | * @rgp: the chosen and locked rgrp |
@@ -1076,14 +978,18 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked) | |||
1076 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 978 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1077 | struct gfs2_rgrpd *rgd, *begin = NULL; | 979 | struct gfs2_rgrpd *rgd, *begin = NULL; |
1078 | struct gfs2_alloc *al = ip->i_alloc; | 980 | struct gfs2_alloc *al = ip->i_alloc; |
1079 | int flags = LM_FLAG_TRY; | ||
1080 | int skipped = 0; | ||
1081 | int loops = 0; | ||
1082 | int error, rg_locked; | 981 | int error, rg_locked; |
982 | int loops = 0; | ||
983 | |||
984 | if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) | ||
985 | rgd = begin = ip->i_rgd; | ||
986 | else | ||
987 | rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal); | ||
1083 | 988 | ||
1084 | rgd = gfs2_blk2rgrpd(sdp, ip->i_goal); | 989 | if (rgd == NULL) |
990 | return -EBADSLT; | ||
1085 | 991 | ||
1086 | while (rgd) { | 992 | while (loops < 3) { |
1087 | rg_locked = 0; | 993 | rg_locked = 0; |
1088 | 994 | ||
1089 | if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) { | 995 | if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) { |
@@ -1095,92 +1001,36 @@ static int get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked) | |||
1095 | } | 1001 | } |
1096 | switch (error) { | 1002 | switch (error) { |
1097 | case 0: | 1003 | case 0: |
1098 | if (try_rgrp_fit(rgd, al)) | 1004 | if (try_rgrp_fit(rgd, ip)) { |
1099 | goto out; | 1005 | ip->i_rgd = rgd; |
1006 | return 0; | ||
1007 | } | ||
1100 | if (rgd->rd_flags & GFS2_RDF_CHECK) | 1008 | if (rgd->rd_flags & GFS2_RDF_CHECK) |
1101 | try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr); | 1009 | try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr); |
1102 | if (!rg_locked) | 1010 | if (!rg_locked) |
1103 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 1011 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
1104 | /* fall through */ | 1012 | /* fall through */ |
1105 | case GLR_TRYFAILED: | 1013 | case GLR_TRYFAILED: |
1106 | rgd = recent_rgrp_next(rgd); | 1014 | rgd = gfs2_rgrpd_get_next(rgd); |
1107 | break; | 1015 | if (rgd == begin) |
1108 | 1016 | loops++; | |
1109 | default: | ||
1110 | return error; | ||
1111 | } | ||
1112 | } | ||
1113 | |||
1114 | /* Go through full list of rgrps */ | ||
1115 | |||
1116 | begin = rgd = forward_rgrp_get(sdp); | ||
1117 | |||
1118 | for (;;) { | ||
1119 | rg_locked = 0; | ||
1120 | |||
1121 | if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) { | ||
1122 | rg_locked = 1; | ||
1123 | error = 0; | ||
1124 | } else { | ||
1125 | error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, flags, | ||
1126 | &al->al_rgd_gh); | ||
1127 | } | ||
1128 | switch (error) { | ||
1129 | case 0: | ||
1130 | if (try_rgrp_fit(rgd, al)) | ||
1131 | goto out; | ||
1132 | if (rgd->rd_flags & GFS2_RDF_CHECK) | ||
1133 | try_rgrp_unlink(rgd, last_unlinked, ip->i_no_addr); | ||
1134 | if (!rg_locked) | ||
1135 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | ||
1136 | break; | ||
1137 | |||
1138 | case GLR_TRYFAILED: | ||
1139 | skipped++; | ||
1140 | break; | 1017 | break; |
1141 | |||
1142 | default: | 1018 | default: |
1143 | return error; | 1019 | return error; |
1144 | } | 1020 | } |
1145 | |||
1146 | rgd = gfs2_rgrpd_get_next(rgd); | ||
1147 | if (!rgd) | ||
1148 | rgd = gfs2_rgrpd_get_first(sdp); | ||
1149 | |||
1150 | if (rgd == begin) { | ||
1151 | if (++loops >= 3) | ||
1152 | return -ENOSPC; | ||
1153 | if (!skipped) | ||
1154 | loops++; | ||
1155 | flags = 0; | ||
1156 | if (loops == 2) | ||
1157 | gfs2_log_flush(sdp, NULL); | ||
1158 | } | ||
1159 | } | 1021 | } |
1160 | 1022 | ||
1161 | out: | 1023 | return -ENOSPC; |
1162 | if (begin) { | ||
1163 | spin_lock(&sdp->sd_rindex_spin); | ||
1164 | list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list); | ||
1165 | spin_unlock(&sdp->sd_rindex_spin); | ||
1166 | rgd = gfs2_rgrpd_get_next(rgd); | ||
1167 | if (!rgd) | ||
1168 | rgd = gfs2_rgrpd_get_first(sdp); | ||
1169 | forward_rgrp_set(sdp, rgd); | ||
1170 | } | ||
1171 | |||
1172 | return 0; | ||
1173 | } | 1024 | } |
1174 | 1025 | ||
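Editor's note: the rewrite above drops the MRU-list and forward-pointer heuristics entirely. get_local_rgrp() now starts from the resource group cached in ip->i_rgd (when it still contains ip->i_goal), otherwise from gfs2_blk2rgrpd(), and simply walks the rgrp list with gfs2_rgrpd_get_next(), counting one pass each time it wraps back to the starting rgrp and returning -ENOSPC after three passes. A minimal sketch of that control flow, with the glock handling and the try_rgrp_unlink() step elided; illustrative only, not the kernel code verbatim:

static int pick_rgrp_sketch(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd, *begin;
	int loops = 0;

	/* Prefer the rgrp used last time if the goal block still falls inside it */
	if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal))
		rgd = begin = ip->i_rgd;
	else
		rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal);
	if (rgd == NULL)
		return -EBADSLT;

	while (loops < 3) {
		if (try_rgrp_fit(rgd, ip)) {	/* enough free space in this rgrp? */
			ip->i_rgd = rgd;	/* cache it for the next allocation */
			return 0;
		}
		/* assumed to wrap around to the first rgrp, as the loop above relies on */
		rgd = gfs2_rgrpd_get_next(rgd);
		if (rgd == begin)
			loops++;
	}
	return -ENOSPC;
}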
1175 | /** | 1026 | /** |
1176 | * gfs2_inplace_reserve_i - Reserve space in the filesystem | 1027 | * gfs2_inplace_reserve - Reserve space in the filesystem |
1177 | * @ip: the inode to reserve space for | 1028 | * @ip: the inode to reserve space for |
1178 | * | 1029 | * |
1179 | * Returns: errno | 1030 | * Returns: errno |
1180 | */ | 1031 | */ |
1181 | 1032 | ||
1182 | int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex, | 1033 | int gfs2_inplace_reserve(struct gfs2_inode *ip) |
1183 | char *file, unsigned int line) | ||
1184 | { | 1034 | { |
1185 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1035 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); |
1186 | struct gfs2_alloc *al = ip->i_alloc; | 1036 | struct gfs2_alloc *al = ip->i_alloc; |
@@ -1191,45 +1041,22 @@ int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex, | |||
1191 | if (gfs2_assert_warn(sdp, al->al_requested)) | 1041 | if (gfs2_assert_warn(sdp, al->al_requested)) |
1192 | return -EINVAL; | 1042 | return -EINVAL; |
1193 | 1043 | ||
1194 | if (hold_rindex) { | ||
1195 | /* We need to hold the rindex unless the inode we're using is | ||
1196 | the rindex itself, in which case it's already held. */ | ||
1197 | if (ip != GFS2_I(sdp->sd_rindex)) | ||
1198 | error = gfs2_rindex_hold(sdp, &al->al_ri_gh); | ||
1199 | else if (!sdp->sd_rgrps) /* We may not have the rindex read | ||
1200 | in, so: */ | ||
1201 | error = gfs2_ri_update(ip); | ||
1202 | if (error) | ||
1203 | return error; | ||
1204 | } | ||
1205 | |||
1206 | try_again: | ||
1207 | do { | 1044 | do { |
1208 | error = get_local_rgrp(ip, &last_unlinked); | 1045 | error = get_local_rgrp(ip, &last_unlinked); |
1209 | /* If there is no space, flushing the log may release some */ | 1046 | if (error != -ENOSPC) |
1210 | if (error) { | 1047 | break; |
1211 | if (ip == GFS2_I(sdp->sd_rindex) && | 1048 | /* Check that fs hasn't grown if writing to rindex */ |
1212 | !sdp->sd_rindex_uptodate) { | 1049 | if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) { |
1213 | error = gfs2_ri_update(ip); | 1050 | error = gfs2_ri_update(ip); |
1214 | if (error) | 1051 | if (error) |
1215 | return error; | 1052 | break; |
1216 | goto try_again; | 1053 | continue; |
1217 | } | ||
1218 | gfs2_log_flush(sdp, NULL); | ||
1219 | } | 1054 | } |
1220 | } while (error && tries++ < 3); | 1055 | /* Flushing the log may release space */ |
1221 | 1056 | gfs2_log_flush(sdp, NULL); | |
1222 | if (error) { | 1057 | } while (tries++ < 3); |
1223 | if (hold_rindex && ip != GFS2_I(sdp->sd_rindex)) | ||
1224 | gfs2_glock_dq_uninit(&al->al_ri_gh); | ||
1225 | return error; | ||
1226 | } | ||
1227 | |||
1228 | /* no error, so we have the rgrp set in the inode's allocation. */ | ||
1229 | al->al_file = file; | ||
1230 | al->al_line = line; | ||
1231 | 1058 | ||
1232 | return 0; | 1059 | return error; |
1233 | } | 1060 | } |
1234 | 1061 | ||
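Editor's note: with the rindex holder gone from the reservation path, a caller's allocation sequence is reduced to: attach an allocation structure, state how many blocks are wanted, reserve, allocate, release. A hedged outline of that calling pattern using the helpers declared in rgrp.h; the quota and transaction handling a real caller also needs is omitted:

	struct gfs2_alloc *al = gfs2_alloc_get(ip);	/* attaches ip->i_alloc */
	u64 block;
	unsigned int n = 1;
	int error;

	al->al_requested = 1;			/* one data block wanted */
	error = gfs2_inplace_reserve(ip);	/* picks and locks an rgrp */
	if (error == 0) {
		error = gfs2_alloc_block(ip, &block, &n);
		gfs2_inplace_release(ip);	/* drops the rgrp glock */
	}
	gfs2_alloc_put(ip);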
1235 | /** | 1062 | /** |
@@ -1241,20 +1068,10 @@ try_again: | |||
1241 | 1068 | ||
1242 | void gfs2_inplace_release(struct gfs2_inode *ip) | 1069 | void gfs2_inplace_release(struct gfs2_inode *ip) |
1243 | { | 1070 | { |
1244 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | ||
1245 | struct gfs2_alloc *al = ip->i_alloc; | 1071 | struct gfs2_alloc *al = ip->i_alloc; |
1246 | 1072 | ||
1247 | if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1) | ||
1248 | fs_warn(sdp, "al_alloced = %u, al_requested = %u " | ||
1249 | "al_file = %s, al_line = %u\n", | ||
1250 | al->al_alloced, al->al_requested, al->al_file, | ||
1251 | al->al_line); | ||
1252 | |||
1253 | al->al_rgd = NULL; | ||
1254 | if (al->al_rgd_gh.gh_gl) | 1073 | if (al->al_rgd_gh.gh_gl) |
1255 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 1074 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
1256 | if (ip != GFS2_I(sdp->sd_rindex) && al->al_ri_gh.gh_gl) | ||
1257 | gfs2_glock_dq_uninit(&al->al_ri_gh); | ||
1258 | } | 1075 | } |
1259 | 1076 | ||
1260 | /** | 1077 | /** |
@@ -1352,6 +1169,7 @@ do_search: | |||
1352 | /* The GFS2_BLKST_UNLINKED state doesn't apply to the clone | 1169 | /* The GFS2_BLKST_UNLINKED state doesn't apply to the clone |
1353 | bitmaps, so we must search the originals for that. */ | 1170 | bitmaps, so we must search the originals for that. */ |
1354 | buffer = bi->bi_bh->b_data + bi->bi_offset; | 1171 | buffer = bi->bi_bh->b_data + bi->bi_offset; |
1172 | WARN_ON(!buffer_uptodate(bi->bi_bh)); | ||
1355 | if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone) | 1173 | if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone) |
1356 | buffer = bi->bi_clone + bi->bi_offset; | 1174 | buffer = bi->bi_clone + bi->bi_offset; |
1357 | 1175 | ||
@@ -1371,6 +1189,7 @@ skip: | |||
1371 | 1189 | ||
1372 | if (blk == BFITNOENT) | 1190 | if (blk == BFITNOENT) |
1373 | return blk; | 1191 | return blk; |
1192 | |||
1374 | *n = 1; | 1193 | *n = 1; |
1375 | if (old_state == new_state) | 1194 | if (old_state == new_state) |
1376 | goto out; | 1195 | goto out; |
@@ -1503,7 +1322,7 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n) | |||
1503 | if (al == NULL) | 1322 | if (al == NULL) |
1504 | return -ECANCELED; | 1323 | return -ECANCELED; |
1505 | 1324 | ||
1506 | rgd = al->al_rgd; | 1325 | rgd = ip->i_rgd; |
1507 | 1326 | ||
1508 | if (rgrp_contains_block(rgd, ip->i_goal)) | 1327 | if (rgrp_contains_block(rgd, ip->i_goal)) |
1509 | goal = ip->i_goal - rgd->rd_data0; | 1328 | goal = ip->i_goal - rgd->rd_data0; |
@@ -1518,7 +1337,7 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n) | |||
1518 | 1337 | ||
1519 | rgd->rd_last_alloc = blk; | 1338 | rgd->rd_last_alloc = blk; |
1520 | block = rgd->rd_data0 + blk; | 1339 | block = rgd->rd_data0 + blk; |
1521 | ip->i_goal = block; | 1340 | ip->i_goal = block + *n - 1; |
1522 | error = gfs2_meta_inode_buffer(ip, &dibh); | 1341 | error = gfs2_meta_inode_buffer(ip, &dibh); |
1523 | if (error == 0) { | 1342 | if (error == 0) { |
1524 | struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data; | 1343 | struct gfs2_dinode *di = (struct gfs2_dinode *)dibh->b_data; |
@@ -1539,9 +1358,7 @@ int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n) | |||
1539 | gfs2_statfs_change(sdp, 0, -(s64)*n, 0); | 1358 | gfs2_statfs_change(sdp, 0, -(s64)*n, 0); |
1540 | gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid); | 1359 | gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid); |
1541 | 1360 | ||
1542 | spin_lock(&sdp->sd_rindex_spin); | ||
1543 | rgd->rd_free_clone -= *n; | 1361 | rgd->rd_free_clone -= *n; |
1544 | spin_unlock(&sdp->sd_rindex_spin); | ||
1545 | trace_gfs2_block_alloc(ip, block, *n, GFS2_BLKST_USED); | 1362 | trace_gfs2_block_alloc(ip, block, *n, GFS2_BLKST_USED); |
1546 | *bn = block; | 1363 | *bn = block; |
1547 | return 0; | 1364 | return 0; |
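Editor's note: the goal-block fix above matters for multi-block extents. ip->i_goal is now the last block of the extent just allocated, so the next search begins right after the extent rather than inside it. A self-contained toy calculation (ordinary userspace C, hypothetical block numbers) showing what the old and new formulas yield:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rd_data0 = 100000;	/* first block of the rgrp (made up) */
	uint32_t blk = 250;		/* bitmap offset found by the search */
	unsigned int n = 4;		/* extent length handed out */
	uint64_t block = rd_data0 + blk;

	printf("old goal: %llu\n", (unsigned long long)block);			/* 100250 */
	printf("new goal: %llu\n", (unsigned long long)(block + n - 1));	/* 100253 */
	return 0;
}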
@@ -1564,7 +1381,7 @@ int gfs2_alloc_di(struct gfs2_inode *dip, u64 *bn, u64 *generation) | |||
1564 | { | 1381 | { |
1565 | struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); | 1382 | struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); |
1566 | struct gfs2_alloc *al = dip->i_alloc; | 1383 | struct gfs2_alloc *al = dip->i_alloc; |
1567 | struct gfs2_rgrpd *rgd = al->al_rgd; | 1384 | struct gfs2_rgrpd *rgd = dip->i_rgd; |
1568 | u32 blk; | 1385 | u32 blk; |
1569 | u64 block; | 1386 | u64 block; |
1570 | unsigned int n = 1; | 1387 | unsigned int n = 1; |
@@ -1594,9 +1411,7 @@ int gfs2_alloc_di(struct gfs2_inode *dip, u64 *bn, u64 *generation) | |||
1594 | gfs2_statfs_change(sdp, 0, -1, +1); | 1411 | gfs2_statfs_change(sdp, 0, -1, +1); |
1595 | gfs2_trans_add_unrevoke(sdp, block, 1); | 1412 | gfs2_trans_add_unrevoke(sdp, block, 1); |
1596 | 1413 | ||
1597 | spin_lock(&sdp->sd_rindex_spin); | ||
1598 | rgd->rd_free_clone--; | 1414 | rgd->rd_free_clone--; |
1599 | spin_unlock(&sdp->sd_rindex_spin); | ||
1600 | trace_gfs2_block_alloc(dip, block, 1, GFS2_BLKST_DINODE); | 1415 | trace_gfs2_block_alloc(dip, block, 1, GFS2_BLKST_DINODE); |
1601 | *bn = block; | 1416 | *bn = block; |
1602 | return 0; | 1417 | return 0; |
@@ -1629,8 +1444,6 @@ void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta) | |||
1629 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); | 1444 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); |
1630 | gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); | 1445 | gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); |
1631 | 1446 | ||
1632 | gfs2_trans_add_rg(rgd); | ||
1633 | |||
1634 | /* Directories keep their data in the metadata address space */ | 1447 | /* Directories keep their data in the metadata address space */ |
1635 | if (meta || ip->i_depth) | 1448 | if (meta || ip->i_depth) |
1636 | gfs2_meta_wipe(ip, bstart, blen); | 1449 | gfs2_meta_wipe(ip, bstart, blen); |
@@ -1666,7 +1479,6 @@ void gfs2_unlink_di(struct inode *inode) | |||
1666 | trace_gfs2_block_alloc(ip, blkno, 1, GFS2_BLKST_UNLINKED); | 1479 | trace_gfs2_block_alloc(ip, blkno, 1, GFS2_BLKST_UNLINKED); |
1667 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); | 1480 | gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); |
1668 | gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); | 1481 | gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); |
1669 | gfs2_trans_add_rg(rgd); | ||
1670 | } | 1482 | } |
1671 | 1483 | ||
1672 | static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno) | 1484 | static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno) |
@@ -1688,7 +1500,6 @@ static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno) | |||
1688 | gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); | 1500 | gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); |
1689 | 1501 | ||
1690 | gfs2_statfs_change(sdp, 0, +1, -1); | 1502 | gfs2_statfs_change(sdp, 0, +1, -1); |
1691 | gfs2_trans_add_rg(rgd); | ||
1692 | } | 1503 | } |
1693 | 1504 | ||
1694 | 1505 | ||
@@ -1714,41 +1525,33 @@ void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) | |||
1714 | int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) | 1525 | int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) |
1715 | { | 1526 | { |
1716 | struct gfs2_rgrpd *rgd; | 1527 | struct gfs2_rgrpd *rgd; |
1717 | struct gfs2_holder ri_gh, rgd_gh; | 1528 | struct gfs2_holder rgd_gh; |
1718 | struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex); | ||
1719 | int ri_locked = 0; | ||
1720 | int error; | 1529 | int error; |
1721 | 1530 | ||
1722 | if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { | 1531 | error = gfs2_rindex_update(sdp); |
1723 | error = gfs2_rindex_hold(sdp, &ri_gh); | 1532 | if (error) |
1724 | if (error) | 1533 | return error; |
1725 | goto fail; | ||
1726 | ri_locked = 1; | ||
1727 | } | ||
1728 | 1534 | ||
1729 | error = -EINVAL; | 1535 | error = -EINVAL; |
1730 | rgd = gfs2_blk2rgrpd(sdp, no_addr); | 1536 | rgd = gfs2_blk2rgrpd(sdp, no_addr); |
1731 | if (!rgd) | 1537 | if (!rgd) |
1732 | goto fail_rindex; | 1538 | goto fail; |
1733 | 1539 | ||
1734 | error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh); | 1540 | error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh); |
1735 | if (error) | 1541 | if (error) |
1736 | goto fail_rindex; | 1542 | goto fail; |
1737 | 1543 | ||
1738 | if (gfs2_get_block_type(rgd, no_addr) != type) | 1544 | if (gfs2_get_block_type(rgd, no_addr) != type) |
1739 | error = -ESTALE; | 1545 | error = -ESTALE; |
1740 | 1546 | ||
1741 | gfs2_glock_dq_uninit(&rgd_gh); | 1547 | gfs2_glock_dq_uninit(&rgd_gh); |
1742 | fail_rindex: | ||
1743 | if (ri_locked) | ||
1744 | gfs2_glock_dq_uninit(&ri_gh); | ||
1745 | fail: | 1548 | fail: |
1746 | return error; | 1549 | return error; |
1747 | } | 1550 | } |
1748 | 1551 | ||
1749 | /** | 1552 | /** |
1750 | * gfs2_rlist_add - add a RG to a list of RGs | 1553 | * gfs2_rlist_add - add a RG to a list of RGs |
1751 | * @sdp: the filesystem | 1554 | * @ip: the inode |
1752 | * @rlist: the list of resource groups | 1555 | * @rlist: the list of resource groups |
1753 | * @block: the block | 1556 | * @block: the block |
1754 | * | 1557 | * |
@@ -1758,9 +1561,10 @@ fail: | |||
1758 | * | 1561 | * |
1759 | */ | 1562 | */ |
1760 | 1563 | ||
1761 | void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist, | 1564 | void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist, |
1762 | u64 block) | 1565 | u64 block) |
1763 | { | 1566 | { |
1567 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | ||
1764 | struct gfs2_rgrpd *rgd; | 1568 | struct gfs2_rgrpd *rgd; |
1765 | struct gfs2_rgrpd **tmp; | 1569 | struct gfs2_rgrpd **tmp; |
1766 | unsigned int new_space; | 1570 | unsigned int new_space; |
@@ -1769,12 +1573,15 @@ void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist, | |||
1769 | if (gfs2_assert_warn(sdp, !rlist->rl_ghs)) | 1573 | if (gfs2_assert_warn(sdp, !rlist->rl_ghs)) |
1770 | return; | 1574 | return; |
1771 | 1575 | ||
1772 | rgd = gfs2_blk2rgrpd(sdp, block); | 1576 | if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block)) |
1577 | rgd = ip->i_rgd; | ||
1578 | else | ||
1579 | rgd = gfs2_blk2rgrpd(sdp, block); | ||
1773 | if (!rgd) { | 1580 | if (!rgd) { |
1774 | if (gfs2_consist(sdp)) | 1581 | fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block); |
1775 | fs_err(sdp, "block = %llu\n", (unsigned long long)block); | ||
1776 | return; | 1582 | return; |
1777 | } | 1583 | } |
1584 | ip->i_rgd = rgd; | ||
1778 | 1585 | ||
1779 | for (x = 0; x < rlist->rl_rgrps; x++) | 1586 | for (x = 0; x < rlist->rl_rgrps; x++) |
1780 | if (rlist->rl_rgd[x] == rgd) | 1587 | if (rlist->rl_rgd[x] == rgd) |
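Editor's note: gfs2_rlist_add() now takes the inode rather than the superblock so it can consult and update ip->i_rgd, avoiding an rgrp lookup when successive blocks fall in the same resource group, which is the common case when an extent is freed. The xattr.c hunks further down show the converted callers; the overall rlist pattern looks roughly like the outline below (a hedged sketch with hypothetical block variables, not a verbatim excerpt, and with the per-rgrp freeing work elided):

	struct gfs2_rgrp_list rlist;
	u64 first_block = 0, another_block = 0;	/* hypothetical block addresses */
	unsigned int x;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
	gfs2_rlist_add(ip, &rlist, first_block);	/* one call per run of blocks */
	gfs2_rlist_add(ip, &rlist, another_block);
	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);	/* grab all the rgrp glocks */
	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd = rlist.rl_rgd[x];
		/* free the blocks that live in rgd here */
		(void)rgd;
	}
	gfs2_rlist_free(&rlist);			/* drop the glocks and memory */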
diff --git a/fs/gfs2/rgrp.h b/fs/gfs2/rgrp.h index d253f9a8c70..cf5c5018019 100644 --- a/fs/gfs2/rgrp.h +++ b/fs/gfs2/rgrp.h | |||
@@ -18,18 +18,15 @@ struct gfs2_holder; | |||
18 | 18 | ||
19 | extern void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd); | 19 | extern void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd); |
20 | 20 | ||
21 | struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk); | 21 | extern struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk); |
22 | struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp); | 22 | extern struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp); |
23 | struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd); | 23 | extern struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd); |
24 | 24 | ||
25 | extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp); | 25 | extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp); |
26 | extern int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh); | 26 | extern int gfs2_rindex_update(struct gfs2_sbd *sdp); |
27 | 27 | extern void gfs2_free_clones(struct gfs2_rgrpd *rgd); | |
28 | extern int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd); | 28 | extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh); |
29 | extern void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd); | 29 | extern void gfs2_rgrp_go_unlock(struct gfs2_holder *gh); |
30 | extern void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd); | ||
31 | |||
32 | extern void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd); | ||
33 | 30 | ||
34 | extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip); | 31 | extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip); |
35 | static inline void gfs2_alloc_put(struct gfs2_inode *ip) | 32 | static inline void gfs2_alloc_put(struct gfs2_inode *ip) |
@@ -39,16 +36,9 @@ static inline void gfs2_alloc_put(struct gfs2_inode *ip) | |||
39 | ip->i_alloc = NULL; | 36 | ip->i_alloc = NULL; |
40 | } | 37 | } |
41 | 38 | ||
42 | extern int gfs2_inplace_reserve_i(struct gfs2_inode *ip, int hold_rindex, | 39 | extern int gfs2_inplace_reserve(struct gfs2_inode *ip); |
43 | char *file, unsigned int line); | ||
44 | #define gfs2_inplace_reserve(ip) \ | ||
45 | gfs2_inplace_reserve_i((ip), 1, __FILE__, __LINE__) | ||
46 | #define gfs2_inplace_reserve_ri(ip) \ | ||
47 | gfs2_inplace_reserve_i((ip), 0, __FILE__, __LINE__) | ||
48 | |||
49 | extern void gfs2_inplace_release(struct gfs2_inode *ip); | 40 | extern void gfs2_inplace_release(struct gfs2_inode *ip); |
50 | 41 | ||
51 | extern int gfs2_ri_update(struct gfs2_inode *ip); | ||
52 | extern int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n); | 42 | extern int gfs2_alloc_block(struct gfs2_inode *ip, u64 *bn, unsigned int *n); |
53 | extern int gfs2_alloc_di(struct gfs2_inode *ip, u64 *bn, u64 *generation); | 43 | extern int gfs2_alloc_di(struct gfs2_inode *ip, u64 *bn, u64 *generation); |
54 | 44 | ||
@@ -66,11 +56,14 @@ struct gfs2_rgrp_list { | |||
66 | struct gfs2_holder *rl_ghs; | 56 | struct gfs2_holder *rl_ghs; |
67 | }; | 57 | }; |
68 | 58 | ||
69 | extern void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist, | 59 | extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist, |
70 | u64 block); | 60 | u64 block); |
71 | extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state); | 61 | extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state); |
72 | extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist); | 62 | extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist); |
73 | extern u64 gfs2_ri_total(struct gfs2_sbd *sdp); | 63 | extern u64 gfs2_ri_total(struct gfs2_sbd *sdp); |
74 | extern int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl); | 64 | extern int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl); |
65 | extern void gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, | ||
66 | struct buffer_head *bh, | ||
67 | const struct gfs2_bitmap *bi); | ||
75 | 68 | ||
76 | #endif /* __RGRP_DOT_H__ */ | 69 | #endif /* __RGRP_DOT_H__ */ |
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index b7beadd9ba4..71e420989f7 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c | |||
@@ -752,51 +752,77 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
752 | struct gfs2_sbd *sdp = GFS2_SB(inode); | 752 | struct gfs2_sbd *sdp = GFS2_SB(inode); |
753 | struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl); | 753 | struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl); |
754 | struct backing_dev_info *bdi = metamapping->backing_dev_info; | 754 | struct backing_dev_info *bdi = metamapping->backing_dev_info; |
755 | struct gfs2_holder gh; | 755 | int ret = 0; |
756 | |||
757 | if (wbc->sync_mode == WB_SYNC_ALL) | ||
758 | gfs2_log_flush(GFS2_SB(inode), ip->i_gl); | ||
759 | if (bdi->dirty_exceeded) | ||
760 | gfs2_ail1_flush(sdp, wbc); | ||
761 | else | ||
762 | filemap_fdatawrite(metamapping); | ||
763 | if (wbc->sync_mode == WB_SYNC_ALL) | ||
764 | ret = filemap_fdatawait(metamapping); | ||
765 | if (ret) | ||
766 | mark_inode_dirty_sync(inode); | ||
767 | return ret; | ||
768 | } | ||
769 | |||
770 | /** | ||
771 | * gfs2_dirty_inode - check for atime updates | ||
772 | * @inode: The inode in question | ||
773 | * @flags: The type of dirty | ||
774 | * | ||
775 | * Unfortunately it can be called under any combination of inode | ||
776 | * glock and transaction lock, so we have to check carefully. | ||
777 | * | ||
778 | * At the moment this deals only with atime - it should be possible | ||
779 | * to expand that role in future, once a review of the locking has | ||
780 | * been carried out. | ||
781 | */ | ||
782 | |||
783 | static void gfs2_dirty_inode(struct inode *inode, int flags) | ||
784 | { | ||
785 | struct gfs2_inode *ip = GFS2_I(inode); | ||
786 | struct gfs2_sbd *sdp = GFS2_SB(inode); | ||
756 | struct buffer_head *bh; | 787 | struct buffer_head *bh; |
757 | struct timespec atime; | 788 | struct gfs2_holder gh; |
758 | struct gfs2_dinode *di; | 789 | int need_unlock = 0; |
759 | int ret = -EAGAIN; | 790 | int need_endtrans = 0; |
760 | int unlock_required = 0; | 791 | int ret; |
761 | 792 | ||
762 | /* Skip timestamp update, if this is from a memalloc */ | 793 | if (!(flags & (I_DIRTY_DATASYNC|I_DIRTY_SYNC))) |
763 | if (current->flags & PF_MEMALLOC) | 794 | return; |
764 | goto do_flush; | 795 | |
765 | if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { | 796 | if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { |
766 | ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); | 797 | ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); |
767 | if (ret) | 798 | if (ret) { |
768 | goto do_flush; | 799 | fs_err(sdp, "dirty_inode: glock %d\n", ret); |
769 | unlock_required = 1; | 800 | return; |
801 | } | ||
802 | need_unlock = 1; | ||
770 | } | 803 | } |
771 | ret = gfs2_trans_begin(sdp, RES_DINODE, 0); | 804 | |
772 | if (ret) | 805 | if (current->journal_info == NULL) { |
773 | goto do_unlock; | 806 | ret = gfs2_trans_begin(sdp, RES_DINODE, 0); |
807 | if (ret) { | ||
808 | fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret); | ||
809 | goto out; | ||
810 | } | ||
811 | need_endtrans = 1; | ||
812 | } | ||
813 | |||
774 | ret = gfs2_meta_inode_buffer(ip, &bh); | 814 | ret = gfs2_meta_inode_buffer(ip, &bh); |
775 | if (ret == 0) { | 815 | if (ret == 0) { |
776 | di = (struct gfs2_dinode *)bh->b_data; | 816 | gfs2_trans_add_bh(ip->i_gl, bh, 1); |
777 | atime.tv_sec = be64_to_cpu(di->di_atime); | 817 | gfs2_dinode_out(ip, bh->b_data); |
778 | atime.tv_nsec = be32_to_cpu(di->di_atime_nsec); | ||
779 | if (timespec_compare(&inode->i_atime, &atime) > 0) { | ||
780 | gfs2_trans_add_bh(ip->i_gl, bh, 1); | ||
781 | gfs2_dinode_out(ip, bh->b_data); | ||
782 | } | ||
783 | brelse(bh); | 818 | brelse(bh); |
784 | } | 819 | } |
785 | gfs2_trans_end(sdp); | 820 | |
786 | do_unlock: | 821 | if (need_endtrans) |
787 | if (unlock_required) | 822 | gfs2_trans_end(sdp); |
823 | out: | ||
824 | if (need_unlock) | ||
788 | gfs2_glock_dq_uninit(&gh); | 825 | gfs2_glock_dq_uninit(&gh); |
789 | do_flush: | ||
790 | if (wbc->sync_mode == WB_SYNC_ALL) | ||
791 | gfs2_log_flush(GFS2_SB(inode), ip->i_gl); | ||
792 | filemap_fdatawrite(metamapping); | ||
793 | if (bdi->dirty_exceeded) | ||
794 | gfs2_ail1_flush(sdp, wbc); | ||
795 | if (!ret && (wbc->sync_mode == WB_SYNC_ALL)) | ||
796 | ret = filemap_fdatawait(metamapping); | ||
797 | if (ret) | ||
798 | mark_inode_dirty_sync(inode); | ||
799 | return ret; | ||
800 | } | 826 | } |
801 | 827 | ||
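Editor's note: the new ->dirty_inode() above has to work whether or not the caller already holds the inode glock and whether or not a transaction is already open; GFS2 detects an open transaction by current->journal_info being non-NULL. A small hypothetical helper (not part of the patch) that captures just the conditional-transaction idiom:

/* Hypothetical helper: open a transaction only if the task has none. */
static int begin_trans_if_needed(struct gfs2_sbd *sdp, int *started)
{
	int error;

	*started = 0;
	if (current->journal_info != NULL)	/* already inside a transaction */
		return 0;
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error == 0)
		*started = 1;
	return error;
}

gfs2_dirty_inode() pairs this with a matching gfs2_trans_end() only when it opened the transaction itself, which is what the need_endtrans flag above tracks.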
802 | /** | 828 | /** |
@@ -1011,7 +1037,6 @@ static int statfs_slow_fill(struct gfs2_rgrpd *rgd, | |||
1011 | 1037 | ||
1012 | static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) | 1038 | static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) |
1013 | { | 1039 | { |
1014 | struct gfs2_holder ri_gh; | ||
1015 | struct gfs2_rgrpd *rgd_next; | 1040 | struct gfs2_rgrpd *rgd_next; |
1016 | struct gfs2_holder *gha, *gh; | 1041 | struct gfs2_holder *gha, *gh; |
1017 | unsigned int slots = 64; | 1042 | unsigned int slots = 64; |
@@ -1024,10 +1049,6 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host | |||
1024 | if (!gha) | 1049 | if (!gha) |
1025 | return -ENOMEM; | 1050 | return -ENOMEM; |
1026 | 1051 | ||
1027 | error = gfs2_rindex_hold(sdp, &ri_gh); | ||
1028 | if (error) | ||
1029 | goto out; | ||
1030 | |||
1031 | rgd_next = gfs2_rgrpd_get_first(sdp); | 1052 | rgd_next = gfs2_rgrpd_get_first(sdp); |
1032 | 1053 | ||
1033 | for (;;) { | 1054 | for (;;) { |
@@ -1070,9 +1091,6 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host | |||
1070 | yield(); | 1091 | yield(); |
1071 | } | 1092 | } |
1072 | 1093 | ||
1073 | gfs2_glock_dq_uninit(&ri_gh); | ||
1074 | |||
1075 | out: | ||
1076 | kfree(gha); | 1094 | kfree(gha); |
1077 | return error; | 1095 | return error; |
1078 | } | 1096 | } |
@@ -1124,6 +1142,10 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
1124 | struct gfs2_statfs_change_host sc; | 1142 | struct gfs2_statfs_change_host sc; |
1125 | int error; | 1143 | int error; |
1126 | 1144 | ||
1145 | error = gfs2_rindex_update(sdp); | ||
1146 | if (error) | ||
1147 | return error; | ||
1148 | |||
1127 | if (gfs2_tune_get(sdp, gt_statfs_slow)) | 1149 | if (gfs2_tune_get(sdp, gt_statfs_slow)) |
1128 | error = gfs2_statfs_slow(sdp, &sc); | 1150 | error = gfs2_statfs_slow(sdp, &sc); |
1129 | else | 1151 | else |
@@ -1394,21 +1416,17 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip) | |||
1394 | if (error) | 1416 | if (error) |
1395 | goto out; | 1417 | goto out; |
1396 | 1418 | ||
1397 | error = gfs2_rindex_hold(sdp, &al->al_ri_gh); | ||
1398 | if (error) | ||
1399 | goto out_qs; | ||
1400 | |||
1401 | rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr); | 1419 | rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr); |
1402 | if (!rgd) { | 1420 | if (!rgd) { |
1403 | gfs2_consist_inode(ip); | 1421 | gfs2_consist_inode(ip); |
1404 | error = -EIO; | 1422 | error = -EIO; |
1405 | goto out_rindex_relse; | 1423 | goto out_qs; |
1406 | } | 1424 | } |
1407 | 1425 | ||
1408 | error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, | 1426 | error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, |
1409 | &al->al_rgd_gh); | 1427 | &al->al_rgd_gh); |
1410 | if (error) | 1428 | if (error) |
1411 | goto out_rindex_relse; | 1429 | goto out_qs; |
1412 | 1430 | ||
1413 | error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, | 1431 | error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, |
1414 | sdp->sd_jdesc->jd_blocks); | 1432 | sdp->sd_jdesc->jd_blocks); |
@@ -1423,8 +1441,6 @@ static int gfs2_dinode_dealloc(struct gfs2_inode *ip) | |||
1423 | 1441 | ||
1424 | out_rg_gunlock: | 1442 | out_rg_gunlock: |
1425 | gfs2_glock_dq_uninit(&al->al_rgd_gh); | 1443 | gfs2_glock_dq_uninit(&al->al_rgd_gh); |
1426 | out_rindex_relse: | ||
1427 | gfs2_glock_dq_uninit(&al->al_ri_gh); | ||
1428 | out_qs: | 1444 | out_qs: |
1429 | gfs2_quota_unhold(ip); | 1445 | gfs2_quota_unhold(ip); |
1430 | out: | 1446 | out: |
@@ -1471,9 +1487,11 @@ static void gfs2_evict_inode(struct inode *inode) | |||
1471 | goto out; | 1487 | goto out; |
1472 | } | 1488 | } |
1473 | 1489 | ||
1474 | error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED); | 1490 | if (!test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) { |
1475 | if (error) | 1491 | error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED); |
1476 | goto out_truncate; | 1492 | if (error) |
1493 | goto out_truncate; | ||
1494 | } | ||
1477 | 1495 | ||
1478 | if (test_bit(GIF_INVALID, &ip->i_flags)) { | 1496 | if (test_bit(GIF_INVALID, &ip->i_flags)) { |
1479 | error = gfs2_inode_refresh(ip); | 1497 | error = gfs2_inode_refresh(ip); |
@@ -1513,6 +1531,10 @@ static void gfs2_evict_inode(struct inode *inode) | |||
1513 | goto out_unlock; | 1531 | goto out_unlock; |
1514 | 1532 | ||
1515 | out_truncate: | 1533 | out_truncate: |
1534 | gfs2_log_flush(sdp, ip->i_gl); | ||
1535 | write_inode_now(inode, 1); | ||
1536 | gfs2_ail_flush(ip->i_gl, 0); | ||
1537 | |||
1516 | /* Case 2 starts here */ | 1538 | /* Case 2 starts here */ |
1517 | error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); | 1539 | error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); |
1518 | if (error) | 1540 | if (error) |
@@ -1552,6 +1574,7 @@ static struct inode *gfs2_alloc_inode(struct super_block *sb) | |||
1552 | if (ip) { | 1574 | if (ip) { |
1553 | ip->i_flags = 0; | 1575 | ip->i_flags = 0; |
1554 | ip->i_gl = NULL; | 1576 | ip->i_gl = NULL; |
1577 | ip->i_rgd = NULL; | ||
1555 | } | 1578 | } |
1556 | return &ip->i_inode; | 1579 | return &ip->i_inode; |
1557 | } | 1580 | } |
@@ -1572,6 +1595,7 @@ const struct super_operations gfs2_super_ops = { | |||
1572 | .alloc_inode = gfs2_alloc_inode, | 1595 | .alloc_inode = gfs2_alloc_inode, |
1573 | .destroy_inode = gfs2_destroy_inode, | 1596 | .destroy_inode = gfs2_destroy_inode, |
1574 | .write_inode = gfs2_write_inode, | 1597 | .write_inode = gfs2_write_inode, |
1598 | .dirty_inode = gfs2_dirty_inode, | ||
1575 | .evict_inode = gfs2_evict_inode, | 1599 | .evict_inode = gfs2_evict_inode, |
1576 | .put_super = gfs2_put_super, | 1600 | .put_super = gfs2_put_super, |
1577 | .sync_fs = gfs2_sync_fs, | 1601 | .sync_fs = gfs2_sync_fs, |
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index 9ec73a85411..86ac75d99d3 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c | |||
@@ -185,8 +185,3 @@ void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len) | |||
185 | gfs2_log_unlock(sdp); | 185 | gfs2_log_unlock(sdp); |
186 | } | 186 | } |
187 | 187 | ||
188 | void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd) | ||
189 | { | ||
190 | lops_add(rgd->rd_sbd, &rgd->rd_le); | ||
191 | } | ||
192 | |||
diff --git a/fs/gfs2/trans.h b/fs/gfs2/trans.h index fb56b783e02..f8f101ef600 100644 --- a/fs/gfs2/trans.h +++ b/fs/gfs2/trans.h | |||
@@ -28,20 +28,20 @@ struct gfs2_glock; | |||
28 | 28 | ||
29 | /* reserve either the number of blocks to be allocated plus the rg header | 29 | /* reserve either the number of blocks to be allocated plus the rg header |
30 | * block, or all of the blocks in the rg, whichever is smaller */ | 30 | * block, or all of the blocks in the rg, whichever is smaller */ |
31 | static inline unsigned int gfs2_rg_blocks(const struct gfs2_alloc *al) | 31 | static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip) |
32 | { | 32 | { |
33 | return (al->al_requested < al->al_rgd->rd_length)? | 33 | const struct gfs2_alloc *al = ip->i_alloc; |
34 | al->al_requested + 1 : al->al_rgd->rd_length; | 34 | if (al->al_requested < ip->i_rgd->rd_length) |
35 | return al->al_requested + 1; | ||
36 | return ip->i_rgd->rd_length; | ||
35 | } | 37 | } |
36 | 38 | ||
37 | int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, | 39 | extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, |
38 | unsigned int revokes); | 40 | unsigned int revokes); |
39 | 41 | ||
40 | void gfs2_trans_end(struct gfs2_sbd *sdp); | 42 | extern void gfs2_trans_end(struct gfs2_sbd *sdp); |
41 | 43 | extern void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta); | |
42 | void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta); | 44 | extern void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); |
43 | void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd); | 45 | extern void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len); |
44 | void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len); | ||
45 | void gfs2_trans_add_rg(struct gfs2_rgrpd *rgd); | ||
46 | 46 | ||
47 | #endif /* __TRANS_DOT_H__ */ | 47 | #endif /* __TRANS_DOT_H__ */ |
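Editor's note: gfs2_rg_blocks() now takes the inode and reads the request from ip->i_alloc and the rgrp from ip->i_rgd, but the value it computes is unchanged: the smaller of "requested blocks plus one for the rgrp header" and the length of the whole rgrp. A self-contained restatement in plain C with made-up numbers:

#include <stdio.h>

static unsigned int rg_blocks(unsigned int requested, unsigned int rd_length)
{
	/* reserve requested + 1 (rgrp header block), capped at the whole rgrp */
	return requested < rd_length ? requested + 1 : rd_length;
}

int main(void)
{
	printf("%u\n", rg_blocks(8, 5));	/* 5: capped at rd_length      */
	printf("%u\n", rg_blocks(3, 5));	/* 4: 3 blocks + header block  */
	return 0;
}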
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c index 439b61c0326..71d7bf830c0 100644 --- a/fs/gfs2/xattr.c +++ b/fs/gfs2/xattr.c | |||
@@ -332,15 +332,8 @@ static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, | |||
332 | if (error) | 332 | if (error) |
333 | goto out_alloc; | 333 | goto out_alloc; |
334 | 334 | ||
335 | error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh); | ||
336 | if (error) | ||
337 | goto out_quota; | ||
338 | |||
339 | error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL); | 335 | error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL); |
340 | 336 | ||
341 | gfs2_glock_dq_uninit(&al->al_ri_gh); | ||
342 | |||
343 | out_quota: | ||
344 | gfs2_quota_unhold(ip); | 337 | gfs2_quota_unhold(ip); |
345 | out_alloc: | 338 | out_alloc: |
346 | gfs2_alloc_put(ip); | 339 | gfs2_alloc_put(ip); |
@@ -734,7 +727,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er, | |||
734 | goto out_gunlock_q; | 727 | goto out_gunlock_q; |
735 | 728 | ||
736 | error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), | 729 | error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), |
737 | blks + gfs2_rg_blocks(al) + | 730 | blks + gfs2_rg_blocks(ip) + |
738 | RES_DINODE + RES_STATFS + RES_QUOTA, 0); | 731 | RES_DINODE + RES_STATFS + RES_QUOTA, 0); |
739 | if (error) | 732 | if (error) |
740 | goto out_ipres; | 733 | goto out_ipres; |
@@ -1296,7 +1289,8 @@ fail: | |||
1296 | 1289 | ||
1297 | int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data) | 1290 | int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data) |
1298 | { | 1291 | { |
1299 | struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); | 1292 | struct inode *inode = &ip->i_inode; |
1293 | struct gfs2_sbd *sdp = GFS2_SB(inode); | ||
1300 | struct gfs2_ea_location el; | 1294 | struct gfs2_ea_location el; |
1301 | int error; | 1295 | int error; |
1302 | 1296 | ||
@@ -1319,7 +1313,7 @@ int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data) | |||
1319 | if (error) | 1313 | if (error) |
1320 | return error; | 1314 | return error; |
1321 | 1315 | ||
1322 | error = gfs2_setattr_simple(ip, attr); | 1316 | error = gfs2_setattr_simple(inode, attr); |
1323 | gfs2_trans_end(sdp); | 1317 | gfs2_trans_end(sdp); |
1324 | return error; | 1318 | return error; |
1325 | } | 1319 | } |
@@ -1362,14 +1356,14 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) | |||
1362 | blen++; | 1356 | blen++; |
1363 | else { | 1357 | else { |
1364 | if (bstart) | 1358 | if (bstart) |
1365 | gfs2_rlist_add(sdp, &rlist, bstart); | 1359 | gfs2_rlist_add(ip, &rlist, bstart); |
1366 | bstart = bn; | 1360 | bstart = bn; |
1367 | blen = 1; | 1361 | blen = 1; |
1368 | } | 1362 | } |
1369 | blks++; | 1363 | blks++; |
1370 | } | 1364 | } |
1371 | if (bstart) | 1365 | if (bstart) |
1372 | gfs2_rlist_add(sdp, &rlist, bstart); | 1366 | gfs2_rlist_add(ip, &rlist, bstart); |
1373 | else | 1367 | else |
1374 | goto out; | 1368 | goto out; |
1375 | 1369 | ||
@@ -1501,24 +1495,18 @@ int gfs2_ea_dealloc(struct gfs2_inode *ip) | |||
1501 | if (error) | 1495 | if (error) |
1502 | goto out_alloc; | 1496 | goto out_alloc; |
1503 | 1497 | ||
1504 | error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh); | ||
1505 | if (error) | ||
1506 | goto out_quota; | ||
1507 | |||
1508 | error = ea_foreach(ip, ea_dealloc_unstuffed, NULL); | 1498 | error = ea_foreach(ip, ea_dealloc_unstuffed, NULL); |
1509 | if (error) | 1499 | if (error) |
1510 | goto out_rindex; | 1500 | goto out_quota; |
1511 | 1501 | ||
1512 | if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) { | 1502 | if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) { |
1513 | error = ea_dealloc_indirect(ip); | 1503 | error = ea_dealloc_indirect(ip); |
1514 | if (error) | 1504 | if (error) |
1515 | goto out_rindex; | 1505 | goto out_quota; |
1516 | } | 1506 | } |
1517 | 1507 | ||
1518 | error = ea_dealloc_block(ip); | 1508 | error = ea_dealloc_block(ip); |
1519 | 1509 | ||
1520 | out_rindex: | ||
1521 | gfs2_glock_dq_uninit(&al->al_ri_gh); | ||
1522 | out_quota: | 1510 | out_quota: |
1523 | gfs2_quota_unhold(ip); | 1511 | gfs2_quota_unhold(ip); |
1524 | out_alloc: | 1512 | out_alloc: |