author		Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 19:09:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 19:09:18 -0400
commit		80dcc31fbe55932ac9204daee5f2ebc0c49b6da3 (patch)
tree		723802ccd7e96da883352704fe3d2c5e3ba9d2e6 /fs/gfs2/file.c
parent		78d5dcda92a17f17132671c269ea2c3a17688649 (diff)
parent		30133177957dca9a3e2a37b720f891d3225a92a1 (diff)
Merge tag 'gfs2-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2
Pull GFS2 updates from Bob Peterson:
 "Here is a list of patches we've accumulated for GFS2 for the current
  upstream merge window. Most of the patches fix GFS2 quotas, which
  were not properly enforced. There's another that adds me as a GFS2
  co-maintainer, and a couple patches that fix a kernel panic doing
  splice_write on GFS2 as well as a few correctness patches"

* tag 'gfs2-merge-window' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  gfs2: fix quota refresh race in do_glock()
  gfs2: incorrect check for debugfs returns
  gfs2: allow fallocate to max out quotas/fs efficiently
  gfs2: allow quota_check and inplace_reserve to return available blocks
  gfs2: perform quota checks against allocation parameters
  GFS2: Move gfs2_file_splice_write outside of #ifdef
  GFS2: Allocate reservation during splice_write
  GFS2: gfs2_set_acl(): Cache "no acl" as well
  Add myself (Bob Peterson) as a maintainer of GFS2
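For orientation before the diff below: the core change the quota patches make on the caller side is that gfs2_quota_lock_check() now takes a struct gfs2_alloc_parms, so the quota code can see how many blocks the caller wants (ap.target, ap.min_target) and report back how many it would actually allow (ap.allowed). The following is a minimal userspace sketch of that calling pattern, not kernel code; the struct fields mirror the diff, but the stubbed quota helper and all numbers are invented for illustration.

#include <stdio.h>

/* Simplified stand-in for the kernel's struct gfs2_alloc_parms. */
struct alloc_parms {
	unsigned int target;     /* blocks the caller would like to reserve */
	unsigned int min_target; /* smallest reservation still worth doing  */
	unsigned int allowed;    /* filled in: blocks the check permits     */
};

/* Stub "quota check": allows at most what the (made-up) quota has left. */
static int quota_lock_check(struct alloc_parms *ap, unsigned int quota_left)
{
	ap->allowed = (ap->target < quota_left) ? ap->target : quota_left;
	return (ap->allowed >= ap->min_target) ? 0 : -1; /* roughly -EDQUOT */
}

int main(void)
{
	/* Invented numbers: ask for 1000 blocks, need at least 4, quota has 600. */
	struct alloc_parms ap = { .target = 1000, .min_target = 4 };
	unsigned int max_blks = ~0u;

	/* Same capping step as the new __gfs2_fallocate() loop in the diff. */
	if (quota_lock_check(&ap, 600) == 0 && ap.allowed && ap.allowed < max_blks)
		max_blks = ap.allowed;

	printf("quota allows %u of %u requested blocks\n", max_blks, ap.target);
	return 0;
}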
Diffstat (limited to 'fs/gfs2/file.c')
-rw-r--r--	fs/gfs2/file.c	101
1 file changed, 71 insertions(+), 30 deletions(-)
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index f6fc412b1100..8ec43ab5babf 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -428,11 +428,11 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	if (ret)
 		goto out_unlock;
 
-	ret = gfs2_quota_lock_check(ip);
-	if (ret)
-		goto out_unlock;
 	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
 	ap.target = data_blocks + ind_blocks;
+	ret = gfs2_quota_lock_check(ip, &ap);
+	if (ret)
+		goto out_unlock;
 	ret = gfs2_inplace_reserve(ip, &ap);
 	if (ret)
 		goto out_quota_unlock;
@@ -764,22 +764,30 @@ out:
 	brelse(dibh);
 	return error;
 }
-
-static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
-			    unsigned int *data_blocks, unsigned int *ind_blocks)
+/**
+ * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
+ *                     blocks, determine how many bytes can be written.
+ * @ip:          The inode in question.
+ * @len:         Max cap of bytes. What we return in *len must be <= this.
+ * @data_blocks: Compute and return the number of data blocks needed
+ * @ind_blocks:  Compute and return the number of indirect blocks needed
+ * @max_blocks:  The total blocks available to work with.
+ *
+ * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
+ */
+static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len,
+			    unsigned int *data_blocks, unsigned int *ind_blocks,
+			    unsigned int max_blocks)
 {
+	loff_t max = *len;
 	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
 	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
 
 	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
 		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
 		max_data -= tmp;
 	}
-	/* This calculation isn't the exact reverse of gfs2_write_calc_reserve,
-	   so it might end up with fewer data blocks */
-	if (max_data <= *data_blocks)
-		return;
+
 	*data_blocks = max_data;
 	*ind_blocks = max_blocks - max_data;
 	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
@@ -796,7 +804,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_alloc_parms ap = { .aflags = 0, };
 	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
-	loff_t bytes, max_bytes;
+	loff_t bytes, max_bytes, max_blks = UINT_MAX;
 	int error;
 	const loff_t pos = offset;
 	const loff_t count = len;
@@ -818,6 +826,9 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 
 	gfs2_size_hint(file, offset, len);
 
+	gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks);
+	ap.min_target = data_blocks + ind_blocks;
+
 	while (len > 0) {
 		if (len < bytes)
 			bytes = len;
@@ -826,27 +837,41 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
 			offset += bytes;
 			continue;
 		}
-		error = gfs2_quota_lock_check(ip);
+
+		/* We need to determine how many bytes we can actually
+		 * fallocate without exceeding quota or going over the
+		 * end of the fs. We start off optimistically by assuming
+		 * we can write max_bytes */
+		max_bytes = (len > max_chunk_size) ? max_chunk_size : len;
+
+		/* Since max_bytes is most likely a theoretical max, we
+		 * calculate a more realistic 'bytes' to serve as a good
+		 * starting point for the number of bytes we may be able
+		 * to write */
+		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
+		ap.target = data_blocks + ind_blocks;
+
+		error = gfs2_quota_lock_check(ip, &ap);
 		if (error)
 			return error;
-retry:
-		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
+		/* ap.allowed tells us how many blocks quota will allow
+		 * us to write. Check if this reduces max_blks */
+		if (ap.allowed && ap.allowed < max_blks)
+			max_blks = ap.allowed;
 
-		ap.target = data_blocks + ind_blocks;
 		error = gfs2_inplace_reserve(ip, &ap);
-		if (error) {
-			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
-				bytes >>= 1;
-				bytes &= bsize_mask;
-				if (bytes == 0)
-					bytes = sdp->sd_sb.sb_bsize;
-				goto retry;
-			}
+		if (error)
 			goto out_qunlock;
-		}
-		max_bytes = bytes;
-		calc_max_reserv(ip, (len > max_chunk_size)? max_chunk_size: len,
-				&max_bytes, &data_blocks, &ind_blocks);
+
+		/* check if the selected rgrp limits our max_blks further */
+		if (ap.allowed && ap.allowed < max_blks)
+			max_blks = ap.allowed;
+
+		/* Almost done. Calculate bytes that can be written using
+		 * max_blks. We also recompute max_bytes, data_blocks and
+		 * ind_blocks */
+		calc_max_reserv(ip, &max_bytes, &data_blocks,
+				&ind_blocks, max_blks);
 
 		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
 			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
@@ -930,6 +955,22 @@ out_uninit:
 	return ret;
 }
 
+static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe,
+				      struct file *out, loff_t *ppos,
+				      size_t len, unsigned int flags)
+{
+	int error;
+	struct gfs2_inode *ip = GFS2_I(out->f_mapping->host);
+
+	error = gfs2_rs_alloc(ip);
+	if (error)
+		return (ssize_t)error;
+
+	gfs2_size_hint(out, *ppos, len);
+
+	return iter_file_splice_write(pipe, out, ppos, len, flags);
+}
+
 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
 
 /**
@@ -1076,7 +1117,7 @@ const struct file_operations gfs2_file_fops = {
 	.lock		= gfs2_lock,
 	.flock		= gfs2_flock,
 	.splice_read	= generic_file_splice_read,
-	.splice_write	= iter_file_splice_write,
+	.splice_write	= gfs2_file_splice_write,
 	.setlease	= simple_nosetlease,
 	.fallocate	= gfs2_fallocate,
 };
@@ -1106,7 +1147,7 @@ const struct file_operations gfs2_file_fops_nolock = {
 	.release	= gfs2_release,
 	.fsync		= gfs2_fsync,
 	.splice_read	= generic_file_splice_read,
-	.splice_write	= iter_file_splice_write,
+	.splice_write	= gfs2_file_splice_write,
 	.setlease	= generic_setlease,
 	.fallocate	= gfs2_fallocate,
 };