about summary refs log tree commit diff stats
path: root/fs/gfs2/quota.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/gfs2/quota.c')
-rw-r--r--  fs/gfs2/quota.c  70
1 files changed, 45 insertions, 25 deletions
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 1bc6b5695e6d..42e8d23bc047 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -38,6 +38,7 @@
38 38
39#include <linux/sched.h> 39#include <linux/sched.h>
40#include <linux/slab.h> 40#include <linux/slab.h>
41#include <linux/mm.h>
41#include <linux/spinlock.h> 42#include <linux/spinlock.h>
42#include <linux/completion.h> 43#include <linux/completion.h>
43#include <linux/buffer_head.h> 44#include <linux/buffer_head.h>
@@ -77,19 +78,20 @@ static LIST_HEAD(qd_lru_list);
77static atomic_t qd_lru_count = ATOMIC_INIT(0); 78static atomic_t qd_lru_count = ATOMIC_INIT(0);
78static DEFINE_SPINLOCK(qd_lru_lock); 79static DEFINE_SPINLOCK(qd_lru_lock);
79 80
80int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) 81int gfs2_shrink_qd_memory(struct shrinker *shrink, struct shrink_control *sc)
81{ 82{
82 struct gfs2_quota_data *qd; 83 struct gfs2_quota_data *qd;
83 struct gfs2_sbd *sdp; 84 struct gfs2_sbd *sdp;
85 int nr_to_scan = sc->nr_to_scan;
84 86
85 if (nr == 0) 87 if (nr_to_scan == 0)
86 goto out; 88 goto out;
87 89
88 if (!(gfp_mask & __GFP_FS)) 90 if (!(sc->gfp_mask & __GFP_FS))
89 return -1; 91 return -1;
90 92
91 spin_lock(&qd_lru_lock); 93 spin_lock(&qd_lru_lock);
92 while (nr && !list_empty(&qd_lru_list)) { 94 while (nr_to_scan && !list_empty(&qd_lru_list)) {
93 qd = list_entry(qd_lru_list.next, 95 qd = list_entry(qd_lru_list.next,
94 struct gfs2_quota_data, qd_reclaim); 96 struct gfs2_quota_data, qd_reclaim);
95 sdp = qd->qd_gl->gl_sbd; 97 sdp = qd->qd_gl->gl_sbd;
@@ -110,7 +112,7 @@ int gfs2_shrink_qd_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
110 spin_unlock(&qd_lru_lock); 112 spin_unlock(&qd_lru_lock);
111 kmem_cache_free(gfs2_quotad_cachep, qd); 113 kmem_cache_free(gfs2_quotad_cachep, qd);
112 spin_lock(&qd_lru_lock); 114 spin_lock(&qd_lru_lock);
113 nr--; 115 nr_to_scan--;
114 } 116 }
115 spin_unlock(&qd_lru_lock); 117 spin_unlock(&qd_lru_lock);
116 118
@@ -631,6 +633,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
631 struct fs_disk_quota *fdq) 633 struct fs_disk_quota *fdq)
632{ 634{
633 struct inode *inode = &ip->i_inode; 635 struct inode *inode = &ip->i_inode;
636 struct gfs2_sbd *sdp = GFS2_SB(inode);
634 struct address_space *mapping = inode->i_mapping; 637 struct address_space *mapping = inode->i_mapping;
635 unsigned long index = loc >> PAGE_CACHE_SHIFT; 638 unsigned long index = loc >> PAGE_CACHE_SHIFT;
636 unsigned offset = loc & (PAGE_CACHE_SIZE - 1); 639 unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
@@ -658,13 +661,17 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
658 qd->qd_qb.qb_value = qp->qu_value; 661 qd->qd_qb.qb_value = qp->qu_value;
659 if (fdq) { 662 if (fdq) {
660 if (fdq->d_fieldmask & FS_DQ_BSOFT) { 663 if (fdq->d_fieldmask & FS_DQ_BSOFT) {
661 qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit); 664 qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
662 qd->qd_qb.qb_warn = qp->qu_warn; 665 qd->qd_qb.qb_warn = qp->qu_warn;
663 } 666 }
664 if (fdq->d_fieldmask & FS_DQ_BHARD) { 667 if (fdq->d_fieldmask & FS_DQ_BHARD) {
665 qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit); 668 qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
666 qd->qd_qb.qb_limit = qp->qu_limit; 669 qd->qd_qb.qb_limit = qp->qu_limit;
667 } 670 }
671 if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
672 qp->qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
673 qd->qd_qb.qb_value = qp->qu_value;
674 }
668 } 675 }
669 676
670 /* Write the quota into the quota file on disk */ 677 /* Write the quota into the quota file on disk */
@@ -735,10 +742,8 @@ get_a_page:
735 goto out; 742 goto out;
736 743
737 size = loc + sizeof(struct gfs2_quota); 744 size = loc + sizeof(struct gfs2_quota);
738 if (size > inode->i_size) { 745 if (size > inode->i_size)
739 ip->i_disksize = size;
740 i_size_write(inode, size); 746 i_size_write(inode, size);
741 }
742 inode->i_mtime = inode->i_atime = CURRENT_TIME; 747 inode->i_mtime = inode->i_atime = CURRENT_TIME;
743 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 748 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
744 gfs2_dinode_out(ip, dibh->b_data); 749 gfs2_dinode_out(ip, dibh->b_data);
@@ -817,7 +822,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
817 goto out_alloc; 822 goto out_alloc;
818 823
819 if (nalloc) 824 if (nalloc)
820 blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS; 825 blocks += gfs2_rg_blocks(al) + nalloc * ind_blocks + RES_STATFS;
821 826
822 error = gfs2_trans_begin(sdp, blocks, 0); 827 error = gfs2_trans_begin(sdp, blocks, 0);
823 if (error) 828 if (error)
@@ -831,6 +836,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
831 goto out_end_trans; 836 goto out_end_trans;
832 837
833 do_qc(qd, -qd->qd_change_sync); 838 do_qc(qd, -qd->qd_change_sync);
839 set_bit(QDF_REFRESH, &qd->qd_flags);
834 } 840 }
835 841
836 error = 0; 842 error = 0;
@@ -926,6 +932,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
926{ 932{
927 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 933 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
928 struct gfs2_alloc *al = ip->i_alloc; 934 struct gfs2_alloc *al = ip->i_alloc;
935 struct gfs2_quota_data *qd;
929 unsigned int x; 936 unsigned int x;
930 int error = 0; 937 int error = 0;
931 938
@@ -939,7 +946,11 @@ int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
939 sort_qd, NULL); 946 sort_qd, NULL);
940 947
941 for (x = 0; x < al->al_qd_num; x++) { 948 for (x = 0; x < al->al_qd_num; x++) {
942 error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]); 949 int force = NO_FORCE;
950 qd = al->al_qd[x];
951 if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
952 force = FORCE;
953 error = do_glock(qd, force, &al->al_qd_ghs[x]);
943 if (error) 954 if (error)
944 break; 955 break;
945 } 956 }
@@ -1190,18 +1201,17 @@ static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *
1190int gfs2_quota_init(struct gfs2_sbd *sdp) 1201int gfs2_quota_init(struct gfs2_sbd *sdp)
1191{ 1202{
1192 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); 1203 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
1193 unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; 1204 u64 size = i_size_read(sdp->sd_qc_inode);
1205 unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
1194 unsigned int x, slot = 0; 1206 unsigned int x, slot = 0;
1195 unsigned int found = 0; 1207 unsigned int found = 0;
1196 u64 dblock; 1208 u64 dblock;
1197 u32 extlen = 0; 1209 u32 extlen = 0;
1198 int error; 1210 int error;
1199 1211
1200 if (!ip->i_disksize || ip->i_disksize > (64 << 20) || 1212 if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
1201 ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
1202 gfs2_consist_inode(ip);
1203 return -EIO; 1213 return -EIO;
1204 } 1214
1205 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block; 1215 sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
1206 sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE); 1216 sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);
1207 1217
@@ -1500,9 +1510,9 @@ static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
1500 fdq->d_version = FS_DQUOT_VERSION; 1510 fdq->d_version = FS_DQUOT_VERSION;
1501 fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA; 1511 fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
1502 fdq->d_id = id; 1512 fdq->d_id = id;
1503 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit); 1513 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
1504 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn); 1514 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
1505 fdq->d_bcount = be64_to_cpu(qlvb->qb_value); 1515 fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
1506 1516
1507 gfs2_glock_dq_uninit(&q_gh); 1517 gfs2_glock_dq_uninit(&q_gh);
1508out: 1518out:
@@ -1511,7 +1521,7 @@ out:
1511} 1521}
1512 1522
1513/* GFS2 only supports a subset of the XFS fields */ 1523/* GFS2 only supports a subset of the XFS fields */
1514#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD) 1524#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
1515 1525
1516static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id, 1526static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
1517 struct fs_disk_quota *fdq) 1527 struct fs_disk_quota *fdq)
@@ -1569,16 +1579,24 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
1569 1579
1570 /* If nothing has changed, this is a no-op */ 1580 /* If nothing has changed, this is a no-op */
1571 if ((fdq->d_fieldmask & FS_DQ_BSOFT) && 1581 if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
1572 (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn))) 1582 ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1573 fdq->d_fieldmask ^= FS_DQ_BSOFT; 1583 fdq->d_fieldmask ^= FS_DQ_BSOFT;
1584
1574 if ((fdq->d_fieldmask & FS_DQ_BHARD) && 1585 if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
1575 (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit))) 1586 ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1576 fdq->d_fieldmask ^= FS_DQ_BHARD; 1587 fdq->d_fieldmask ^= FS_DQ_BHARD;
1588
1589 if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
1590 ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
1591 fdq->d_fieldmask ^= FS_DQ_BCOUNT;
1592
1577 if (fdq->d_fieldmask == 0) 1593 if (fdq->d_fieldmask == 0)
1578 goto out_i; 1594 goto out_i;
1579 1595
1580 offset = qd2offset(qd); 1596 offset = qd2offset(qd);
1581 alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota)); 1597 alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
1598 if (gfs2_is_stuffed(ip))
1599 alloc_required = 1;
1582 if (alloc_required) { 1600 if (alloc_required) {
1583 al = gfs2_alloc_get(ip); 1601 al = gfs2_alloc_get(ip);
1584 if (al == NULL) 1602 if (al == NULL)
@@ -1589,9 +1607,12 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
1589 error = gfs2_inplace_reserve(ip); 1607 error = gfs2_inplace_reserve(ip);
1590 if (error) 1608 if (error)
1591 goto out_alloc; 1609 goto out_alloc;
1610 blocks += gfs2_rg_blocks(al);
1592 } 1611 }
1593 1612
1594 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0); 1613 /* Some quotas span block boundaries and can update two blocks,
1614 adding an extra block to the transaction to handle such quotas */
1615 error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
1595 if (error) 1616 if (error)
1596 goto out_release; 1617 goto out_release;
1597 1618
@@ -1621,4 +1642,3 @@ const struct quotactl_ops gfs2_quotactl_ops = {
1621 .get_dqblk = gfs2_get_dqblk, 1642 .get_dqblk = gfs2_get_dqblk,
1622 .set_dqblk = gfs2_set_dqblk, 1643 .set_dqblk = gfs2_set_dqblk,
1623}; 1644};
1624