author     Steven Whitehouse <swhiteho@redhat.com>   2009-09-15 15:42:56 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>   2009-12-03 06:51:05 -0500
commit     1e72c0f7c40e665d2ed40014750fdd2fa9968bcf (patch)
tree       89d8ef6d7b822ac17699a3c51a1c59c654aaf676 /fs
parent     6a6ada81e4ffc222bf7e54ea7503c7cc98b4f0d8 (diff)
GFS2: Clean up gfs2_adjust_quota() and do_glock()
Both of these functions contained confusing and in one case
duplicate code. This patch adds a new check in do_glock()
so that we report -ENOENT if we are asked to sync a quota
entry which doesn't exist. Due to the previous patch this is
now reported correctly to userspace.
Also there are a few new comments, and I hope that the code
is easier to understand now.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
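
As a quick illustration of the behaviour described above, here is a minimal userspace sketch of the new short-read check in do_glock(): a read that comes back smaller than a full quota record, combined with a forced refresh, is now reported as -ENOENT. This is not the kernel code; read_quota_record() and struct quota_record are invented stand-ins for gfs2_internal_read() and struct gfs2_quota.

/*
 * Illustrative sketch only: in the kernel, the read is done by
 * gfs2_internal_read() against the quota file and the record is a
 * struct gfs2_quota.
 */
#include <errno.h>
#include <string.h>

struct quota_record {
	unsigned long long limit, warn, value;
};

/* Pretend reader: returns bytes copied, 0 for a missing entry, <0 on error. */
static int read_quota_record(struct quota_record *q, int entry_exists)
{
	memset(q, 0, sizeof(*q));
	return entry_exists ? (int)sizeof(*q) : 0;
}

static int refresh_quota(struct quota_record *q, int force_refresh,
			 int entry_exists)
{
	int error = read_quota_record(q, entry_exists);

	if (error < 0)
		return error;
	/* New check: a short read on a forced refresh means "no such entry". */
	if (error < (int)sizeof(*q) && force_refresh)
		return -ENOENT;
	return 0;
}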
Diffstat (limited to 'fs')
-rw-r--r--   fs/gfs2/quota.c   82
1 file changed, 26 insertions(+), 56 deletions(-)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index db124af8998e..33e369f108b3 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -15,7 +15,7 @@
  * fuzziness in the current usage value of IDs that are being used on different
  * nodes in the cluster simultaneously. So, it is possible for a user on
  * multiple nodes to overrun their quota, but that overrun is controlable.
- * Since quota tags are part of transactions, there is no need to a quota check
+ * Since quota tags are part of transactions, there is no need for a quota check
  * program to be run on node crashes or anything like that.
  *
  * There are couple of knobs that let the administrator manage the quota
@@ -66,13 +66,6 @@
 #define QUOTA_USER 1
 #define QUOTA_GROUP 0
 
-struct gfs2_quota_host {
-	u64 qu_limit;
-	u64 qu_warn;
-	s64 qu_value;
-	u32 qu_ll_next;
-};
-
 struct gfs2_quota_change_host {
 	u64 qc_change;
 	u32 qc_flags; /* GFS2_QCF_... */
@@ -618,33 +611,19 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 	mutex_unlock(&sdp->sd_quota_mutex);
 }
 
-static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
-{
-	const struct gfs2_quota *str = buf;
-
-	qu->qu_limit = be64_to_cpu(str->qu_limit);
-	qu->qu_warn = be64_to_cpu(str->qu_warn);
-	qu->qu_value = be64_to_cpu(str->qu_value);
-	qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
-}
-
-static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
-{
-	struct gfs2_quota *str = buf;
-
-	str->qu_limit = cpu_to_be64(qu->qu_limit);
-	str->qu_warn = cpu_to_be64(qu->qu_warn);
-	str->qu_value = cpu_to_be64(qu->qu_value);
-	str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
-	memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
-}
-
 /**
- * gfs2_adjust_quota
+ * gfs2_adjust_quota - adjust record of current block usage
+ * @ip: The quota inode
+ * @loc: Offset of the entry in the quota file
+ * @change: The amount of change to record
+ * @qd: The quota data
  *
  * This function was mostly borrowed from gfs2_block_truncate_page which was
  * in turn mostly borrowed from ext3
+ *
+ * Returns: 0 or -ve on error
  */
+
 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 			     s64 change, struct gfs2_quota_data *qd)
 {
@@ -656,8 +635,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 	struct buffer_head *bh;
 	struct page *page;
 	void *kaddr;
-	char *ptr;
-	struct gfs2_quota_host qp;
+	struct gfs2_quota *qp;
 	s64 value;
 	int err = -EIO;
 
@@ -701,18 +679,13 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 	gfs2_trans_add_bh(ip->i_gl, bh, 0);
 
 	kaddr = kmap_atomic(page, KM_USER0);
-	ptr = kaddr + offset;
-	gfs2_quota_in(&qp, ptr);
-	qp.qu_value += change;
-	value = qp.qu_value;
-	gfs2_quota_out(&qp, ptr);
+	qp = kaddr + offset;
+	value = (s64)be64_to_cpu(qp->qu_value) + change;
+	qp->qu_value = cpu_to_be64(value);
+	qd->qd_qb.qb_value = qp->qu_value;
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr, KM_USER0);
 	err = 0;
-	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
-	qd->qd_qb.qb_value = cpu_to_be64(value);
-	((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
-	((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
 unlock:
 	unlock_page(page);
 	page_cache_release(page);
@@ -741,8 +714,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 
 	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
 	for (qx = 0; qx < num_qd; qx++) {
-		error = gfs2_glock_nq_init(qda[qx]->qd_gl,
-					   LM_ST_EXCLUSIVE,
+		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 					   GL_NOCACHE, &ghs[qx]);
 		if (error)
 			goto out;
@@ -797,8 +769,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 		qd = qda[x];
 		offset = qd2offset(qd);
 		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
-					  (struct gfs2_quota_data *)
-					  qd);
+					  (struct gfs2_quota_data *)qd);
 		if (error)
 			goto out_end_trans;
 
@@ -829,8 +800,7 @@ static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 	struct gfs2_holder i_gh;
-	struct gfs2_quota_host q;
-	char buf[sizeof(struct gfs2_quota)];
+	struct gfs2_quota q;
 	int error;
 	struct gfs2_quota_lvb *qlvb;
 
@@ -853,22 +823,23 @@ restart:
 	if (error)
 		goto fail;
 
-	memset(buf, 0, sizeof(struct gfs2_quota));
+	memset(&q, 0, sizeof(struct gfs2_quota));
 	pos = qd2offset(qd);
-	error = gfs2_internal_read(ip, NULL, buf, &pos,
-				   sizeof(struct gfs2_quota));
+	error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
 	if (error < 0)
 		goto fail_gunlock;
-
+	if ((error < sizeof(q)) && force_refresh) {
+		error = -ENOENT;
+		goto fail_gunlock;
+	}
 	gfs2_glock_dq_uninit(&i_gh);
 
-	gfs2_quota_in(&q, buf);
 	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
 	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
 	qlvb->__pad = 0;
-	qlvb->qb_limit = cpu_to_be64(q.qu_limit);
-	qlvb->qb_warn = cpu_to_be64(q.qu_warn);
-	qlvb->qb_value = cpu_to_be64(q.qu_value);
+	qlvb->qb_limit = q.qu_limit;
+	qlvb->qb_warn = q.qu_warn;
+	qlvb->qb_value = q.qu_value;
 	qd->qd_qb = *qlvb;
 
 	gfs2_glock_dq_uninit(q_gh);
@@ -1126,7 +1097,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
 	gfs2_glock_dq_uninit(&q_gh);
 
 	qd_put(qd);
-
 	return error;
 }
 
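
For readers who want the net effect of the gfs2_adjust_quota() hunk in isolation: the patch drops the unpack/repack helpers and adjusts the counter directly in its big-endian on-disk form. The following is a minimal userspace sketch of that pattern under stated assumptions; be64toh()/htobe64() stand in for the kernel's be64_to_cpu()/cpu_to_be64(), and struct ondisk_quota is only a stand-in for the mapped struct gfs2_quota, not its real layout.

/*
 * Sketch only: adjust a big-endian on-disk counter in place rather than
 * converting the whole record to host order and back.
 */
#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>

struct ondisk_quota {
	uint64_t qu_limit;	/* stored big-endian */
	uint64_t qu_warn;	/* stored big-endian */
	uint64_t qu_value;	/* stored big-endian */
};

/* Adjust the on-disk counter in place and return the new host-order value. */
static int64_t adjust_quota_value(struct ondisk_quota *qp, int64_t change)
{
	int64_t value = (int64_t)be64toh(qp->qu_value) + change;

	qp->qu_value = htobe64((uint64_t)value);
	return value;
}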