Diffstat (limited to 'fs/gfs2/quota.c')
-rw-r--r--  fs/gfs2/quota.c  393
1 files changed, 300 insertions, 93 deletions

diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 2e9b9326bfc9..e3bf6eab8750 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -15,7 +15,7 @@
  * fuzziness in the current usage value of IDs that are being used on different
  * nodes in the cluster simultaneously. So, it is possible for a user on
  * multiple nodes to overrun their quota, but that overrun is controlable.
- * Since quota tags are part of transactions, there is no need to a quota check
+ * Since quota tags are part of transactions, there is no need for a quota check
  * program to be run on node crashes or anything like that.
  *
  * There are couple of knobs that let the administrator manage the quota
@@ -47,6 +47,8 @@
 #include <linux/gfs2_ondisk.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/quota.h>
+#include <linux/dqblk_xfs.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -65,13 +67,6 @@
 #define QUOTA_USER 1
 #define QUOTA_GROUP 0
 
-struct gfs2_quota_host {
-	u64 qu_limit;
-	u64 qu_warn;
-	s64 qu_value;
-	u32 qu_ll_next;
-};
-
 struct gfs2_quota_change_host {
 	u64 qc_change;
 	u32 qc_flags; /* GFS2_QCF_... */
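With struct gfs2_quota_host gone, the rest of the patch reads and writes the on-disk quota record directly and converts endianness at the point of use with be64_to_cpu()/cpu_to_be64(). For reference, a sketch of that big-endian record from include/linux/gfs2_ondisk.h; the field list follows the accessors used in this file, but the size of the reserved tail is quoted from memory and may differ:

struct gfs2_quota {
	__be64 qu_limit;	/* hard block limit */
	__be64 qu_warn;		/* soft (warn) block limit */
	__be64 qu_value;	/* current usage */
	__be32 qu_ll_next;	/* location of next quota rgrp */
	__u8 qu_reserved[60];	/* cleared whenever the record is written */
};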
@@ -164,7 +159,7 @@ fail:
 	return error;
 }
 
-static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
+static int qd_get(struct gfs2_sbd *sdp, int user, u32 id,
 		  struct gfs2_quota_data **qdp)
 {
 	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
@@ -202,7 +197,7 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
 
 	spin_unlock(&qd_lru_lock);
 
-	if (qd || !create) {
+	if (qd) {
 		if (new_qd) {
 			gfs2_glock_put(new_qd->qd_gl);
 			kmem_cache_free(gfs2_quotad_cachep, new_qd);
@@ -461,12 +456,12 @@ static void qd_unlock(struct gfs2_quota_data *qd)
 	qd_put(qd);
 }
 
-static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
+static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id,
 		    struct gfs2_quota_data **qdp)
 {
 	int error;
 
-	error = qd_get(sdp, user, id, create, qdp);
+	error = qd_get(sdp, user, id, qdp);
 	if (error)
 		return error;
 
@@ -508,20 +503,20 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
 	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
 		return 0;
 
-	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
+	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
 	if (error)
 		goto out;
 	al->al_qd_num++;
 	qd++;
 
-	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
+	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
 	if (error)
 		goto out;
 	al->al_qd_num++;
 	qd++;
 
 	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
-		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
+		error = qdsb_get(sdp, QUOTA_USER, uid, qd);
 		if (error)
 			goto out;
 		al->al_qd_num++;
@@ -529,7 +524,7 @@ int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
 	}
 
 	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
-		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
+		error = qdsb_get(sdp, QUOTA_GROUP, gid, qd);
 		if (error)
 			goto out;
 		al->al_qd_num++;
@@ -617,48 +612,36 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
 	mutex_unlock(&sdp->sd_quota_mutex);
 }
 
-static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
-{
-	const struct gfs2_quota *str = buf;
-
-	qu->qu_limit = be64_to_cpu(str->qu_limit);
-	qu->qu_warn = be64_to_cpu(str->qu_warn);
-	qu->qu_value = be64_to_cpu(str->qu_value);
-	qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
-}
-
-static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
-{
-	struct gfs2_quota *str = buf;
-
-	str->qu_limit = cpu_to_be64(qu->qu_limit);
-	str->qu_warn = cpu_to_be64(qu->qu_warn);
-	str->qu_value = cpu_to_be64(qu->qu_value);
-	str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
-	memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
-}
-
 /**
- * gfs2_adjust_quota
+ * gfs2_adjust_quota - adjust record of current block usage
+ * @ip: The quota inode
+ * @loc: Offset of the entry in the quota file
+ * @change: The amount of usage change to record
+ * @qd: The quota data
+ * @fdq: The updated limits to record
  *
  * This function was mostly borrowed from gfs2_block_truncate_page which was
  * in turn mostly borrowed from ext3
+ *
+ * Returns: 0 or -ve on error
  */
+
 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
-			     s64 change, struct gfs2_quota_data *qd)
+			     s64 change, struct gfs2_quota_data *qd,
+			     struct fs_disk_quota *fdq)
 {
 	struct inode *inode = &ip->i_inode;
 	struct address_space *mapping = inode->i_mapping;
 	unsigned long index = loc >> PAGE_CACHE_SHIFT;
 	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
 	unsigned blocksize, iblock, pos;
-	struct buffer_head *bh;
+	struct buffer_head *bh, *dibh;
 	struct page *page;
 	void *kaddr;
-	char *ptr;
-	struct gfs2_quota_host qp;
+	struct gfs2_quota *qp;
 	s64 value;
 	int err = -EIO;
+	u64 size;
 
 	if (gfs2_is_stuffed(ip))
 		gfs2_unstuff_dinode(ip, NULL);
@@ -700,18 +683,38 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 	gfs2_trans_add_bh(ip->i_gl, bh, 0);
 
 	kaddr = kmap_atomic(page, KM_USER0);
-	ptr = kaddr + offset;
-	gfs2_quota_in(&qp, ptr);
-	qp.qu_value += change;
-	value = qp.qu_value;
-	gfs2_quota_out(&qp, ptr);
+	qp = kaddr + offset;
+	value = (s64)be64_to_cpu(qp->qu_value) + change;
+	qp->qu_value = cpu_to_be64(value);
+	qd->qd_qb.qb_value = qp->qu_value;
+	if (fdq) {
+		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
+			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
+			qd->qd_qb.qb_warn = qp->qu_warn;
+		}
+		if (fdq->d_fieldmask & FS_DQ_BHARD) {
+			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
+			qd->qd_qb.qb_limit = qp->qu_limit;
+		}
+	}
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr, KM_USER0);
-	err = 0;
-	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
-	qd->qd_qb.qb_value = cpu_to_be64(value);
-	((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
-	((struct gfs2_quota_lvb*)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
+
+	err = gfs2_meta_inode_buffer(ip, &dibh);
+	if (err)
+		goto unlock;
+
+	size = loc + sizeof(struct gfs2_quota);
+	if (size > inode->i_size) {
+		ip->i_disksize = size;
+		i_size_write(inode, size);
+	}
+	inode->i_mtime = inode->i_atime = CURRENT_TIME;
+	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_dinode_out(ip, dibh->b_data);
+	brelse(dibh);
+	mark_inode_dirty(inode);
+
 unlock:
 	unlock_page(page);
 	page_cache_release(page);
@@ -739,9 +742,9 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 		return -ENOMEM;
 
 	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
+	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
 	for (qx = 0; qx < num_qd; qx++) {
-		error = gfs2_glock_nq_init(qda[qx]->qd_gl,
-					   LM_ST_EXCLUSIVE,
+		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 					   GL_NOCACHE, &ghs[qx]);
 		if (error)
 			goto out;
@@ -795,9 +798,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 	for (x = 0; x < num_qd; x++) {
 		qd = qda[x];
 		offset = qd2offset(qd);
-		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
-					  (struct gfs2_quota_data *)
-					  qd);
+		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
 		if (error)
 			goto out_end_trans;
 
@@ -817,21 +818,44 @@ out_gunlock:
 out:
 	while (qx--)
 		gfs2_glock_dq_uninit(&ghs[qx]);
+	mutex_unlock(&ip->i_inode.i_mutex);
 	kfree(ghs);
 	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
 	return error;
 }
 
+static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
+{
+	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+	struct gfs2_quota q;
+	struct gfs2_quota_lvb *qlvb;
+	loff_t pos;
+	int error;
+
+	memset(&q, 0, sizeof(struct gfs2_quota));
+	pos = qd2offset(qd);
+	error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
+	if (error < 0)
+		return error;
+
+	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
+	qlvb->__pad = 0;
+	qlvb->qb_limit = q.qu_limit;
+	qlvb->qb_warn = q.qu_warn;
+	qlvb->qb_value = q.qu_value;
+	qd->qd_qb = *qlvb;
+
+	return 0;
+}
+
 static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
 		    struct gfs2_holder *q_gh)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 	struct gfs2_holder i_gh;
-	struct gfs2_quota_host q;
-	char buf[sizeof(struct gfs2_quota)];
 	int error;
-	struct gfs2_quota_lvb *qlvb;
 
 restart:
 	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
@@ -841,11 +865,9 @@ restart:
 	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
 
 	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
-		loff_t pos;
 		gfs2_glock_dq_uninit(q_gh);
-		error = gfs2_glock_nq_init(qd->qd_gl,
-					   LM_ST_EXCLUSIVE, GL_NOCACHE,
-					   q_gh);
+		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
+					   GL_NOCACHE, q_gh);
 		if (error)
 			return error;
 
@@ -853,29 +875,14 @@ restart:
 		if (error)
 			goto fail;
 
-		memset(buf, 0, sizeof(struct gfs2_quota));
-		pos = qd2offset(qd);
-		error = gfs2_internal_read(ip, NULL, buf, &pos,
-					   sizeof(struct gfs2_quota));
-		if (error < 0)
+		error = update_qd(sdp, qd);
+		if (error)
 			goto fail_gunlock;
 
 		gfs2_glock_dq_uninit(&i_gh);
-
-		gfs2_quota_in(&q, buf);
-		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
-		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
-		qlvb->__pad = 0;
-		qlvb->qb_limit = cpu_to_be64(q.qu_limit);
-		qlvb->qb_warn = cpu_to_be64(q.qu_warn);
-		qlvb->qb_value = cpu_to_be64(q.qu_value);
-		qd->qd_qb = *qlvb;
-
-		if (gfs2_glock_is_blocking(qd->qd_gl)) {
-			gfs2_glock_dq_uninit(q_gh);
-			force_refresh = 0;
-			goto restart;
-		}
+		gfs2_glock_dq_uninit(q_gh);
+		force_refresh = 0;
+		goto restart;
 	}
 
 	return 0;
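The new update_qd() helper reads the on-disk record and mirrors it into the glock's lock value block (LVB), which is how cached limits become visible to the other cluster nodes without re-reading the quota file. A sketch of the LVB layout it fills in, matching the fields used throughout this file (shown for orientation only; the authoritative definition lives in gfs2_ondisk.h):

struct gfs2_quota_lvb {
	__be32 qb_magic;	/* GFS2_MAGIC once the LVB holds valid data */
	__u32 __pad;
	__be64 qb_limit;	/* hard block limit */
	__be64 qb_warn;		/* soft (warn) block limit */
	__be64 qb_value;	/* current usage */
};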
@@ -995,7 +1002,7 @@ static int print_message(struct gfs2_quota_data *qd, char *type)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 
-	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n",
+	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
 	       sdp->sd_fsname, type,
 	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
 	       qd->qd_id);
@@ -1032,6 +1039,10 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
 
 		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
 			print_message(qd, "exceeded");
+			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
+					   USRQUOTA : GRPQUOTA, qd->qd_id,
+					   sdp->sd_vfs->s_dev, QUOTA_NL_BHARDWARN);
+
 			error = -EDQUOT;
 			break;
 		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
@@ -1039,6 +1050,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
 			   time_after_eq(jiffies, qd->qd_last_warn +
 					 gfs2_tune_get(sdp,
 					gt_quota_warn_period) * HZ)) {
+			quota_send_warning(test_bit(QDF_USER, &qd->qd_flags) ?
+					   USRQUOTA : GRPQUOTA, qd->qd_id,
+					   sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
 			error = print_message(qd, "warning");
 			qd->qd_last_warn = jiffies;
 		}
@@ -1069,8 +1083,9 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
 	}
 }
 
-int gfs2_quota_sync(struct gfs2_sbd *sdp)
+int gfs2_quota_sync(struct super_block *sb, int type)
 {
+	struct gfs2_sbd *sdp = sb->s_fs_info;
 	struct gfs2_quota_data **qda;
 	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
 	unsigned int num_qd;
@@ -1118,7 +1133,7 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
 	struct gfs2_holder q_gh;
 	int error;
 
-	error = qd_get(sdp, user, id, CREATE, &qd);
+	error = qd_get(sdp, user, id, &qd);
 	if (error)
 		return error;
 
@@ -1127,7 +1142,6 @@ int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
 		gfs2_glock_dq_uninit(&q_gh);
 
 	qd_put(qd);
-
 	return error;
 }
 
@@ -1298,12 +1312,12 @@ static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
 }
 
 static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
-			       int (*fxn)(struct gfs2_sbd *sdp),
+			       int (*fxn)(struct super_block *sb, int type),
 			       unsigned long t, unsigned long *timeo,
 			       unsigned int *new_timeo)
 {
 	if (t >= *timeo) {
-		int error = fxn(sdp);
+		int error = fxn(sdp->sd_vfs, 0);
 		quotad_error(sdp, msg, error);
 		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
 	} else {
@@ -1330,6 +1344,14 @@ static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
 	}
 }
 
+void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
+	if (!sdp->sd_statfs_force_sync) {
+		sdp->sd_statfs_force_sync = 1;
+		wake_up(&sdp->sd_quota_wait);
+	}
+}
+
+
 /**
  * gfs2_quotad - Write cached quota changes into the quota file
  * @sdp: Pointer to GFS2 superblock
@@ -1349,8 +1371,15 @@ int gfs2_quotad(void *data)
 	while (!kthread_should_stop()) {
 
 		/* Update the master statfs file */
-		quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
-				   &statfs_timeo, &tune->gt_statfs_quantum);
+		if (sdp->sd_statfs_force_sync) {
+			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
+			quotad_error(sdp, "statfs", error);
+			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
+		}
+		else
+			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
+					   &statfs_timeo,
+					   &tune->gt_statfs_quantum);
 
 		/* Update quota file */
 		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
@@ -1367,7 +1396,7 @@ int gfs2_quotad(void *data)
 		spin_lock(&sdp->sd_trunc_lock);
 		empty = list_empty(&sdp->sd_trunc_list);
 		spin_unlock(&sdp->sd_trunc_lock);
-		if (empty)
+		if (empty && !sdp->sd_statfs_force_sync)
 			t -= schedule_timeout(t);
 		else
 			t = 0;
@@ -1377,3 +1406,181 @@ int gfs2_quotad(void *data)
 	return 0;
 }
 
+static int gfs2_quota_get_xstate(struct super_block *sb,
+				 struct fs_quota_stat *fqs)
+{
+	struct gfs2_sbd *sdp = sb->s_fs_info;
+
+	memset(fqs, 0, sizeof(struct fs_quota_stat));
+	fqs->qs_version = FS_QSTAT_VERSION;
+	if (sdp->sd_args.ar_quota == GFS2_QUOTA_ON)
+		fqs->qs_flags = (XFS_QUOTA_UDQ_ENFD | XFS_QUOTA_GDQ_ENFD);
+	else if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
+		fqs->qs_flags = (XFS_QUOTA_UDQ_ACCT | XFS_QUOTA_GDQ_ACCT);
+	if (sdp->sd_quota_inode) {
+		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
+		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
+	}
+	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
+	fqs->qs_gquota = fqs->qs_uquota; /* its the same inode in both cases */
+	fqs->qs_incoredqs = atomic_read(&qd_lru_count);
+	return 0;
+}
+
+static int gfs2_xquota_get(struct super_block *sb, int type, qid_t id,
+			   struct fs_disk_quota *fdq)
+{
+	struct gfs2_sbd *sdp = sb->s_fs_info;
+	struct gfs2_quota_lvb *qlvb;
+	struct gfs2_quota_data *qd;
+	struct gfs2_holder q_gh;
+	int error;
+
+	memset(fdq, 0, sizeof(struct fs_disk_quota));
+
+	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+		return -ESRCH; /* Crazy XFS error code */
+
+	if (type == USRQUOTA)
+		type = QUOTA_USER;
+	else if (type == GRPQUOTA)
+		type = QUOTA_GROUP;
+	else
+		return -EINVAL;
+
+	error = qd_get(sdp, type, id, &qd);
+	if (error)
+		return error;
+	error = do_glock(qd, FORCE, &q_gh);
+	if (error)
+		goto out;
+
+	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+	fdq->d_version = FS_DQUOT_VERSION;
+	fdq->d_flags = (type == QUOTA_USER) ? XFS_USER_QUOTA : XFS_GROUP_QUOTA;
+	fdq->d_id = id;
+	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit);
+	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn);
+	fdq->d_bcount = be64_to_cpu(qlvb->qb_value);
+
+	gfs2_glock_dq_uninit(&q_gh);
+out:
+	qd_put(qd);
+	return error;
+}
+
+/* GFS2 only supports a subset of the XFS fields */
+#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
+
+static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
+			   struct fs_disk_quota *fdq)
+{
+	struct gfs2_sbd *sdp = sb->s_fs_info;
+	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+	struct gfs2_quota_data *qd;
+	struct gfs2_holder q_gh, i_gh;
+	unsigned int data_blocks, ind_blocks;
+	unsigned int blocks = 0;
+	int alloc_required;
+	struct gfs2_alloc *al;
+	loff_t offset;
+	int error;
+
+	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+		return -ESRCH; /* Crazy XFS error code */
+
+	switch(type) {
+	case USRQUOTA:
+		type = QUOTA_USER;
+		if (fdq->d_flags != XFS_USER_QUOTA)
+			return -EINVAL;
+		break;
+	case GRPQUOTA:
+		type = QUOTA_GROUP;
+		if (fdq->d_flags != XFS_GROUP_QUOTA)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
+		return -EINVAL;
+	if (fdq->d_id != id)
+		return -EINVAL;
+
+	error = qd_get(sdp, type, id, &qd);
+	if (error)
+		return error;
+
+	mutex_lock(&ip->i_inode.i_mutex);
+	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
+	if (error)
+		goto out_put;
+	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
+	if (error)
+		goto out_q;
+
+	/* Check for existing entry, if none then alloc new blocks */
+	error = update_qd(sdp, qd);
+	if (error)
+		goto out_i;
+
+	/* If nothing has changed, this is a no-op */
+	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
+	    (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
+		fdq->d_fieldmask ^= FS_DQ_BSOFT;
+	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
+	    (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
+		fdq->d_fieldmask ^= FS_DQ_BHARD;
+	if (fdq->d_fieldmask == 0)
+		goto out_i;
+
+	offset = qd2offset(qd);
+	error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
+					  &alloc_required);
+	if (error)
+		goto out_i;
+	if (alloc_required) {
+		al = gfs2_alloc_get(ip);
+		if (al == NULL)
+			goto out_i;
+		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
+				       &data_blocks, &ind_blocks);
+		blocks = al->al_requested = 1 + data_blocks + ind_blocks;
+		error = gfs2_inplace_reserve(ip);
+		if (error)
+			goto out_alloc;
+	}
+
+	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
+	if (error)
+		goto out_release;
+
+	/* Apply changes */
+	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
+
+	gfs2_trans_end(sdp);
+out_release:
+	if (alloc_required) {
+		gfs2_inplace_release(ip);
+out_alloc:
+		gfs2_alloc_put(ip);
+	}
+out_i:
+	gfs2_glock_dq_uninit(&i_gh);
+out_q:
+	gfs2_glock_dq_uninit(&q_gh);
+out_put:
+	mutex_unlock(&ip->i_inode.i_mutex);
+	qd_put(qd);
+	return error;
+}
+
+const struct quotactl_ops gfs2_quotactl_ops = {
+	.quota_sync     = gfs2_quota_sync,
+	.get_xstate     = gfs2_quota_get_xstate,
+	.get_xquota     = gfs2_xquota_get,
+	.set_xquota     = gfs2_xquota_set,
+};
+
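With gfs2_quotactl_ops registered, the new get_xquota/set_xquota operations become reachable from userspace through the XFS-style quotactl commands Q_XGETQUOTA and Q_XSETQLIM. Below is a minimal sketch of such a caller, assuming a made-up device path and uid; only the block soft/hard limits are honoured by gfs2_xquota_set() (see GFS2_FIELDMASK above), the definitions may come from <xfs/xqm.h> rather than <linux/dqblk_xfs.h> depending on the toolchain, and the program needs root to succeed.

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/quota.h>		/* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>	/* Q_XGETQUOTA, Q_XSETQLIM, struct fs_disk_quota */

int main(void)
{
	const char *dev = "/dev/mapper/vg-gfs2";  /* hypothetical GFS2 block device */
	int uid = 1000;                           /* example user id */
	struct fs_disk_quota fdq;

	/* Read current usage and limits: serviced by gfs2_xquota_get() */
	memset(&fdq, 0, sizeof(fdq));
	if (quotactl(QCMD(Q_XGETQUOTA, USRQUOTA), dev, uid, (caddr_t)&fdq) < 0) {
		perror("Q_XGETQUOTA");
		return 1;
	}
	printf("uid %d: bcount=%llu soft=%llu hard=%llu\n", uid,
	       (unsigned long long)fdq.d_bcount,
	       (unsigned long long)fdq.d_blk_softlimit,
	       (unsigned long long)fdq.d_blk_hardlimit);

	/* Update the block limits: serviced by gfs2_xquota_set() */
	fdq.d_version = FS_DQUOT_VERSION;
	fdq.d_flags = XFS_USER_QUOTA;
	fdq.d_id = uid;
	fdq.d_fieldmask = FS_DQ_BSOFT | FS_DQ_BHARD;
	fdq.d_blk_softlimit = 1000;	/* example values */
	fdq.d_blk_hardlimit = 2000;
	if (quotactl(QCMD(Q_XSETQLIM, USRQUOTA), dev, uid, (caddr_t)&fdq) < 0) {
		perror("Q_XSETQLIM");
		return 1;
	}
	return 0;
}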