author     Steven Whitehouse <swhiteho@redhat.com>    2009-09-23 08:50:49 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>    2009-12-03 06:52:43 -0500
commit     e285c100362762f7440643be637dd332460fdc75
tree       03d8ba11e5c9c0c43fafd88c38e996f32a6ac2d1  /fs/gfs2
parent     113d6b3c99bf30d8083068d00e3c7304d91d4845
GFS2: Add set_xquota support
This patch adds the ability to set GFS2 quota limit and
warning levels via the XFS quota API.
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Diffstat (limited to 'fs/gfs2')
 fs/gfs2/quota.c | 198
 1 file changed, 172 insertions(+), 26 deletions(-)
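For context, a minimal userspace sketch of how these limits could be set through the XFS quota interface once this patch is applied. This is not part of the patch: the device path, uid and limit values are illustrative placeholders, and the identifiers follow the 2009-era <sys/quota.h> and <linux/dqblk_xfs.h> headers (the same XFS_USER_QUOTA / FS_DQ_* names the patch checks against).

/*
 * Hypothetical example (not from the patch): set the block soft ("warn")
 * and hard ("limit") levels for a user on a GFS2 filesystem via the XFS
 * quota API that gfs2_xquota_set() implements.
 */
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/quota.h>          /* quotactl(), QCMD(), USRQUOTA */
#include <linux/dqblk_xfs.h>    /* struct fs_disk_quota, Q_XSETQLIM, FS_DQ_* */

int main(void)
{
	const char *dev = "/dev/mapper/vg-gfs2"; /* block device of the GFS2 fs (placeholder) */
	int uid = 1000;                          /* quota owner (placeholder) */
	struct fs_disk_quota fdq;

	memset(&fdq, 0, sizeof(fdq));
	fdq.d_version = FS_DQUOT_VERSION;
	fdq.d_flags = XFS_USER_QUOTA;    /* must match the quota type, see the patch */
	fdq.d_id = uid;                  /* must equal the id passed to quotactl() */
	/* GFS2 only honours FS_DQ_BSOFT and FS_DQ_BHARD (GFS2_FIELDMASK below) */
	fdq.d_fieldmask = FS_DQ_BSOFT | FS_DQ_BHARD;
	fdq.d_blk_softlimit = 100000;    /* stored in qu_warn by the patch */
	fdq.d_blk_hardlimit = 150000;    /* stored in qu_limit by the patch */

	if (quotactl(QCMD(Q_XSETQLIM, USRQUOTA), dev, uid, (caddr_t)&fdq) < 0) {
		perror("quotactl(Q_XSETQLIM)");
		return 1;
	}
	return 0;
}

Note that gfs2_xquota_set() rejects any other d_fieldmask bits, a d_id that does not match the id argument, or a d_flags value that disagrees with the quota type.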
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 6c5d6aa7d532..e8db5346a942 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -615,8 +615,9 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
  * gfs2_adjust_quota - adjust record of current block usage
  * @ip: The quota inode
  * @loc: Offset of the entry in the quota file
- * @change: The amount of change to record
+ * @change: The amount of usage change to record
  * @qd: The quota data
+ * @fdq: The updated limits to record
  *
  * This function was mostly borrowed from gfs2_block_truncate_page which was
  * in turn mostly borrowed from ext3
@@ -625,19 +626,21 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
  */
 
 static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
-			     s64 change, struct gfs2_quota_data *qd)
+			     s64 change, struct gfs2_quota_data *qd,
+			     struct fs_disk_quota *fdq)
 {
 	struct inode *inode = &ip->i_inode;
 	struct address_space *mapping = inode->i_mapping;
 	unsigned long index = loc >> PAGE_CACHE_SHIFT;
 	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
 	unsigned blocksize, iblock, pos;
-	struct buffer_head *bh;
+	struct buffer_head *bh, *dibh;
 	struct page *page;
 	void *kaddr;
 	struct gfs2_quota *qp;
 	s64 value;
 	int err = -EIO;
+	u64 size;
 
 	if (gfs2_is_stuffed(ip))
 		gfs2_unstuff_dinode(ip, NULL);
@@ -683,9 +686,34 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
 	value = (s64)be64_to_cpu(qp->qu_value) + change;
 	qp->qu_value = cpu_to_be64(value);
 	qd->qd_qb.qb_value = qp->qu_value;
+	if (fdq) {
+		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
+			qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit);
+			qd->qd_qb.qb_warn = qp->qu_warn;
+		}
+		if (fdq->d_fieldmask & FS_DQ_BHARD) {
+			qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit);
+			qd->qd_qb.qb_limit = qp->qu_limit;
+		}
+	}
 	flush_dcache_page(page);
 	kunmap_atomic(kaddr, KM_USER0);
-	err = 0;
+
+	err = gfs2_meta_inode_buffer(ip, &dibh);
+	if (err)
+		goto unlock;
+
+	size = loc + sizeof(struct gfs2_quota);
+	if (size > inode->i_size) {
+		ip->i_disksize = size;
+		i_size_write(inode, size);
+	}
+	inode->i_mtime = inode->i_atime = CURRENT_TIME;
+	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
+	gfs2_dinode_out(ip, dibh->b_data);
+	brelse(dibh);
+	mark_inode_dirty(inode);
+
 unlock:
 	unlock_page(page);
 	page_cache_release(page);
@@ -713,6 +741,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 		return -ENOMEM;
 
 	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
+	mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA);
 	for (qx = 0; qx < num_qd; qx++) {
 		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
 					   GL_NOCACHE, &ghs[qx]);
@@ -768,8 +797,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 	for (x = 0; x < num_qd; x++) {
 		qd = qda[x];
 		offset = qd2offset(qd);
-		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
-					  (struct gfs2_quota_data *)qd);
+		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
 		if (error)
 			goto out_end_trans;
 
@@ -789,20 +817,44 @@ out_gunlock:
 out:
 	while (qx--)
 		gfs2_glock_dq_uninit(&ghs[qx]);
+	mutex_unlock(&ip->i_inode.i_mutex);
 	kfree(ghs);
 	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
 	return error;
 }
 
+static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
+{
+	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+	struct gfs2_quota q;
+	struct gfs2_quota_lvb *qlvb;
+	loff_t pos;
+	int error;
+
+	memset(&q, 0, sizeof(struct gfs2_quota));
+	pos = qd2offset(qd);
+	error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
+	if (error < 0)
+		return error;
+
+	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
+	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
+	qlvb->__pad = 0;
+	qlvb->qb_limit = q.qu_limit;
+	qlvb->qb_warn = q.qu_warn;
+	qlvb->qb_value = q.qu_value;
+	qd->qd_qb = *qlvb;
+
+	return 0;
+}
+
 static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
 		    struct gfs2_holder *q_gh)
 {
 	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
 	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
 	struct gfs2_holder i_gh;
-	struct gfs2_quota q;
 	int error;
-	struct gfs2_quota_lvb *qlvb;
 
 restart:
 	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
@@ -812,7 +864,6 @@ restart:
 	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
 
 	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
-		loff_t pos;
 		gfs2_glock_dq_uninit(q_gh);
 		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
 					   GL_NOCACHE, q_gh);
@@ -823,25 +874,11 @@ restart:
 		if (error)
 			goto fail;
 
-		memset(&q, 0, sizeof(struct gfs2_quota));
-		pos = qd2offset(qd);
-		error = gfs2_internal_read(ip, NULL, (char *)&q, &pos, sizeof(q));
-		if (error < 0)
-			goto fail_gunlock;
-		if ((error < sizeof(q)) && force_refresh) {
-			error = -ENOENT;
+		error = update_qd(sdp, qd);
+		if (error)
 			goto fail_gunlock;
-		}
-		gfs2_glock_dq_uninit(&i_gh);
-
-		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
-		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
-		qlvb->__pad = 0;
-		qlvb->qb_limit = q.qu_limit;
-		qlvb->qb_warn = q.qu_warn;
-		qlvb->qb_value = q.qu_value;
-		qd->qd_qb = *qlvb;
 
+		gfs2_glock_dq_uninit(&i_gh);
 		gfs2_glock_dq_uninit(q_gh);
 		force_refresh = 0;
 		goto restart;
@@ -1409,9 +1446,118 @@ out:
 	return error;
 }
 
+/* GFS2 only supports a subset of the XFS fields */
+#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD)
+
+static int gfs2_xquota_set(struct super_block *sb, int type, qid_t id,
+			   struct fs_disk_quota *fdq)
+{
+	struct gfs2_sbd *sdp = sb->s_fs_info;
+	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
+	struct gfs2_quota_data *qd;
+	struct gfs2_holder q_gh, i_gh;
+	unsigned int data_blocks, ind_blocks;
+	unsigned int blocks = 0;
+	int alloc_required;
+	struct gfs2_alloc *al;
+	loff_t offset;
+	int error;
+
+	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
+		return -ESRCH; /* Crazy XFS error code */
+
+	switch(type) {
+	case USRQUOTA:
+		type = QUOTA_USER;
+		if (fdq->d_flags != XFS_USER_QUOTA)
+			return -EINVAL;
+		break;
+	case GRPQUOTA:
+		type = QUOTA_GROUP;
+		if (fdq->d_flags != XFS_GROUP_QUOTA)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
+		return -EINVAL;
+	if (fdq->d_id != id)
+		return -EINVAL;
+
+	error = qd_get(sdp, type, id, &qd);
+	if (error)
+		return error;
+
+	mutex_lock(&ip->i_inode.i_mutex);
+	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
+	if (error)
+		goto out_put;
+	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
+	if (error)
+		goto out_q;
+
+	/* Check for existing entry, if none then alloc new blocks */
+	error = update_qd(sdp, qd);
+	if (error)
+		goto out_i;
+
+	/* If nothing has changed, this is a no-op */
+	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
+	    (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn)))
+		fdq->d_fieldmask ^= FS_DQ_BSOFT;
+	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
+	    (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit)))
+		fdq->d_fieldmask ^= FS_DQ_BHARD;
+	if (fdq->d_fieldmask == 0)
+		goto out_i;
+
+	offset = qd2offset(qd);
+	error = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota),
+					  &alloc_required);
+	if (error)
+		goto out_i;
+	if (alloc_required) {
+		al = gfs2_alloc_get(ip);
+		if (al == NULL)
+			goto out_i;
+		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
+				       &data_blocks, &ind_blocks);
+		blocks = al->al_requested = 1 + data_blocks + ind_blocks;
+		error = gfs2_inplace_reserve(ip);
+		if (error)
+			goto out_alloc;
+	}
+
+	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 1, 0);
+	if (error)
+		goto out_release;
+
+	/* Apply changes */
+	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);
+
+	gfs2_trans_end(sdp);
+out_release:
+	if (alloc_required) {
+		gfs2_inplace_release(ip);
+out_alloc:
+		gfs2_alloc_put(ip);
+	}
+out_i:
+	gfs2_glock_dq_uninit(&i_gh);
+out_q:
+	gfs2_glock_dq_uninit(&q_gh);
+out_put:
+	mutex_unlock(&ip->i_inode.i_mutex);
+	qd_put(qd);
+	return error;
+}
+
 const struct quotactl_ops gfs2_quotactl_ops = {
 	.quota_sync = gfs2_quota_sync,
 	.get_xstate = gfs2_quota_get_xstate,
 	.get_xquota = gfs2_xquota_get,
+	.set_xquota = gfs2_xquota_set,
 };
 