diff options
-rw-r--r--  fs/gfs2/quota.c  86
1 file changed, 61 insertions(+), 25 deletions(-)

diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 6ca0967ce6e7..d5f4661287f9 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -637,15 +637,40 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
| 637 | unsigned blocksize, iblock, pos; | 637 | unsigned blocksize, iblock, pos; |
| 638 | struct buffer_head *bh, *dibh; | 638 | struct buffer_head *bh, *dibh; |
| 639 | struct page *page; | 639 | struct page *page; |
| 640 | void *kaddr; | 640 | void *kaddr, *ptr; |
| 641 | struct gfs2_quota *qp; | 641 | struct gfs2_quota q, *qp; |
| 642 | s64 value; | 642 | int err, nbytes; |
| 643 | int err = -EIO; | ||
| 644 | u64 size; | 643 | u64 size; |
| 645 | 644 | ||
| 646 | if (gfs2_is_stuffed(ip)) | 645 | if (gfs2_is_stuffed(ip)) |
| 647 | gfs2_unstuff_dinode(ip, NULL); | 646 | gfs2_unstuff_dinode(ip, NULL); |
| 648 | 647 | ||
| 648 | memset(&q, 0, sizeof(struct gfs2_quota)); | ||
| 649 | err = gfs2_internal_read(ip, NULL, (char *)&q, &loc, sizeof(q)); | ||
| 650 | if (err < 0) | ||
| 651 | return err; | ||
| 652 | |||
| 653 | err = -EIO; | ||
| 654 | qp = &q; | ||
| 655 | qp->qu_value = be64_to_cpu(qp->qu_value); | ||
| 656 | qp->qu_value += change; | ||
| 657 | qp->qu_value = cpu_to_be64(qp->qu_value); | ||
| 658 | qd->qd_qb.qb_value = qp->qu_value; | ||
| 659 | if (fdq) { | ||
| 660 | if (fdq->d_fieldmask & FS_DQ_BSOFT) { | ||
| 661 | qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit); | ||
| 662 | qd->qd_qb.qb_warn = qp->qu_warn; | ||
| 663 | } | ||
| 664 | if (fdq->d_fieldmask & FS_DQ_BHARD) { | ||
| 665 | qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit); | ||
| 666 | qd->qd_qb.qb_limit = qp->qu_limit; | ||
| 667 | } | ||
| 668 | } | ||
| 669 | |||
| 670 | /* Write the quota into the quota file on disk */ | ||
| 671 | ptr = qp; | ||
| 672 | nbytes = sizeof(struct gfs2_quota); | ||
| 673 | get_a_page: | ||
| 649 | page = grab_cache_page(mapping, index); | 674 | page = grab_cache_page(mapping, index); |
| 650 | if (!page) | 675 | if (!page) |
| 651 | return -ENOMEM; | 676 | return -ENOMEM; |
@@ -667,7 +692,12 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
| 667 | if (!buffer_mapped(bh)) { | 692 | if (!buffer_mapped(bh)) { |
| 668 | gfs2_block_map(inode, iblock, bh, 1); | 693 | gfs2_block_map(inode, iblock, bh, 1); |
| 669 | if (!buffer_mapped(bh)) | 694 | if (!buffer_mapped(bh)) |
| 670 | goto unlock; | 695 | goto unlock_out; |
| 696 | /* If it's a newly allocated disk block for quota, zero it */ | ||
| 697 | if (buffer_new(bh)) { | ||
| 698 | memset(bh->b_data, 0, bh->b_size); | ||
| 699 | set_buffer_uptodate(bh); | ||
| 700 | } | ||
| 671 | } | 701 | } |
| 672 | 702 | ||
| 673 | if (PageUptodate(page)) | 703 | if (PageUptodate(page)) |
@@ -677,32 +707,34 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
| 677 | ll_rw_block(READ_META, 1, &bh); | 707 | ll_rw_block(READ_META, 1, &bh); |
| 678 | wait_on_buffer(bh); | 708 | wait_on_buffer(bh); |
| 679 | if (!buffer_uptodate(bh)) | 709 | if (!buffer_uptodate(bh)) |
| 680 | goto unlock; | 710 | goto unlock_out; |
| 681 | } | 711 | } |
| 682 | 712 | ||
| 683 | gfs2_trans_add_bh(ip->i_gl, bh, 0); | 713 | gfs2_trans_add_bh(ip->i_gl, bh, 0); |
| 684 | 714 | ||
| 685 | kaddr = kmap_atomic(page, KM_USER0); | 715 | kaddr = kmap_atomic(page, KM_USER0); |
| 686 | qp = kaddr + offset; | 716 | if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE) |
| 687 | value = (s64)be64_to_cpu(qp->qu_value) + change; | 717 | nbytes = PAGE_CACHE_SIZE - offset; |
| 688 | qp->qu_value = cpu_to_be64(value); | 718 | memcpy(kaddr + offset, ptr, nbytes); |
| 689 | qd->qd_qb.qb_value = qp->qu_value; | ||
| 690 | if (fdq) { | ||
| 691 | if (fdq->d_fieldmask & FS_DQ_BSOFT) { | ||
| 692 | qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit); | ||
| 693 | qd->qd_qb.qb_warn = qp->qu_warn; | ||
| 694 | } | ||
| 695 | if (fdq->d_fieldmask & FS_DQ_BHARD) { | ||
| 696 | qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit); | ||
| 697 | qd->qd_qb.qb_limit = qp->qu_limit; | ||
| 698 | } | ||
| 699 | } | ||
| 700 | flush_dcache_page(page); | 719 | flush_dcache_page(page); |
| 701 | kunmap_atomic(kaddr, KM_USER0); | 720 | kunmap_atomic(kaddr, KM_USER0); |
| 721 | unlock_page(page); | ||
| 722 | page_cache_release(page); | ||
| 723 | |||
| 724 | /* If quota straddles page boundary, we need to update the rest of the | ||
| 725 | * quota at the beginning of the next page */ | ||
| 726 | if (offset != 0) { /* first page, offset is closer to PAGE_CACHE_SIZE */ | ||
| 727 | ptr = ptr + nbytes; | ||
| 728 | nbytes = sizeof(struct gfs2_quota) - nbytes; | ||
| 729 | offset = 0; | ||
| 730 | index++; | ||
| 731 | goto get_a_page; | ||
| 732 | } | ||
| 702 | 733 | ||
| 734 | /* Update the disk inode timestamp and size (if extended) */ | ||
| 703 | err = gfs2_meta_inode_buffer(ip, &dibh); | 735 | err = gfs2_meta_inode_buffer(ip, &dibh); |
| 704 | if (err) | 736 | if (err) |
| 705 | goto unlock; | 737 | goto out; |
| 706 | 738 | ||
| 707 | size = loc + sizeof(struct gfs2_quota); | 739 | size = loc + sizeof(struct gfs2_quota); |
| 708 | if (size > inode->i_size) { | 740 | if (size > inode->i_size) { |
@@ -715,7 +747,9 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
| 715 | brelse(dibh); | 747 | brelse(dibh); |
| 716 | mark_inode_dirty(inode); | 748 | mark_inode_dirty(inode); |
| 717 | 749 | ||
| 718 | unlock: | 750 | out: |
| 751 | return err; | ||
| 752 | unlock_out: | ||
| 719 | unlock_page(page); | 753 | unlock_page(page); |
| 720 | page_cache_release(page); | 754 | page_cache_release(page); |
| 721 | return err; | 755 | return err; |
@@ -779,8 +813,10 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
| 779 | * rgrp since it won't be allocated during the transaction | 813 | * rgrp since it won't be allocated during the transaction |
| 780 | */ | 814 | */ |
| 781 | al->al_requested = 1; | 815 | al->al_requested = 1; |
| 782 | /* +1 in the end for block requested above for unstuffing */ | 816 | /* +3 in the end for unstuffing block, inode size update block |
| 783 | blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1; | 817 | * and another block in case quota straddles page boundary and |
| 818 | * two blocks need to be updated instead of 1 */ | ||
| 819 | blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3; | ||
| 784 | 820 | ||
| 785 | if (nalloc) | 821 | if (nalloc) |
| 786 | al->al_requested += nalloc * (data_blocks + ind_blocks); | 822 | al->al_requested += nalloc * (data_blocks + ind_blocks); |
