author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>  2008-10-10 09:39:00 -0400
committer Theodore Ts'o <tytso@mit.edu>                       2008-10-10 09:39:00 -0400
commit    6bc6e63fcd7dac9e633ea29f1fddd9580ab28f3f (patch)
tree      144d53023af5faeb94b9b3aa28e186a33e6c5b98 /fs/ext4/balloc.c
parent    030ba6bc67b4f2bc5cd174f57785a1745c929abe (diff)
ext4: Add percpu dirty block accounting.
This patch adds dirty block accounting using percpu_counters. Delayed
allocation block reservation is now done by updating the dirty block
counter. In a later patch we switch to non-delalloc mode if the
filesystem free blocks are greater than 150% of total filesystem dirty
blocks.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Mingming Cao <cmm@us.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
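For context, here is a minimal userspace sketch, in plain C rather than the kernel's percpu_counter API, of the accounting scheme the commit message describes: a delayed-allocation reservation is recorded by raising a dirty-block counter, the free-space check weighs free blocks against nblocks + root_blocks + dirty_blocks, and the eventual real allocation lowers the free count and releases the reservation. The struct, function names, and sample numbers are illustrative assumptions only; the sketch also ignores the approximate-read/exact-sum fallback and the *count-versus-num distinction the patch handles.

/*
 * Illustrative userspace model only -- not ext4 code.  A reservation
 * ("claim") raises dirty_blocks without touching free_blocks; the real
 * allocation later consumes free_blocks and drops the reservation.
 */
#include <stdio.h>
#include <stdint.h>

struct fs_counters {
	int64_t free_blocks;	/* plays the role of s_freeblocks_counter  */
	int64_t dirty_blocks;	/* plays the role of s_dirtyblocks_counter */
	int64_t root_blocks;	/* blocks kept back for the root user      */
};

/* Roughly what ext4_claim_free_blocks() does after this patch. */
static int claim_free_blocks(struct fs_counters *c, int64_t nblocks)
{
	if (c->free_blocks < c->root_blocks + nblocks + c->dirty_blocks)
		return -1;			/* -ENOSPC in the kernel   */
	c->dirty_blocks += nblocks;		/* reserve, allocate later */
	return 0;
}

/* Roughly what the allocation path does: consume free blocks and
 * release the matching dirty-block reservation. */
static void commit_allocation(struct fs_counters *c, int64_t reserved,
			      int64_t allocated)
{
	c->free_blocks -= allocated;
	c->dirty_blocks -= reserved;
}

int main(void)
{
	struct fs_counters c = { .free_blocks = 10000, .dirty_blocks = 0,
				 .root_blocks = 500 };

	if (claim_free_blocks(&c, 2000) == 0)
		printf("claimed 2000: free=%lld dirty=%lld\n",
		       (long long)c.free_blocks, (long long)c.dirty_blocks);

	commit_allocation(&c, 2000, 1800);	/* allocated fewer than claimed */
	printf("after allocation: free=%lld dirty=%lld\n",
	       (long long)c.free_blocks, (long long)c.dirty_blocks);
	return 0;
}

The point of keeping two counters shows up in claim_free_blocks(): free_blocks stays untouched until blocks are actually allocated, so every pending delalloc reservation still counts against new claims through dirty_blocks.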
Diffstat (limited to 'fs/ext4/balloc.c')
-rw-r--r--  fs/ext4/balloc.c  62
1 file changed, 40 insertions(+), 22 deletions(-)
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 57909882c084..edef0023e6e6 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -1605,26 +1605,38 @@ out:
 int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
 						ext4_fsblk_t nblocks)
 {
-	s64 free_blocks;
+	s64 free_blocks, dirty_blocks;
 	ext4_fsblk_t root_blocks = 0;
 	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
+	struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;
 
-	free_blocks = percpu_counter_read(fbc);
+	free_blocks = percpu_counter_read_positive(fbc);
+	dirty_blocks = percpu_counter_read_positive(dbc);
 
 	if (!capable(CAP_SYS_RESOURCE) &&
 		sbi->s_resuid != current->fsuid &&
 		(sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid)))
 		root_blocks = ext4_r_blocks_count(sbi->s_es);
 
-	if (free_blocks - (nblocks + root_blocks) < EXT4_FREEBLOCKS_WATERMARK)
-		free_blocks = percpu_counter_sum(&sbi->s_freeblocks_counter);
-
-	if (free_blocks < (root_blocks + nblocks))
+	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
+						EXT4_FREEBLOCKS_WATERMARK) {
+		free_blocks = percpu_counter_sum(fbc);
+		dirty_blocks = percpu_counter_sum(dbc);
+		if (dirty_blocks < 0) {
+			printk(KERN_CRIT "Dirty block accounting "
+					"went wrong %lld\n",
+					dirty_blocks);
+		}
+	}
+	/* Check whether we have space after
+	 * accounting for current dirty blocks
+	 */
+	if (free_blocks < ((s64)(root_blocks + nblocks) + dirty_blocks))
 		/* we don't have free space */
 		return -ENOSPC;
 
-	/* reduce fs free blocks counter */
-	percpu_counter_sub(fbc, nblocks);
+	/* Add the blocks to nblocks */
+	percpu_counter_add(dbc, nblocks);
 	return 0;
 }
 
@@ -1640,23 +1652,28 @@ int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
 ext4_fsblk_t ext4_has_free_blocks(struct ext4_sb_info *sbi,
 						ext4_fsblk_t nblocks)
 {
-	ext4_fsblk_t free_blocks;
+	ext4_fsblk_t free_blocks, dirty_blocks;
 	ext4_fsblk_t root_blocks = 0;
+	struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
+	struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;
 
-	free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
+	free_blocks = percpu_counter_read_positive(fbc);
+	dirty_blocks = percpu_counter_read_positive(dbc);
 
 	if (!capable(CAP_SYS_RESOURCE) &&
 		sbi->s_resuid != current->fsuid &&
 		(sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid)))
 		root_blocks = ext4_r_blocks_count(sbi->s_es);
 
-	if (free_blocks - (nblocks + root_blocks) < EXT4_FREEBLOCKS_WATERMARK)
-		free_blocks = percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
-
-	if (free_blocks <= root_blocks)
+	if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
+						EXT4_FREEBLOCKS_WATERMARK) {
+		free_blocks = percpu_counter_sum_positive(fbc);
+		dirty_blocks = percpu_counter_sum_positive(dbc);
+	}
+	if (free_blocks <= (root_blocks + dirty_blocks))
 		/* we don't have free space */
 		return 0;
-	if (free_blocks - root_blocks < nblocks)
+	if (free_blocks - (root_blocks + dirty_blocks) < nblocks)
 		return free_blocks - root_blocks;
 	return nblocks;
 }
@@ -1943,13 +1960,14 @@ allocated:
 	le16_add_cpu(&gdp->bg_free_blocks_count, -num);
 	gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
 	spin_unlock(sb_bgl_lock(sbi, group_no));
-	if (!EXT4_I(inode)->i_delalloc_reserved_flag && (*count != num)) {
-		/*
-		 * we allocated less blocks than we
-		 * claimed. Add the difference back.
-		 */
-		percpu_counter_add(&sbi->s_freeblocks_counter, *count - num);
-	}
+	percpu_counter_sub(&sbi->s_freeblocks_counter, num);
+	/*
+	 * Now reduce the dirty block count also. Should not go negative
+	 */
+	if (!EXT4_I(inode)->i_delalloc_reserved_flag)
+		percpu_counter_sub(&sbi->s_dirtyblocks_counter, *count);
+	else
+		percpu_counter_sub(&sbi->s_dirtyblocks_counter, num);
 	if (sbi->s_log_groups_per_flex) {
 		ext4_group_t flex_group = ext4_flex_group(sbi, group_no);
 		spin_lock(sb_bgl_lock(sbi, flex_group));