diff options
Diffstat (limited to 'fs')
| -rw-r--r-- | fs/ext4/ext4.h | 10 | ||||
| -rw-r--r-- | fs/ext4/extents.c | 26 | ||||
| -rw-r--r-- | fs/ext4/mballoc.c | 6 |
3 files changed, 25 insertions(+), 17 deletions(-)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index e6185031c1cc..ece55565b9cd 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
| @@ -268,6 +268,16 @@ struct ext4_io_submit { | |||
| 268 | /* Translate # of blks to # of clusters */ | 268 | /* Translate # of blks to # of clusters */ |
| 269 | #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ | 269 | #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ |
| 270 | (sbi)->s_cluster_bits) | 270 | (sbi)->s_cluster_bits) |
| 271 | /* Mask out the low bits to get the starting block of the cluster */ | ||
| 272 | #define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \ | ||
| 273 | ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) | ||
| 274 | #define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \ | ||
| 275 | ~((ext4_lblk_t) (s)->s_cluster_ratio - 1)) | ||
| 276 | /* Get the cluster offset */ | ||
| 277 | #define EXT4_PBLK_COFF(s, pblk) ((pblk) & \ | ||
| 278 | ((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) | ||
| 279 | #define EXT4_LBLK_COFF(s, lblk) ((lblk) & \ | ||
| 280 | ((ext4_lblk_t) (s)->s_cluster_ratio - 1)) | ||
| 271 | 281 | ||
| 272 | /* | 282 | /* |
| 273 | * Structure of a blocks group descriptor | 283 | * Structure of a blocks group descriptor |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 267c9fb53bf9..4410cc3d6ee2 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
| @@ -1851,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, | |||
| 1851 | depth = ext_depth(inode); | 1851 | depth = ext_depth(inode); |
| 1852 | if (!path[depth].p_ext) | 1852 | if (!path[depth].p_ext) |
| 1853 | goto out; | 1853 | goto out; |
| 1854 | b2 = le32_to_cpu(path[depth].p_ext->ee_block); | 1854 | b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); |
| 1855 | b2 &= ~(sbi->s_cluster_ratio - 1); | ||
| 1856 | 1855 | ||
| 1857 | /* | 1856 | /* |
| 1858 | * get the next allocated block if the extent in the path | 1857 | * get the next allocated block if the extent in the path |
| @@ -1862,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, | |||
| 1862 | b2 = ext4_ext_next_allocated_block(path); | 1861 | b2 = ext4_ext_next_allocated_block(path); |
| 1863 | if (b2 == EXT_MAX_BLOCKS) | 1862 | if (b2 == EXT_MAX_BLOCKS) |
| 1864 | goto out; | 1863 | goto out; |
| 1865 | b2 &= ~(sbi->s_cluster_ratio - 1); | 1864 | b2 = EXT4_LBLK_CMASK(sbi, b2); |
| 1866 | } | 1865 | } |
| 1867 | 1866 | ||
| 1868 | /* check for wrap through zero on extent logical start block*/ | 1867 | /* check for wrap through zero on extent logical start block*/ |
| @@ -2521,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, | |||
| 2521 | * extent, we have to mark the cluster as used (store negative | 2520 | * extent, we have to mark the cluster as used (store negative |
| 2522 | * cluster number in partial_cluster). | 2521 | * cluster number in partial_cluster). |
| 2523 | */ | 2522 | */ |
| 2524 | unaligned = pblk & (sbi->s_cluster_ratio - 1); | 2523 | unaligned = EXT4_PBLK_COFF(sbi, pblk); |
| 2525 | if (unaligned && (ee_len == num) && | 2524 | if (unaligned && (ee_len == num) && |
| 2526 | (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) | 2525 | (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) |
| 2527 | *partial_cluster = EXT4_B2C(sbi, pblk); | 2526 | *partial_cluster = EXT4_B2C(sbi, pblk); |
| @@ -2615,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |||
| 2615 | * accidentally freeing it later on | 2614 | * accidentally freeing it later on |
| 2616 | */ | 2615 | */ |
| 2617 | pblk = ext4_ext_pblock(ex); | 2616 | pblk = ext4_ext_pblock(ex); |
| 2618 | if (pblk & (sbi->s_cluster_ratio - 1)) | 2617 | if (EXT4_PBLK_COFF(sbi, pblk)) |
| 2619 | *partial_cluster = | 2618 | *partial_cluster = |
| 2620 | -((long long)EXT4_B2C(sbi, pblk)); | 2619 | -((long long)EXT4_B2C(sbi, pblk)); |
| 2621 | ex--; | 2620 | ex--; |
| @@ -3770,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk) | |||
| 3770 | { | 3769 | { |
| 3771 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 3770 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| 3772 | ext4_lblk_t lblk_start, lblk_end; | 3771 | ext4_lblk_t lblk_start, lblk_end; |
| 3773 | lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); | 3772 | lblk_start = EXT4_LBLK_CMASK(sbi, lblk); |
| 3774 | lblk_end = lblk_start + sbi->s_cluster_ratio - 1; | 3773 | lblk_end = lblk_start + sbi->s_cluster_ratio - 1; |
| 3775 | 3774 | ||
| 3776 | return ext4_find_delalloc_range(inode, lblk_start, lblk_end); | 3775 | return ext4_find_delalloc_range(inode, lblk_start, lblk_end); |
| @@ -3829,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, | |||
| 3829 | trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); | 3828 | trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); |
| 3830 | 3829 | ||
| 3831 | /* Check towards left side */ | 3830 | /* Check towards left side */ |
| 3832 | c_offset = lblk_start & (sbi->s_cluster_ratio - 1); | 3831 | c_offset = EXT4_LBLK_COFF(sbi, lblk_start); |
| 3833 | if (c_offset) { | 3832 | if (c_offset) { |
| 3834 | lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); | 3833 | lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start); |
| 3835 | lblk_to = lblk_from + c_offset - 1; | 3834 | lblk_to = lblk_from + c_offset - 1; |
| 3836 | 3835 | ||
| 3837 | if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) | 3836 | if (ext4_find_delalloc_range(inode, lblk_from, lblk_to)) |
| @@ -3839,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, | |||
| 3839 | } | 3838 | } |
| 3840 | 3839 | ||
| 3841 | /* Now check towards right. */ | 3840 | /* Now check towards right. */ |
| 3842 | c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); | 3841 | c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks); |
| 3843 | if (allocated_clusters && c_offset) { | 3842 | if (allocated_clusters && c_offset) { |
| 3844 | lblk_from = lblk_start + num_blks; | 3843 | lblk_from = lblk_start + num_blks; |
| 3845 | lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; | 3844 | lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; |
| @@ -4047,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, | |||
| 4047 | struct ext4_ext_path *path) | 4046 | struct ext4_ext_path *path) |
| 4048 | { | 4047 | { |
| 4049 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 4048 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
| 4050 | ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); | 4049 | ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
| 4051 | ext4_lblk_t ex_cluster_start, ex_cluster_end; | 4050 | ext4_lblk_t ex_cluster_start, ex_cluster_end; |
| 4052 | ext4_lblk_t rr_cluster_start; | 4051 | ext4_lblk_t rr_cluster_start; |
| 4053 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); | 4052 | ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); |
| @@ -4065,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, | |||
| 4065 | (rr_cluster_start == ex_cluster_start)) { | 4064 | (rr_cluster_start == ex_cluster_start)) { |
| 4066 | if (rr_cluster_start == ex_cluster_end) | 4065 | if (rr_cluster_start == ex_cluster_end) |
| 4067 | ee_start += ee_len - 1; | 4066 | ee_start += ee_len - 1; |
| 4068 | map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + | 4067 | map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; |
| 4069 | c_offset; | ||
| 4070 | map->m_len = min(map->m_len, | 4068 | map->m_len = min(map->m_len, |
| 4071 | (unsigned) sbi->s_cluster_ratio - c_offset); | 4069 | (unsigned) sbi->s_cluster_ratio - c_offset); |
| 4072 | /* | 4070 | /* |
| @@ -4220,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
| 4220 | */ | 4218 | */ |
| 4221 | map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; | 4219 | map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; |
| 4222 | newex.ee_block = cpu_to_le32(map->m_lblk); | 4220 | newex.ee_block = cpu_to_le32(map->m_lblk); |
| 4223 | cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); | 4221 | cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
| 4224 | 4222 | ||
| 4225 | /* | 4223 | /* |
| 4226 | * If we are doing bigalloc, check to see if the extent returned | 4224 | * If we are doing bigalloc, check to see if the extent returned |
| @@ -4288,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
| 4288 | * needed so that future calls to get_implied_cluster_alloc() | 4286 | * needed so that future calls to get_implied_cluster_alloc() |
| 4289 | * work correctly. | 4287 | * work correctly. |
| 4290 | */ | 4288 | */ |
| 4291 | offset = map->m_lblk & (sbi->s_cluster_ratio - 1); | 4289 | offset = EXT4_LBLK_COFF(sbi, map->m_lblk); |
| 4292 | ar.len = EXT4_NUM_B2C(sbi, offset+allocated); | 4290 | ar.len = EXT4_NUM_B2C(sbi, offset+allocated); |
| 4293 | ar.goal -= offset; | 4291 | ar.goal -= offset; |
| 4294 | ar.logical -= offset; | 4292 | ar.logical -= offset; |
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 04766d9a29cd..04a5c7504be9 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
| @@ -4126,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, | |||
| 4126 | ext4_get_group_no_and_offset(sb, goal, &group, &block); | 4126 | ext4_get_group_no_and_offset(sb, goal, &group, &block); |
| 4127 | 4127 | ||
| 4128 | /* set up allocation goals */ | 4128 | /* set up allocation goals */ |
| 4129 | ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1); | 4129 | ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); |
| 4130 | ac->ac_status = AC_STATUS_CONTINUE; | 4130 | ac->ac_status = AC_STATUS_CONTINUE; |
| 4131 | ac->ac_sb = sb; | 4131 | ac->ac_sb = sb; |
| 4132 | ac->ac_inode = ar->inode; | 4132 | ac->ac_inode = ar->inode; |
| @@ -4668,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
| 4668 | * blocks at the beginning or the end unless we are explicitly | 4668 | * blocks at the beginning or the end unless we are explicitly |
| 4669 | * requested to avoid doing so. | 4669 | * requested to avoid doing so. |
| 4670 | */ | 4670 | */ |
| 4671 | overflow = block & (sbi->s_cluster_ratio - 1); | 4671 | overflow = EXT4_PBLK_COFF(sbi, block); |
| 4672 | if (overflow) { | 4672 | if (overflow) { |
| 4673 | if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { | 4673 | if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { |
| 4674 | overflow = sbi->s_cluster_ratio - overflow; | 4674 | overflow = sbi->s_cluster_ratio - overflow; |
| @@ -4682,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
| 4682 | count += overflow; | 4682 | count += overflow; |
| 4683 | } | 4683 | } |
| 4684 | } | 4684 | } |
| 4685 | overflow = count & (sbi->s_cluster_ratio - 1); | 4685 | overflow = EXT4_LBLK_COFF(sbi, count); |
| 4686 | if (overflow) { | 4686 | if (overflow) { |
| 4687 | if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { | 4687 | if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { |
| 4688 | if (count > overflow) | 4688 | if (count > overflow) |
