diff options
| author | Theodore Ts'o <tytso@mit.edu> | 2011-01-10 12:13:26 -0500 |
|---|---|---|
| committer | Theodore Ts'o <tytso@mit.edu> | 2011-01-10 12:13:26 -0500 |
| commit | b05e6ae58a13b56e3e11882c1fc71948c9b29760 (patch) | |
| tree | 452cbadcbc8091b4db95f917f28b0f9de845dabf /fs/ext4/extents.c | |
| parent | 01f49d0b9d0209dc1194255b11601e4b94447b36 (diff) | |
ext4: drop ec_type from the ext4_ext_cache structure
We can encode the ec_type information by using ec_len == 0 to denote
EXT4_EXT_CACHE_NO, ec_start == 0 to denote EXT4_EXT_CACHE_GAP, and if
neither is true, then the cache type must be EXT4_EXT_CACHE_EXTENT.
This allows us to reduce the size of the ext4_ext_cache structure
(embedded in ext4_inode_info) by another 8 bytes. (ec_type is 4 bytes,
plus another 4 bytes of padding)
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/extents.c')
| -rw-r--r-- | fs/ext4/extents.c | 37 |
1 file changed, 15 insertions, 22 deletions
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index f1a4354ea3cf..9081d1060a5f 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
| @@ -1894,12 +1894,10 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, | |||
| 1894 | cbex.ec_block = start; | 1894 | cbex.ec_block = start; |
| 1895 | cbex.ec_len = end - start; | 1895 | cbex.ec_len = end - start; |
| 1896 | cbex.ec_start = 0; | 1896 | cbex.ec_start = 0; |
| 1897 | cbex.ec_type = EXT4_EXT_CACHE_GAP; | ||
| 1898 | } else { | 1897 | } else { |
| 1899 | cbex.ec_block = le32_to_cpu(ex->ee_block); | 1898 | cbex.ec_block = le32_to_cpu(ex->ee_block); |
| 1900 | cbex.ec_len = ext4_ext_get_actual_len(ex); | 1899 | cbex.ec_len = ext4_ext_get_actual_len(ex); |
| 1901 | cbex.ec_start = ext4_ext_pblock(ex); | 1900 | cbex.ec_start = ext4_ext_pblock(ex); |
| 1902 | cbex.ec_type = EXT4_EXT_CACHE_EXTENT; | ||
| 1903 | } | 1901 | } |
| 1904 | 1902 | ||
| 1905 | if (unlikely(cbex.ec_len == 0)) { | 1903 | if (unlikely(cbex.ec_len == 0)) { |
| @@ -1939,13 +1937,12 @@ static int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block, | |||
| 1939 | 1937 | ||
| 1940 | static void | 1938 | static void |
| 1941 | ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, | 1939 | ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block, |
| 1942 | __u32 len, ext4_fsblk_t start, int type) | 1940 | __u32 len, ext4_fsblk_t start) |
| 1943 | { | 1941 | { |
| 1944 | struct ext4_ext_cache *cex; | 1942 | struct ext4_ext_cache *cex; |
| 1945 | BUG_ON(len == 0); | 1943 | BUG_ON(len == 0); |
| 1946 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | 1944 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); |
| 1947 | cex = &EXT4_I(inode)->i_cached_extent; | 1945 | cex = &EXT4_I(inode)->i_cached_extent; |
| 1948 | cex->ec_type = type; | ||
| 1949 | cex->ec_block = block; | 1946 | cex->ec_block = block; |
| 1950 | cex->ec_len = len; | 1947 | cex->ec_len = len; |
| 1951 | cex->ec_start = start; | 1948 | cex->ec_start = start; |
| @@ -1998,15 +1995,18 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, | |||
| 1998 | } | 1995 | } |
| 1999 | 1996 | ||
| 2000 | ext_debug(" -> %u:%lu\n", lblock, len); | 1997 | ext_debug(" -> %u:%lu\n", lblock, len); |
| 2001 | ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP); | 1998 | ext4_ext_put_in_cache(inode, lblock, len, 0); |
| 2002 | } | 1999 | } |
| 2003 | 2000 | ||
| 2001 | /* | ||
| 2002 | * Return 0 if cache is invalid; 1 if the cache is valid | ||
| 2003 | */ | ||
| 2004 | static int | 2004 | static int |
| 2005 | ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, | 2005 | ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, |
| 2006 | struct ext4_extent *ex) | 2006 | struct ext4_extent *ex) |
| 2007 | { | 2007 | { |
| 2008 | struct ext4_ext_cache *cex; | 2008 | struct ext4_ext_cache *cex; |
| 2009 | int ret = EXT4_EXT_CACHE_NO; | 2009 | int ret = 0; |
| 2010 | 2010 | ||
| 2011 | /* | 2011 | /* |
| 2012 | * We borrow i_block_reservation_lock to protect i_cached_extent | 2012 | * We borrow i_block_reservation_lock to protect i_cached_extent |
| @@ -2015,11 +2015,9 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, | |||
| 2015 | cex = &EXT4_I(inode)->i_cached_extent; | 2015 | cex = &EXT4_I(inode)->i_cached_extent; |
| 2016 | 2016 | ||
| 2017 | /* has cache valid data? */ | 2017 | /* has cache valid data? */ |
| 2018 | if (cex->ec_type == EXT4_EXT_CACHE_NO) | 2018 | if (cex->ec_len == 0) |
| 2019 | goto errout; | 2019 | goto errout; |
| 2020 | 2020 | ||
| 2021 | BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP && | ||
| 2022 | cex->ec_type != EXT4_EXT_CACHE_EXTENT); | ||
| 2023 | if (in_range(block, cex->ec_block, cex->ec_len)) { | 2021 | if (in_range(block, cex->ec_block, cex->ec_len)) { |
| 2024 | ex->ee_block = cpu_to_le32(cex->ec_block); | 2022 | ex->ee_block = cpu_to_le32(cex->ec_block); |
| 2025 | ext4_ext_store_pblock(ex, cex->ec_start); | 2023 | ext4_ext_store_pblock(ex, cex->ec_start); |
| @@ -2027,7 +2025,7 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, | |||
| 2027 | ext_debug("%u cached by %u:%u:%llu\n", | 2025 | ext_debug("%u cached by %u:%u:%llu\n", |
| 2028 | block, | 2026 | block, |
| 2029 | cex->ec_block, cex->ec_len, cex->ec_start); | 2027 | cex->ec_block, cex->ec_len, cex->ec_start); |
| 2030 | ret = cex->ec_type; | 2028 | ret = 1; |
| 2031 | } | 2029 | } |
| 2032 | errout: | 2030 | errout: |
| 2033 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 2031 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
| @@ -3298,7 +3296,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
| 3298 | struct ext4_extent_header *eh; | 3296 | struct ext4_extent_header *eh; |
| 3299 | struct ext4_extent newex, *ex; | 3297 | struct ext4_extent newex, *ex; |
| 3300 | ext4_fsblk_t newblock; | 3298 | ext4_fsblk_t newblock; |
| 3301 | int err = 0, depth, ret, cache_type; | 3299 | int err = 0, depth, ret; |
| 3302 | unsigned int allocated = 0; | 3300 | unsigned int allocated = 0; |
| 3303 | struct ext4_allocation_request ar; | 3301 | struct ext4_allocation_request ar; |
| 3304 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; | 3302 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; |
| @@ -3307,9 +3305,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
| 3307 | map->m_lblk, map->m_len, inode->i_ino); | 3305 | map->m_lblk, map->m_len, inode->i_ino); |
| 3308 | 3306 | ||
| 3309 | /* check in cache */ | 3307 | /* check in cache */ |
| 3310 | cache_type = ext4_ext_in_cache(inode, map->m_lblk, &newex); | 3308 | if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { |
| 3311 | if (cache_type) { | 3309 | if (!newex.ee_start_lo && !newex.ee_start_hi) { |
| 3312 | if (cache_type == EXT4_EXT_CACHE_GAP) { | ||
| 3313 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { | 3310 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { |
| 3314 | /* | 3311 | /* |
| 3315 | * block isn't allocated yet and | 3312 | * block isn't allocated yet and |
| @@ -3318,7 +3315,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
| 3318 | goto out2; | 3315 | goto out2; |
| 3319 | } | 3316 | } |
| 3320 | /* we should allocate requested block */ | 3317 | /* we should allocate requested block */ |
| 3321 | } else if (cache_type == EXT4_EXT_CACHE_EXTENT) { | 3318 | } else { |
| 3322 | /* block is already allocated */ | 3319 | /* block is already allocated */ |
| 3323 | newblock = map->m_lblk | 3320 | newblock = map->m_lblk |
| 3324 | - le32_to_cpu(newex.ee_block) | 3321 | - le32_to_cpu(newex.ee_block) |
| @@ -3327,8 +3324,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
| 3327 | allocated = ext4_ext_get_actual_len(&newex) - | 3324 | allocated = ext4_ext_get_actual_len(&newex) - |
| 3328 | (map->m_lblk - le32_to_cpu(newex.ee_block)); | 3325 | (map->m_lblk - le32_to_cpu(newex.ee_block)); |
| 3329 | goto out; | 3326 | goto out; |
| 3330 | } else { | ||
| 3331 | BUG(); | ||
| 3332 | } | 3327 | } |
| 3333 | } | 3328 | } |
| 3334 | 3329 | ||
| @@ -3379,8 +3374,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
| 3379 | /* Do not put uninitialized extent in the cache */ | 3374 | /* Do not put uninitialized extent in the cache */ |
| 3380 | if (!ext4_ext_is_uninitialized(ex)) { | 3375 | if (!ext4_ext_is_uninitialized(ex)) { |
| 3381 | ext4_ext_put_in_cache(inode, ee_block, | 3376 | ext4_ext_put_in_cache(inode, ee_block, |
| 3382 | ee_len, ee_start, | 3377 | ee_len, ee_start); |
| 3383 | EXT4_EXT_CACHE_EXTENT); | ||
| 3384 | goto out; | 3378 | goto out; |
| 3385 | } | 3379 | } |
| 3386 | ret = ext4_ext_handle_uninitialized_extents(handle, | 3380 | ret = ext4_ext_handle_uninitialized_extents(handle, |
| @@ -3512,8 +3506,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
| 3512 | * when it is _not_ an uninitialized extent. | 3506 | * when it is _not_ an uninitialized extent. |
| 3513 | */ | 3507 | */ |
| 3514 | if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { | 3508 | if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) { |
| 3515 | ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock, | 3509 | ext4_ext_put_in_cache(inode, map->m_lblk, allocated, newblock); |
| 3516 | EXT4_EXT_CACHE_EXTENT); | ||
| 3517 | ext4_update_inode_fsync_trans(handle, inode, 1); | 3510 | ext4_update_inode_fsync_trans(handle, inode, 1); |
| 3518 | } else | 3511 | } else |
| 3519 | ext4_update_inode_fsync_trans(handle, inode, 0); | 3512 | ext4_update_inode_fsync_trans(handle, inode, 0); |
| @@ -3789,7 +3782,7 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, | |||
| 3789 | 3782 | ||
| 3790 | logical = (__u64)newex->ec_block << blksize_bits; | 3783 | logical = (__u64)newex->ec_block << blksize_bits; |
| 3791 | 3784 | ||
| 3792 | if (newex->ec_type == EXT4_EXT_CACHE_GAP) { | 3785 | if (newex->ec_start == 0) { |
| 3793 | pgoff_t offset; | 3786 | pgoff_t offset; |
| 3794 | struct page *page; | 3787 | struct page *page; |
| 3795 | struct buffer_head *bh = NULL; | 3788 | struct buffer_head *bh = NULL; |
