author		Zheng Liu <wenqing.lz@taobao.com>	2012-11-08 21:57:32 -0500
committer	Theodore Ts'o <tytso@mit.edu>		2012-11-08 21:57:32 -0500
commit		51865fda28e585bdcc164474ff6438a9ccdbfada (patch)
tree		c4450c21bdbce0750543d8b1cd59992fb342a650 /fs/ext4/inode.c
parent		9a26b66175e1c221f39bbe09e2e1d0a31a14ba6d (diff)
ext4: let ext4 maintain extent status tree
This patch lets ext4 maintain the extent status tree.

Currently it only tracks delayed extents in the extent status tree. When a
delayed allocation is issued, the corresponding delayed extent is inserted
into the tree; when a delayed extent is written out or invalidated, it is
removed from the tree.
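As a rough illustration of that lifecycle, the user-space sketch below models the bookkeeping with a plain linked list in place of the extent status tree. struct toy_extent, es_insert() and es_remove() are invented stand-ins for the ext4_es_insert_extent()/ext4_es_remove_extent() calls used in the diff below; the real tree splits, merges and searches extents in ways this toy deliberately ignores.

/*
 * Toy model of the delayed-extent bookkeeping described above.
 * NOT ext4 code: the list, struct toy_extent, es_insert() and
 * es_remove() are hypothetical stand-ins for the real extent
 * status tree and ext4_es_insert_extent()/ext4_es_remove_extent().
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_extent {
	unsigned int lblk;	/* first logical block */
	unsigned int len;	/* number of blocks */
	struct toy_extent *next;
};

static struct toy_extent *tree;	/* stand-in for the per-inode es tree */

/* "delayed allocation is issued": remember the delayed range */
static void es_insert(unsigned int lblk, unsigned int len)
{
	struct toy_extent *es = malloc(sizeof(*es));

	if (!es)
		exit(1);
	es->lblk = lblk;
	es->len = len;
	es->next = tree;
	tree = es;
}

/* "written out or invalidated": forget any extent starting in the range */
static void es_remove(unsigned int lblk, unsigned int len)
{
	struct toy_extent **p = &tree;

	while (*p) {
		struct toy_extent *es = *p;

		if (es->lblk >= lblk && es->lblk < lblk + len) {
			*p = es->next;
			free(es);
		} else {
			p = &es->next;
		}
	}
}

int main(void)
{
	es_insert(20, 4);	/* buffered write reserves blocks 20..23 */
	es_remove(20, 4);	/* writeback allocates them: drop the record */
	printf("delayed extents left: %s\n", tree ? "some" : "none");
	return 0;
}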
Signed-off-by: Yongqiang Yang <xiaoqiangnk@gmail.com>
Signed-off-by: Allison Henderson <achender@linux.vnet.ibm.com>
Signed-off-by: Zheng Liu <wenqing.lz@taobao.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/inode.c')
-rw-r--r--	fs/ext4/inode.c | 38
1 file changed, 35 insertions(+), 3 deletions(-)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index f84bfd6d1867..1e92349272e0 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -574,7 +574,16 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 	up_read((&EXT4_I(inode)->i_data_sem));
 
 	if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
-		int ret = check_block_validity(inode, map);
+		int ret;
+		if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
+			/* delayed alloc may be allocated by fallocate and
+			 * coverted to initialized by directIO.
+			 * we need to handle delayed extent here.
+			 */
+			down_write((&EXT4_I(inode)->i_data_sem));
+			goto delayed_mapped;
+		}
+		ret = check_block_validity(inode, map);
 		if (ret != 0)
 			return ret;
 	}
@@ -656,8 +665,16 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 		 * set the BH_Da_Mapped bit on them. Its important to do this
 		 * under the protection of i_data_sem.
 		 */
-		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
+		if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
+			int ret;
 			set_buffers_da_mapped(inode, map);
+delayed_mapped:
+			/* delayed allocation blocks has been allocated */
+			ret = ext4_es_remove_extent(inode, map->m_lblk,
+						    map->m_len);
+			if (ret < 0)
+				retval = ret;
+		}
 	}
 
 	up_write((&EXT4_I(inode)->i_data_sem));
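The two hunks above cooperate: if ext4_map_blocks() finds the range already mapped under the read lock while the caller passed EXT4_GET_BLOCKS_DELALLOC_RESERVE, it re-takes i_data_sem for write and jumps to the delayed_mapped: label so the extent-status removal happens under the write lock, at the same place the regular delayed-allocation path drops the record. Below is a minimal user-space sketch of that shape, assuming pthread_rwlock_t as a stand-in for i_data_sem and remove_delayed_extent() as a stand-in for ext4_es_remove_extent(); the real function also reaches the label from its allocation path, which the sketch omits.

/*
 * User-space sketch of the control flow the two hunks above add to
 * ext4_map_blocks().  pthread_rwlock_t stands in for i_data_sem,
 * remove_delayed_extent() for ext4_es_remove_extent(), and the flag
 * value is invented; none of this is the real kernel API.
 */
#include <pthread.h>
#include <stdio.h>

#define GET_BLOCKS_DELALLOC_RESERVE 0x1

static pthread_rwlock_t data_sem = PTHREAD_RWLOCK_INITIALIZER;

static int remove_delayed_extent(unsigned int lblk, unsigned int len)
{
	printf("drop delayed extent [%u, +%u)\n", lblk, len);
	return 0;
}

static int map_blocks(unsigned int lblk, unsigned int len, int flags)
{
	int retval;

	/* Fast path: look the mapping up under the read lock. */
	pthread_rwlock_rdlock(&data_sem);
	retval = (int)len;		/* pretend the range is already mapped */
	pthread_rwlock_unlock(&data_sem);

	if (retval > 0 && (flags & GET_BLOCKS_DELALLOC_RESERVE)) {
		/*
		 * The blocks may have been allocated elsewhere while this
		 * range was still recorded as delayed.  Re-take the lock
		 * for writing and fall through to the common cleanup label.
		 */
		pthread_rwlock_wrlock(&data_sem);
		goto delayed_mapped;
	}
	return retval;

delayed_mapped:
	/* Delayed blocks are now backed by real blocks: drop the record. */
	if (remove_delayed_extent(lblk, len) < 0)
		retval = -1;
	pthread_rwlock_unlock(&data_sem);
	return retval;
}

int main(void)
{
	return map_blocks(20, 4, GET_BLOCKS_DELALLOC_RESERVE) < 0;
}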
@@ -1303,6 +1320,7 @@ static void ext4_da_page_release_reservation(struct page *page,
 	struct inode *inode = page->mapping->host;
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	int num_clusters;
+	ext4_fsblk_t lblk;
 
 	head = page_buffers(page);
 	bh = head;
@@ -1317,11 +1335,15 @@ static void ext4_da_page_release_reservation(struct page *page,
 		curr_off = next_off;
 	} while ((bh = bh->b_this_page) != head);
 
+	if (to_release) {
+		lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+		ext4_es_remove_extent(inode, lblk, to_release);
+	}
+
 	/* If we have released all the blocks belonging to a cluster, then we
 	 * need to release the reserved space for that cluster. */
 	num_clusters = EXT4_NUM_B2C(sbi, to_release);
 	while (num_clusters > 0) {
-		ext4_fsblk_t lblk;
 		lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
 			((num_clusters - 1) << sbi->s_cluster_bits);
 		if (sbi->s_cluster_ratio == 1 ||
@@ -1502,9 +1524,15 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
 	struct pagevec pvec;
 	struct inode *inode = mpd->inode;
 	struct address_space *mapping = inode->i_mapping;
+	ext4_lblk_t start, last;
 
 	index = mpd->first_page;
 	end = mpd->next_page - 1;
+
+	start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	ext4_es_remove_extent(inode, start, last - start + 1);
+
 	while (index <= end) {
 		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
 		if (nr_pages == 0)
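Both ext4_da_page_release_reservation() and ext4_da_block_invalidatepages() above derive a logical block number from a page-cache index with index << (PAGE_CACHE_SHIFT - inode->i_blkbits). The snippet below just works that arithmetic through with assumed values (4 KiB pages, 1 KiB blocks); the constants are illustrative, not taken from the patch.

/*
 * Worked example of the page-index to logical-block conversion used
 * in the two hunks above.  Assumed values: 4 KiB pages
 * (PAGE_CACHE_SHIFT == 12) and 1 KiB blocks (i_blkbits == 10),
 * i.e. 4 filesystem blocks per page.
 */
#include <stdio.h>

int main(void)
{
	unsigned int page_cache_shift = 12;	/* 4 KiB pages (assumed) */
	unsigned int blkbits = 10;		/* 1 KiB blocks (assumed) */
	unsigned long index = 5;		/* page-cache index */

	unsigned long first_lblk = index << (page_cache_shift - blkbits);
	unsigned long blocks_per_page = 1UL << (page_cache_shift - blkbits);

	/* Page 5 covers logical blocks 20..23 */
	printf("page %lu -> lblk %lu..%lu\n", index, first_lblk,
	       first_lblk + blocks_per_page - 1);
	return 0;
}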
@@ -1816,6 +1844,10 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
 		goto out_unlock;
 	}
 
+	retval = ext4_es_insert_extent(inode, map->m_lblk, map->m_len);
+	if (retval)
+		goto out_unlock;
+
 	/* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
 	 * and it should not appear on the bh->b_state.
 	 */
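This last hunk is the insertion side of the bookkeeping: once the delayed reservation has been made, the range is recorded in the extent status tree, and a failure unwinds through the function's existing out_unlock path. The sketch below shows only that shape; record_delayed_extent() and the elided locking are hypothetical stand-ins, not the ext4 API.

/*
 * Shape of the change to ext4_da_map_blocks() above: record the
 * delayed range, and bail out through the existing unlock path if
 * the bookkeeping call fails.  record_delayed_extent() is a made-up
 * stand-in for ext4_es_insert_extent().
 */
#include <stdio.h>

static int record_delayed_extent(unsigned int lblk, unsigned int len)
{
	printf("remember delayed extent [%u, +%u)\n", lblk, len);
	return 0;			/* 0 on success, negative on error */
}

static int da_map_blocks(unsigned int lblk, unsigned int len)
{
	int retval = 0;

	/* ...reserve space for the delayed write (elided)... */

	retval = record_delayed_extent(lblk, len);
	if (retval)
		goto out_unlock;

	/* ...report the block as delayed to the caller (elided)... */

out_unlock:
	/* ...drop i_data_sem (elided)... */
	return retval;
}

int main(void)
{
	return da_map_blocks(20, 4);
}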