Diffstat (limited to 'fs/ext4/inode.c')
 -rw-r--r--  fs/ext4/inode.c  309
 1 file changed, 227 insertions, 82 deletions
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 98d3fe7057ef..a6444cee0c7e 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -72,12 +72,17 @@ static int ext4_inode_is_fast_symlink(struct inode *inode)
  * "bh" may be NULL: a metadata block may have been freed from memory
  * but there may still be a record of it in the journal, and that record
  * still needs to be revoked.
+ *
+ * If the handle isn't valid we're not journaling so there's nothing to do.
  */
 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
 		struct buffer_head *bh, ext4_fsblk_t blocknr)
 {
 	int err;
 
+	if (!ext4_handle_valid(handle))
+		return 0;
+
 	might_sleep();
 
 	BUFFER_TRACE(bh, "enter");
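
For readers of this hunk, ext4_handle_valid() is defined in ext4_jbd2.h rather than in inode.c. A hedged sketch of the assumed mechanism: when the filesystem is mounted without a journal, ext4_journal_start() hands back a small fake handle that only acts as a reference count, so a real jbd2 handle can be told apart by its pointer value (the threshold constant and exact definition are assumptions, not shown in this diff):

    /* Sketch only: assumed shape of the no-journal handle test (ext4_jbd2.h). */
    #define EXT4_NOJOURNAL_MAX_REF_COUNT ((unsigned long) 4096)

    static inline int ext4_handle_valid(handle_t *handle)
    {
    	/* fake no-journal handles are tiny ref-count values, not real pointers */
    	if ((unsigned long)handle < EXT4_NOJOURNAL_MAX_REF_COUNT)
    		return 0;
    	return 1;
    }
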
@@ -170,7 +175,9 @@ static handle_t *start_transaction(struct inode *inode)
  */
 static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
 {
-	if (handle->h_buffer_credits > EXT4_RESERVE_TRANS_BLOCKS)
+	if (!ext4_handle_valid(handle))
+		return 0;
+	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
 		return 0;
 	if (!ext4_journal_extend(handle, blocks_for_truncate(inode)))
 		return 0;
@@ -184,6 +191,7 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
  */
 static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
 {
+	BUG_ON(EXT4_JOURNAL(inode) == NULL);
 	jbd_debug(2, "restarting handle %p\n", handle);
 	return ext4_journal_restart(handle, blocks_for_truncate(inode));
 }
@@ -216,7 +224,7 @@ void ext4_delete_inode(struct inode *inode)
 	}
 
 	if (IS_SYNC(inode))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 	inode->i_size = 0;
 	err = ext4_mark_inode_dirty(handle, inode);
 	if (err) {
@@ -233,7 +241,7 @@ void ext4_delete_inode(struct inode *inode)
 	 * enough credits left in the handle to remove the inode from
 	 * the orphan list and set the dtime field.
 	 */
-	if (handle->h_buffer_credits < 3) {
+	if (!ext4_handle_has_enough_credits(handle, 3)) {
 		err = ext4_journal_extend(handle, 3);
 		if (err > 0)
 			err = ext4_journal_restart(handle, 3);
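
The ext4_handle_sync() and ext4_handle_has_enough_credits() wrappers used in the hunks above also live in ext4_jbd2.h and are not part of this diff. A minimal sketch of their assumed behaviour, treating an invalid (no-journal) handle as always having enough credits and as having no sync flag to set:

    /* Sketch only: assumed behaviour of the handle wrappers (ext4_jbd2.h). */
    static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
    {
    	/* a no-journal fake handle never runs out of credits */
    	if (ext4_handle_valid(handle) && handle->h_buffer_credits < needed)
    		return 0;
    	return 1;
    }

    static inline void ext4_handle_sync(handle_t *handle)
    {
    	/* only a real jbd2 handle has an h_sync flag to set */
    	if (ext4_handle_valid(handle))
    		handle->h_sync = 1;
    }
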
@@ -506,10 +514,10 @@ static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
  * return the total number of blocks to be allocate, including the
  * direct and indirect blocks.
  */
-static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
+static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
 		int blocks_to_boundary)
 {
-	unsigned long count = 0;
+	unsigned int count = 0;
 
 	/*
 	 * Simple case, [t,d]Indirect block(s) has not allocated yet
@@ -547,6 +555,7 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
 			int indirect_blks, int blks,
 			ext4_fsblk_t new_blocks[4], int *err)
 {
+	struct ext4_allocation_request ar;
 	int target, i;
 	unsigned long count = 0, blk_allocated = 0;
 	int index = 0;
@@ -595,10 +604,17 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
 	if (!target)
 		goto allocated;
 	/* Now allocate data blocks */
-	count = target;
-	/* allocating blocks for data blocks */
-	current_block = ext4_new_blocks(handle, inode, iblock,
-						goal, &count, err);
+	memset(&ar, 0, sizeof(ar));
+	ar.inode = inode;
+	ar.goal = goal;
+	ar.len = target;
+	ar.logical = iblock;
+	if (S_ISREG(inode->i_mode))
+		/* enable in-core preallocation only for regular files */
+		ar.flags = EXT4_MB_HINT_DATA;
+
+	current_block = ext4_mb_new_blocks(handle, &ar, err);
+
 	if (*err && (target == blks)) {
 		/*
 		 * if the allocation failed and we didn't allocate
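
The switch from ext4_new_blocks() to ext4_mb_new_blocks() above replaces a goal/count argument pair with a request descriptor. As an illustration of the calling convention, using only the fields that appear in this hunk (struct ext4_allocation_request carries further placement-hint fields that are simply left zeroed here), a caller now describes one allocation roughly like this:

    /* Illustrative sketch of the mballoc request-descriptor calling convention. */
    static ext4_fsblk_t alloc_data_blocks(handle_t *handle, struct inode *inode,
    				      ext4_lblk_t logical, ext4_fsblk_t goal,
    				      unsigned int wanted, int *err)
    {
    	struct ext4_allocation_request ar;

    	memset(&ar, 0, sizeof(ar));	/* unset hint fields stay zero */
    	ar.inode   = inode;
    	ar.logical = logical;		/* file-relative block being mapped */
    	ar.goal    = goal;		/* preferred physical block */
    	ar.len     = wanted;		/* in: blocks wanted, out: blocks granted */
    	if (S_ISREG(inode->i_mode))
    		ar.flags = EXT4_MB_HINT_DATA;

    	/* mballoc may trim ar.len; callers must use ar.len afterwards, not 'wanted' */
    	return ext4_mb_new_blocks(handle, &ar, err);
    }

That ar.len is an in/out value is why the next hunk changes the accounting to "blk_allocated += ar.len".
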
@@ -614,7 +630,7 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
 			 */
 			new_blocks[index] = current_block;
 		}
-		blk_allocated += count;
+		blk_allocated += ar.len;
 	}
 allocated:
 	/* total number of blocks allocated for direct blocks */
@@ -709,8 +725,8 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
 
-		BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
-		err = ext4_journal_dirty_metadata(handle, bh);
+		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+		err = ext4_handle_dirty_metadata(handle, inode, bh);
 		if (err)
 			goto failed;
 	}
@@ -792,8 +808,8 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
 		 * generic_commit_write->__mark_inode_dirty->ext4_dirty_inode.
 		 */
 		jbd_debug(5, "splicing indirect only\n");
-		BUFFER_TRACE(where->bh, "call ext4_journal_dirty_metadata");
-		err = ext4_journal_dirty_metadata(handle, where->bh);
+		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
+		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
 		if (err)
 			goto err_out;
 	} else {
@@ -840,10 +856,10 @@ err_out:
  * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
  * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
  */
-int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
-		ext4_lblk_t iblock, unsigned long maxblocks,
+static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
+		ext4_lblk_t iblock, unsigned int maxblocks,
 		struct buffer_head *bh_result,
 		int create, int extend_disksize)
 {
 	int err = -EIO;
 	ext4_lblk_t offsets[4];
@@ -1045,7 +1061,7 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
  * It returns the error in case of allocation failure.
  */
 int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
-			unsigned long max_blocks, struct buffer_head *bh,
+			unsigned int max_blocks, struct buffer_head *bh,
 			int create, int extend_disksize, int flag)
 {
 	int retval;
@@ -1221,8 +1237,8 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
 			set_buffer_uptodate(bh);
 		}
 		unlock_buffer(bh);
-		BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
-		err = ext4_journal_dirty_metadata(handle, bh);
+		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+		err = ext4_handle_dirty_metadata(handle, inode, bh);
 		if (!fatal)
 			fatal = err;
 	} else {
@@ -1335,6 +1351,10 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping,
 	pgoff_t index;
 	unsigned from, to;
 
+	trace_mark(ext4_write_begin,
+		   "dev %s ino %lu pos %llu len %u flags %u",
+		   inode->i_sb->s_id, inode->i_ino,
+		   (unsigned long long) pos, len, flags);
 	index = pos >> PAGE_CACHE_SHIFT;
 	from = pos & (PAGE_CACHE_SIZE - 1);
 	to = from + len;
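
The trace_mark() calls added in this and the following hunks use the kernel markers infrastructure of this era (include/linux/marker.h). As a rough sketch, assuming the marker_probe_register()/marker_probe_func interface of that API (signatures recalled from memory, not confirmed by this diff), a debugging module could hook the new ext4_write_begin marker like this:

    /* Hedged sketch: a module that echoes the ext4_write_begin marker to the log. */
    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/marker.h>

    static void ext4_wb_probe(void *probe_private, void *call_private,
    			  const char *fmt, va_list *args)
    {
    	/* fmt is the format string passed to trace_mark() in ext4_write_begin */
    	vprintk(fmt, *args);
    }

    static int __init ext4_wb_probe_init(void)
    {
    	return marker_probe_register("ext4_write_begin",
    			"dev %s ino %lu pos %llu len %u flags %u",
    			ext4_wb_probe, NULL);
    }

    static void __exit ext4_wb_probe_exit(void)
    {
    	marker_probe_unregister("ext4_write_begin", ext4_wb_probe, NULL);
    }

    module_init(ext4_wb_probe_init);
    module_exit(ext4_wb_probe_exit);
    MODULE_LICENSE("GPL");
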
@@ -1387,7 +1407,7 @@ static int write_end_fn(handle_t *handle, struct buffer_head *bh)
 	if (!buffer_mapped(bh) || buffer_freed(bh))
 		return 0;
 	set_buffer_uptodate(bh);
-	return ext4_journal_dirty_metadata(handle, bh);
+	return ext4_handle_dirty_metadata(handle, NULL, bh);
 }
 
 /*
@@ -1406,6 +1426,10 @@ static int ext4_ordered_write_end(struct file *file,
 	struct inode *inode = mapping->host;
 	int ret = 0, ret2;
 
+	trace_mark(ext4_ordered_write_end,
+		   "dev %s ino %lu pos %llu len %u copied %u",
+		   inode->i_sb->s_id, inode->i_ino,
+		   (unsigned long long) pos, len, copied);
 	ret = ext4_jbd2_file_inode(handle, inode);
 
 	if (ret == 0) {
@@ -1444,6 +1468,10 @@ static int ext4_writeback_write_end(struct file *file,
 	int ret = 0, ret2;
 	loff_t new_i_size;
 
+	trace_mark(ext4_writeback_write_end,
+		   "dev %s ino %lu pos %llu len %u copied %u",
+		   inode->i_sb->s_id, inode->i_ino,
+		   (unsigned long long) pos, len, copied);
 	new_i_size = pos + copied;
 	if (new_i_size > EXT4_I(inode)->i_disksize) {
 		ext4_update_i_disksize(inode, new_i_size);
@@ -1479,6 +1507,10 @@ static int ext4_journalled_write_end(struct file *file,
 	unsigned from, to;
 	loff_t new_i_size;
 
+	trace_mark(ext4_journalled_write_end,
+		   "dev %s ino %lu pos %llu len %u copied %u",
+		   inode->i_sb->s_id, inode->i_ino,
+		   (unsigned long long) pos, len, copied);
 	from = pos & (PAGE_CACHE_SIZE - 1);
 	to = from + len;
 
@@ -1625,7 +1657,7 @@ struct mpage_da_data {
 	get_block_t *get_block;
 	struct writeback_control *wbc;
 	int io_done;
-	long pages_written;
+	int pages_written;
 	int retval;
 };
 
@@ -1645,35 +1677,39 @@
  */
 static int mpage_da_submit_io(struct mpage_da_data *mpd)
 {
-	struct address_space *mapping = mpd->inode->i_mapping;
-	int ret = 0, err, nr_pages, i;
-	unsigned long index, end;
-	struct pagevec pvec;
 	long pages_skipped;
+	struct pagevec pvec;
+	unsigned long index, end;
+	int ret = 0, err, nr_pages, i;
+	struct inode *inode = mpd->inode;
+	struct address_space *mapping = inode->i_mapping;
 
 	BUG_ON(mpd->next_page <= mpd->first_page);
-	pagevec_init(&pvec, 0);
+	/*
+	 * We need to start from the first_page to the next_page - 1
+	 * to make sure we also write the mapped dirty buffer_heads.
+	 * If we look at mpd->lbh.b_blocknr we would only be looking
+	 * at the currently mapped buffer_heads.
+	 */
 	index = mpd->first_page;
 	end = mpd->next_page - 1;
 
+	pagevec_init(&pvec, 0);
 	while (index <= end) {
-		/*
-		 * We can use PAGECACHE_TAG_DIRTY lookup here because
-		 * even though we have cleared the dirty flag on the page
-		 * We still keep the page in the radix tree with tag
-		 * PAGECACHE_TAG_DIRTY. See clear_page_dirty_for_io.
-		 * The PAGECACHE_TAG_DIRTY is cleared in set_page_writeback
-		 * which is called via the below writepage callback.
-		 */
-		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-					PAGECACHE_TAG_DIRTY,
-					min(end - index,
-					(pgoff_t)PAGEVEC_SIZE-1) + 1);
+		nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
 		if (nr_pages == 0)
 			break;
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pvec.pages[i];
 
+			index = page->index;
+			if (index > end)
+				break;
+			index++;
+
+			BUG_ON(!PageLocked(page));
+			BUG_ON(PageWriteback(page));
+
 			pages_skipped = mpd->wbc->pages_skipped;
 			err = mapping->a_ops->writepage(page, mpd->wbc);
 			if (!err && (pages_skipped == mpd->wbc->pages_skipped))
@@ -1831,13 +1867,13 @@ static void ext4_print_free_blocks(struct inode *inode)
 			ext4_count_free_blocks(inode->i_sb));
 	printk(KERN_EMERG "Free/Dirty block details\n");
 	printk(KERN_EMERG "free_blocks=%lld\n",
-			percpu_counter_sum(&sbi->s_freeblocks_counter));
+			(long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
 	printk(KERN_EMERG "dirty_blocks=%lld\n",
-			percpu_counter_sum(&sbi->s_dirtyblocks_counter));
+			(long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
 	printk(KERN_EMERG "Block reservation details\n");
-	printk(KERN_EMERG "i_reserved_data_blocks=%lu\n",
+	printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
 			EXT4_I(inode)->i_reserved_data_blocks);
-	printk(KERN_EMERG "i_reserved_meta_blocks=%lu\n",
+	printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
 			EXT4_I(inode)->i_reserved_meta_blocks);
 	return;
 }
@@ -2087,11 +2123,29 @@ static int __mpage_da_writepage(struct page *page,
 	bh = head;
 	do {
 		BUG_ON(buffer_locked(bh));
+		/*
+		 * We need to try to allocate
+		 * unmapped blocks in the same page.
+		 * Otherwise we won't make progress
+		 * with the page in ext4_da_writepage
+		 */
 		if (buffer_dirty(bh) &&
 			(!buffer_mapped(bh) || buffer_delay(bh))) {
 			mpage_add_bh_to_extent(mpd, logical, bh);
 			if (mpd->io_done)
 				return MPAGE_DA_EXTENT_TAIL;
+		} else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
+			/*
+			 * mapped dirty buffer. We need to update
+			 * the b_state because we look at
+			 * b_state in mpage_da_map_blocks. We don't
+			 * update b_size because if we find an
+			 * unmapped buffer_head later we need to
+			 * use the b_state flag of that buffer_head.
+			 */
+			if (mpd->lbh.b_size == 0)
+				mpd->lbh.b_state =
+					bh->b_state & BH_FLAGS;
 		}
 		logical++;
 	} while ((bh = bh->b_this_page) != head);
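
The BH_FLAGS mask used in the new else-if branch above is defined earlier in inode.c, not in this hunk; it is assumed to keep just the buffer-state bits that mpage_da_map_blocks later inspects, roughly:

    /* Assumed definition of the mask referenced above (declared near the top of inode.c). */
    #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | (1 << BH_Delay))

Copying "bh->b_state & BH_FLAGS" into mpd->lbh therefore records whether the extent being built starts on an uptodate, mapped, or delayed buffer, without dragging along lock or dirty bits.
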
@@ -2269,10 +2323,13 @@ static int ext4_da_writepage(struct page *page,
 {
 	int ret = 0;
 	loff_t size;
-	unsigned long len;
+	unsigned int len;
 	struct buffer_head *page_bufs;
 	struct inode *inode = page->mapping->host;
 
+	trace_mark(ext4_da_writepage,
+		   "dev %s ino %lu page_index %lu",
+		   inode->i_sb->s_id, inode->i_ino, page->index);
 	size = i_size_read(inode);
 	if (page->index == size >> PAGE_CACHE_SHIFT)
 		len = size & ~PAGE_CACHE_MASK;
@@ -2378,10 +2435,25 @@ static int ext4_da_writepages(struct address_space *mapping,
 	struct mpage_da_data mpd;
 	struct inode *inode = mapping->host;
 	int no_nrwrite_index_update;
-	long pages_written = 0, pages_skipped;
+	int pages_written = 0;
+	long pages_skipped;
 	int needed_blocks, ret = 0, nr_to_writebump = 0;
 	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
 
+	trace_mark(ext4_da_writepages,
+		   "dev %s ino %lu nr_t_write %ld "
+		   "pages_skipped %ld range_start %llu "
+		   "range_end %llu nonblocking %d "
+		   "for_kupdate %d for_reclaim %d "
+		   "for_writepages %d range_cyclic %d",
+		   inode->i_sb->s_id, inode->i_ino,
+		   wbc->nr_to_write, wbc->pages_skipped,
+		   (unsigned long long) wbc->range_start,
+		   (unsigned long long) wbc->range_end,
+		   wbc->nonblocking, wbc->for_kupdate,
+		   wbc->for_reclaim, wbc->for_writepages,
+		   wbc->range_cyclic);
+
 	/*
 	 * No pages to write? This is mainly a kludge to avoid starting
 	 * a transaction for special inodes like journal inode on last iput()
@@ -2389,6 +2461,20 @@ static int ext4_da_writepages(struct address_space *mapping,
 	 */
 	if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
 		return 0;
+
+	/*
+	 * If the filesystem has aborted, it is read-only, so return
+	 * right away instead of dumping stack traces later on that
+	 * will obscure the real source of the problem. We test
+	 * EXT4_MOUNT_ABORT instead of sb->s_flag's MS_RDONLY because
+	 * the latter could be true if the filesystem is mounted
+	 * read-only, and in that case, ext4_da_writepages should
+	 * *never* be called, so if that ever happens, we would want
+	 * the stack trace.
+	 */
+	if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT))
+		return -EROFS;
+
 	/*
 	 * Make sure nr_to_write is >= sbi->s_mb_stream_request
 	 * This make sure small files blocks are allocated in
@@ -2433,7 +2519,7 @@ static int ext4_da_writepages(struct address_space *mapping,
 		handle = ext4_journal_start(inode, needed_blocks);
 		if (IS_ERR(handle)) {
 			ret = PTR_ERR(handle);
-			printk(KERN_EMERG "%s: jbd2_start: "
+			printk(KERN_CRIT "%s: jbd2_start: "
 			       "%ld pages, ino %lu; err %d\n", __func__,
 			       wbc->nr_to_write, inode->i_ino, ret);
 			dump_stack();
@@ -2486,6 +2572,14 @@ out_writepages:
 	if (!no_nrwrite_index_update)
 		wbc->no_nrwrite_index_update = 0;
 	wbc->nr_to_write -= nr_to_writebump;
+	trace_mark(ext4_da_writepage_result,
+		   "dev %s ino %lu ret %d pages_written %d "
+		   "pages_skipped %ld congestion %d "
+		   "more_io %d no_nrwrite_index_update %d",
+		   inode->i_sb->s_id, inode->i_ino, ret,
+		   pages_written, wbc->pages_skipped,
+		   wbc->encountered_congestion, wbc->more_io,
+		   wbc->no_nrwrite_index_update);
 	return ret;
 }
 
@@ -2537,6 +2631,11 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
 					len, flags, pagep, fsdata);
 	}
 	*fsdata = (void *)0;
+
+	trace_mark(ext4_da_write_begin,
+		   "dev %s ino %lu pos %llu len %u flags %u",
+		   inode->i_sb->s_id, inode->i_ino,
+		   (unsigned long long) pos, len, flags);
 retry:
 	/*
 	 * With delayed allocation, we don't log the i_disksize update
@@ -2626,6 +2725,10 @@ static int ext4_da_write_end(struct file *file,
 		}
 	}
 
+	trace_mark(ext4_da_write_end,
+		   "dev %s ino %lu pos %llu len %u copied %u",
+		   inode->i_sb->s_id, inode->i_ino,
+		   (unsigned long long) pos, len, copied);
 	start = pos & (PAGE_CACHE_SIZE - 1);
 	end = start + copied - 1;
 
@@ -2718,7 +2821,10 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
 		filemap_write_and_wait(mapping);
 	}
 
-	if (EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
+	BUG_ON(!EXT4_JOURNAL(inode) &&
+	       EXT4_I(inode)->i_state & EXT4_STATE_JDATA);
+
+	if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
 		/*
 		 * This is a REALLY heavyweight approach, but the use of
 		 * bmap on dirty files is expected to be extremely rare:
@@ -2836,6 +2942,9 @@ static int ext4_normal_writepage(struct page *page,
 	loff_t size = i_size_read(inode);
 	loff_t len;
 
+	trace_mark(ext4_normal_writepage,
+		   "dev %s ino %lu page_index %lu",
+		   inode->i_sb->s_id, inode->i_ino, page->index);
 	J_ASSERT(PageLocked(page));
 	if (page->index == size >> PAGE_CACHE_SHIFT)
 		len = size & ~PAGE_CACHE_MASK;
@@ -2921,6 +3030,9 @@ static int ext4_journalled_writepage(struct page *page,
 	loff_t size = i_size_read(inode);
 	loff_t len;
 
+	trace_mark(ext4_journalled_writepage,
+		   "dev %s ino %lu page_index %lu",
+		   inode->i_sb->s_id, inode->i_ino, page->index);
 	J_ASSERT(PageLocked(page));
 	if (page->index == size >> PAGE_CACHE_SHIFT)
 		len = size & ~PAGE_CACHE_MASK;
@@ -2989,7 +3101,10 @@ static void ext4_invalidatepage(struct page *page, unsigned long offset)
 	if (offset == 0)
 		ClearPageChecked(page);
 
-	jbd2_journal_invalidatepage(journal, page, offset);
+	if (journal)
+		jbd2_journal_invalidatepage(journal, page, offset);
+	else
+		block_invalidatepage(page, offset);
 }
 
 static int ext4_releasepage(struct page *page, gfp_t wait)
@@ -2999,7 +3114,10 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
 	WARN_ON(PageChecked(page));
 	if (!page_has_buffers(page))
 		return 0;
-	return jbd2_journal_try_to_free_buffers(journal, page, wait);
+	if (journal)
+		return jbd2_journal_try_to_free_buffers(journal, page, wait);
+	else
+		return try_to_free_buffers(page);
 }
 
 /*
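
Several of the hunks above and below branch on EXT4_JOURNAL(inode) being NULL to choose between the jbd2 path and a plain buffer-cache fallback. The macro itself is not part of this diff; it is assumed to simply expose the superblock's journal pointer, which is NULL on a no-journal mount:

    /* Assumed definition (ext4_jbd2.h): the per-superblock journal, NULL when mounted without one. */
    #define EXT4_JOURNAL(inode)	(EXT4_SB((inode)->i_sb)->s_journal)
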
@@ -3271,7 +3389,7 @@ int ext4_block_truncate_page(handle_t *handle,
 
 	err = 0;
 	if (ext4_should_journal_data(inode)) {
-		err = ext4_journal_dirty_metadata(handle, bh);
+		err = ext4_handle_dirty_metadata(handle, inode, bh);
 	} else {
 		if (ext4_should_order_data(inode))
 			err = ext4_jbd2_file_inode(handle, inode);
@@ -3395,8 +3513,8 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
 		__le32 *p;
 		if (try_to_extend_transaction(handle, inode)) {
 			if (bh) {
-				BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
-				ext4_journal_dirty_metadata(handle, bh);
+				BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+				ext4_handle_dirty_metadata(handle, inode, bh);
 			}
 			ext4_mark_inode_dirty(handle, inode);
 			ext4_journal_test_restart(handle, inode);
@@ -3496,7 +3614,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
 				  count, block_to_free_p, p);
 
 	if (this_bh) {
-		BUFFER_TRACE(this_bh, "call ext4_journal_dirty_metadata");
+		BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
 
 		/*
 		 * The buffer head should have an attached journal head at this
@@ -3505,7 +3623,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
 		 * the block was cleared. Check for this instead of OOPSing.
 		 */
 		if (bh2jh(this_bh))
-			ext4_journal_dirty_metadata(handle, this_bh);
+			ext4_handle_dirty_metadata(handle, inode, this_bh);
 		else
 			ext4_error(inode->i_sb, __func__,
 				   "circular indirect block detected, "
@@ -3535,7 +3653,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
 	ext4_fsblk_t nr;
 	__le32 *p;
 
-	if (is_handle_aborted(handle))
+	if (ext4_handle_is_aborted(handle))
 		return;
 
 	if (depth--) {
@@ -3605,7 +3723,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
 			 * will merely complain about releasing a free block,
 			 * rather than leaking blocks.
 			 */
-			if (is_handle_aborted(handle))
+			if (ext4_handle_is_aborted(handle))
 				return;
 			if (try_to_extend_transaction(handle, inode)) {
 				ext4_mark_inode_dirty(handle, inode);
@@ -3624,9 +3742,10 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
 						   parent_bh)){
 					*p = 0;
 					BUFFER_TRACE(parent_bh,
-						"call ext4_journal_dirty_metadata");
-					ext4_journal_dirty_metadata(handle,
-								    parent_bh);
+						"call ext4_handle_dirty_metadata");
+					ext4_handle_dirty_metadata(handle,
+								   inode,
+								   parent_bh);
 				}
 			}
 		}
@@ -3814,7 +3933,7 @@ do_indirects:
 	 * synchronous
 	 */
 	if (IS_SYNC(inode))
-		handle->h_sync = 1;
+		ext4_handle_sync(handle);
 out_stop:
 	/*
 	 * If this was a simple ftruncate(), and the file will remain alive
@@ -3844,7 +3963,7 @@ static int __ext4_get_inode_loc(struct inode *inode,
 	ext4_fsblk_t block;
 	int inodes_per_block, inode_offset;
 
-	iloc->bh = 0;
+	iloc->bh = NULL;
 	if (!ext4_valid_inum(sb, inode->i_ino))
 		return -EIO;
 
@@ -3951,7 +4070,7 @@ make_io:
 			num = EXT4_INODES_PER_GROUP(sb);
 			if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
 					EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
-				num -= le16_to_cpu(gdp->bg_itable_unused);
+				num -= ext4_itable_unused_count(sb, gdp);
 			table += num / inodes_per_block;
 			if (end > table)
 				end = table;
@@ -4313,8 +4432,8 @@ static int ext4_do_update_inode(handle_t *handle,
 			EXT4_SET_RO_COMPAT_FEATURE(sb,
 					EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
 			sb->s_dirt = 1;
-			handle->h_sync = 1;
-			err = ext4_journal_dirty_metadata(handle,
+			ext4_handle_sync(handle);
+			err = ext4_handle_dirty_metadata(handle, inode,
 					EXT4_SB(sb)->s_sbh);
 		}
 	}
@@ -4341,9 +4460,8 @@ static int ext4_do_update_inode(handle_t *handle,
 		raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
 	}
 
-
-	BUFFER_TRACE(bh, "call ext4_journal_dirty_metadata");
-	rc = ext4_journal_dirty_metadata(handle, bh);
+	BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+	rc = ext4_handle_dirty_metadata(handle, inode, bh);
 	if (!err)
 		err = rc;
 	ei->i_state &= ~EXT4_STATE_NEW;
@@ -4406,6 +4524,25 @@ int ext4_write_inode(struct inode *inode, int wait)
 	return ext4_force_commit(inode->i_sb);
 }
 
+int __ext4_write_dirty_metadata(struct inode *inode, struct buffer_head *bh)
+{
+	int err = 0;
+
+	mark_buffer_dirty(bh);
+	if (inode && inode_needs_sync(inode)) {
+		sync_dirty_buffer(bh);
+		if (buffer_req(bh) && !buffer_uptodate(bh)) {
+			ext4_error(inode->i_sb, __func__,
+				   "IO error syncing inode, "
+				   "inode=%lu, block=%llu",
+				   inode->i_ino,
+				   (unsigned long long)bh->b_blocknr);
+			err = -EIO;
+		}
+	}
+	return err;
+}
+
 /*
  * ext4_setattr()
  *
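
__ext4_write_dirty_metadata() above is the no-journal counterpart of handing a metadata buffer to jbd2. One plausible wiring (a sketch only; the real ext4_handle_dirty_metadata() wrapper lives in ext4_jbd2.[ch], and its no-journal branch may open-code this logic rather than call the helper) is for the wrapper to dispatch on handle validity:

    /* Hedged sketch of the dispatch assumed behind ext4_handle_dirty_metadata(). */
    int __ext4_handle_dirty_metadata(const char *where, handle_t *handle,
    				 struct inode *inode, struct buffer_head *bh)
    {
    	int err = 0;

    	if (ext4_handle_valid(handle)) {
    		/* journaled mount: let jbd2 track the metadata buffer */
    		err = jbd2_journal_dirty_metadata(handle, bh);
    		if (err)
    			ext4_journal_abort_handle(where, __func__, bh,
    						  handle, err);
    	} else {
    		/* no journal: fall back to ordinary buffer-cache dirtying */
    		err = __ext4_write_dirty_metadata(inode, bh);
    	}
    	return err;
    }
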
@@ -4710,16 +4847,15 @@ int
 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
 			 struct ext4_iloc *iloc)
 {
-	int err = 0;
-	if (handle) {
-		err = ext4_get_inode_loc(inode, iloc);
-		if (!err) {
-			BUFFER_TRACE(iloc->bh, "get_write_access");
-			err = ext4_journal_get_write_access(handle, iloc->bh);
-			if (err) {
-				brelse(iloc->bh);
-				iloc->bh = NULL;
-			}
+	int err;
+
+	err = ext4_get_inode_loc(inode, iloc);
+	if (!err) {
+		BUFFER_TRACE(iloc->bh, "get_write_access");
+		err = ext4_journal_get_write_access(handle, iloc->bh);
+		if (err) {
+			brelse(iloc->bh);
+			iloc->bh = NULL;
 		}
 	}
 	ext4_std_error(inode->i_sb, err);
@@ -4791,7 +4927,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
 
 	might_sleep();
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
-	if (EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
+	if (ext4_handle_valid(handle) &&
+	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
 	    !(EXT4_I(inode)->i_state & EXT4_STATE_NO_EXPAND)) {
 		/*
 		 * We need extra buffer credits since we may write into EA block
@@ -4843,6 +4980,11 @@ void ext4_dirty_inode(struct inode *inode)
 	handle_t *current_handle = ext4_journal_current_handle();
 	handle_t *handle;
 
+	if (!ext4_handle_valid(current_handle)) {
+		ext4_mark_inode_dirty(current_handle, inode);
+		return;
+	}
+
 	handle = ext4_journal_start(inode, 2);
 	if (IS_ERR(handle))
 		goto out;
@@ -4880,8 +5022,9 @@ static int ext4_pin_inode(handle_t *handle, struct inode *inode)
 			BUFFER_TRACE(iloc.bh, "get_write_access");
 			err = jbd2_journal_get_write_access(handle, iloc.bh);
 			if (!err)
-				err = ext4_journal_dirty_metadata(handle,
-								  iloc.bh);
+				err = ext4_handle_dirty_metadata(handle,
+								 inode,
+								 iloc.bh);
 			brelse(iloc.bh);
 		}
 	}
@@ -4907,6 +5050,8 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
 	 */
 
 	journal = EXT4_JOURNAL(inode);
+	if (!journal)
+		return 0;
 	if (is_journal_aborted(journal))
 		return -EROFS;
 
@@ -4936,7 +5081,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
 		return PTR_ERR(handle);
 
 	err = ext4_mark_inode_dirty(handle, inode);
-	handle->h_sync = 1;
+	ext4_handle_sync(handle);
 	ext4_journal_stop(handle);
 	ext4_std_error(inode->i_sb, err);
 