author	Michal Marek <mmarek@suse.cz>	2010-08-04 07:59:13 -0400
committer	Michal Marek <mmarek@suse.cz>	2010-08-04 07:59:13 -0400
commit	772320e84588dcbe1600ffb83e5f328f2209ac2a (patch)
tree	a7de21b79340aeaa17c58126f6b801b82c77b53a /fs/jbd2
parent	1ce53adf13a54375d2a5c7cdbe341b2558389615 (diff)
parent	9fe6206f400646a2322096b56c59891d530e8d51 (diff)
Merge commit 'v2.6.35' into kbuild/kbuild

Conflicts:
	arch/powerpc/Makefile
Diffstat (limited to 'fs/jbd2')
-rw-r--r--  fs/jbd2/checkpoint.c  |   4
-rw-r--r--  fs/jbd2/commit.c      |  19
-rw-r--r--  fs/jbd2/journal.c     | 147
-rw-r--r--  fs/jbd2/recovery.c    |   1
-rw-r--r--  fs/jbd2/transaction.c |  57
5 files changed, 190 insertions(+), 38 deletions(-)
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 886849370950..076d1cc44f95 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -507,6 +507,7 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
 	if (blocknr < journal->j_tail)
 		freed = freed + journal->j_last - journal->j_first;
 
+	trace_jbd2_cleanup_journal_tail(journal, first_tid, blocknr, freed);
 	jbd_debug(1,
 		  "Cleaning journal tail from %d to %d (offset %lu), "
 		  "freeing %lu\n",
@@ -529,7 +530,8 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
 	 */
 	if ((journal->j_fs_dev != journal->j_dev) &&
 	    (journal->j_flags & JBD2_BARRIER))
-		blkdev_issue_flush(journal->j_fs_dev, NULL);
+		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL,
+			BLKDEV_IFL_WAIT);
 	if (!(journal->j_flags & JBD2_ABORT))
 		jbd2_journal_update_superblock(journal, 1);
 	return 0;
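Context for the flush changes in this hunk and in commit.c below: in 2.6.35, blkdev_issue_flush() gained a gfp mask and a BLKDEV_IFL_* flags argument. A minimal sketch of a caller updated for the new signature (the helper name flush_fs_dev_if_needed is illustrative, not from the patch; it mirrors the jbd2_cleanup_journal_tail() hunk and builds only in a 2.6.35 kernel tree):

#include <linux/blkdev.h>
#include <linux/jbd2.h>

/* Illustrative helper, not part of the patch: issue a cache flush to
 * the filesystem device when the journal lives on a separate device
 * and write barriers are enabled, as in the hunk above. */
static void flush_fs_dev_if_needed(journal_t *journal)
{
	if ((journal->j_fs_dev != journal->j_dev) &&
	    (journal->j_flags & JBD2_BARRIER))
		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL,
				   BLKDEV_IFL_WAIT);
}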
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 1bc74b6f26d2..75716d3d2be0 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -717,7 +717,8 @@ start_journal_io:
 	if (commit_transaction->t_flushed_data_blocks &&
 	    (journal->j_fs_dev != journal->j_dev) &&
 	    (journal->j_flags & JBD2_BARRIER))
-		blkdev_issue_flush(journal->j_fs_dev, NULL);
+		blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL,
+			BLKDEV_IFL_WAIT);
 
 	/* Done it all: now write the commit record asynchronously. */
 	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
@@ -727,7 +728,8 @@ start_journal_io:
 		if (err)
 			__jbd2_journal_abort_hard(journal);
 		if (journal->j_flags & JBD2_BARRIER)
-			blkdev_issue_flush(journal->j_dev, NULL);
+			blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL,
+				BLKDEV_IFL_WAIT);
 	}
 
 	err = journal_finish_inode_data_buffers(journal, commit_transaction);
@@ -883,8 +885,7 @@ restart_loop:
 		spin_unlock(&journal->j_list_lock);
 		bh = jh2bh(jh);
 		jbd_lock_bh_state(bh);
-		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction ||
-			jh->b_transaction == journal->j_running_transaction);
+		J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
 
 		/*
 		 * If there is undo-protected committed data against
@@ -930,12 +931,12 @@ restart_loop:
 		/* A buffer which has been freed while still being
 		 * journaled by a previous transaction may end up still
 		 * being dirty here, but we want to avoid writing back
-		 * that buffer in the future now that the last use has
-		 * been committed.  That's not only a performance gain,
-		 * it also stops aliasing problems if the buffer is left
-		 * behind for writeback and gets reallocated for another
+		 * that buffer in the future after the "add to orphan"
+		 * operation been committed, That's not only a performance
+		 * gain, it also stops aliasing problems if the buffer is
+		 * left behind for writeback and gets reallocated for another
 		 * use in a different page. */
-		if (buffer_freed(bh)) {
+		if (buffer_freed(bh) && !jh->b_next_transaction) {
 			clear_buffer_freed(bh);
 			clear_buffer_jbddirty(bh);
 		}
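The tightened buffer_freed() handling above only clears the freed and jbddirty bits when no later transaction still references the buffer. A minimal sketch of that check in isolation (the wrapper name is illustrative, not from the patch; the flag helpers are the real ones declared via <linux/jbd2.h>):

#include <linux/jbd2.h>

/* Illustrative wrapper, not part of the patch: drop the freed/dirty
 * state only when no running transaction still owns the buffer via
 * b_next_transaction, mirroring the commit-path hunk above. */
static void maybe_clear_freed_state(struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);

	if (buffer_freed(bh) && !jh->b_next_transaction) {
		clear_buffer_freed(bh);
		clear_buffer_jbddirty(bh);
	}
}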
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index ac0d027595d0..036880895bfc 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -39,6 +39,8 @@
 #include <linux/seq_file.h>
 #include <linux/math64.h>
 #include <linux/hash.h>
+#include <linux/log2.h>
+#include <linux/vmalloc.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/jbd2.h>
@@ -93,6 +95,7 @@ EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
+static int jbd2_journal_create_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -294,7 +297,6 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
 	struct page *new_page;
 	unsigned int new_offset;
 	struct buffer_head *bh_in = jh2bh(jh_in);
-	struct jbd2_buffer_trigger_type *triggers;
 	journal_t *journal = transaction->t_journal;
 
 	/*
@@ -325,21 +327,21 @@ repeat:
 		done_copy_out = 1;
 		new_page = virt_to_page(jh_in->b_frozen_data);
 		new_offset = offset_in_page(jh_in->b_frozen_data);
-		triggers = jh_in->b_frozen_triggers;
 	} else {
 		new_page = jh2bh(jh_in)->b_page;
 		new_offset = offset_in_page(jh2bh(jh_in)->b_data);
-		triggers = jh_in->b_triggers;
 	}
 
 	mapped_data = kmap_atomic(new_page, KM_USER0);
 	/*
-	 * Fire any commit trigger.  Do this before checking for escaping,
-	 * as the trigger may modify the magic offset.  If a copy-out
-	 * happens afterwards, it will have the correct data in the buffer.
+	 * Fire data frozen trigger if data already wasn't frozen.  Do this
+	 * before checking for escaping, as the trigger may modify the magic
+	 * offset.  If a copy-out happens afterwards, it will have the correct
+	 * data in the buffer.
 	 */
-	jbd2_buffer_commit_trigger(jh_in, mapped_data + new_offset,
-				   triggers);
+	if (!done_copy_out)
+		jbd2_buffer_frozen_trigger(jh_in, mapped_data + new_offset,
+					   jh_in->b_triggers);
 
 	/*
 	 * Check for escaping
@@ -1248,6 +1250,13 @@ int jbd2_journal_load(journal_t *journal)
 		}
 	}
 
+	/*
+	 * Create a slab for this blocksize
+	 */
+	err = jbd2_journal_create_slab(be32_to_cpu(sb->s_blocksize));
+	if (err)
+		return err;
+
 	/* Let the recovery code check whether it needs to recover any
 	 * data from the journal. */
 	if (jbd2_journal_recover(journal))
@@ -1807,6 +1816,127 @@ size_t journal_tag_bytes(journal_t *journal)
 }
 
 /*
+ * JBD memory management
+ *
+ * These functions are used to allocate block-sized chunks of memory
+ * used for making copies of buffer_head data.  Very often it will be
+ * page-sized chunks of data, but sometimes it will be in
+ * sub-page-size chunks.  (For example, 16k pages on Power systems
+ * with a 4k block file system.)  For blocks smaller than a page, we
+ * use a SLAB allocator.  There are slab caches for each block size,
+ * which are allocated at mount time, if necessary, and we only free
+ * (all of) the slab caches when/if the jbd2 module is unloaded.  For
+ * this reason we don't need to a mutex to protect access to
+ * jbd2_slab[] allocating or releasing memory; only in
+ * jbd2_journal_create_slab().
+ */
+#define JBD2_MAX_SLABS 8
+static struct kmem_cache *jbd2_slab[JBD2_MAX_SLABS];
+static DECLARE_MUTEX(jbd2_slab_create_sem);
+
+static const char *jbd2_slab_names[JBD2_MAX_SLABS] = {
+	"jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k",
+	"jbd2_16k", "jbd2_32k", "jbd2_64k", "jbd2_128k"
+};
+
+
+static void jbd2_journal_destroy_slabs(void)
+{
+	int i;
+
+	for (i = 0; i < JBD2_MAX_SLABS; i++) {
+		if (jbd2_slab[i])
+			kmem_cache_destroy(jbd2_slab[i]);
+		jbd2_slab[i] = NULL;
+	}
+}
+
+static int jbd2_journal_create_slab(size_t size)
+{
+	int i = order_base_2(size) - 10;
+	size_t slab_size;
+
+	if (size == PAGE_SIZE)
+		return 0;
+
+	if (i >= JBD2_MAX_SLABS)
+		return -EINVAL;
+
+	if (unlikely(i < 0))
+		i = 0;
+	down(&jbd2_slab_create_sem);
+	if (jbd2_slab[i]) {
+		up(&jbd2_slab_create_sem);
+		return 0;	/* Already created */
+	}
+
+	slab_size = 1 << (i+10);
+	jbd2_slab[i] = kmem_cache_create(jbd2_slab_names[i], slab_size,
+					 slab_size, 0, NULL);
+	up(&jbd2_slab_create_sem);
+	if (!jbd2_slab[i]) {
+		printk(KERN_EMERG "JBD2: no memory for jbd2_slab cache\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static struct kmem_cache *get_slab(size_t size)
+{
+	int i = order_base_2(size) - 10;
+
+	BUG_ON(i >= JBD2_MAX_SLABS);
+	if (unlikely(i < 0))
+		i = 0;
+	BUG_ON(jbd2_slab[i] == NULL);
+	return jbd2_slab[i];
+}
+
+void *jbd2_alloc(size_t size, gfp_t flags)
+{
+	void *ptr;
+
+	BUG_ON(size & (size-1)); /* Must be a power of 2 */
+
+	flags |= __GFP_REPEAT;
+	if (size == PAGE_SIZE)
+		ptr = (void *)__get_free_pages(flags, 0);
+	else if (size > PAGE_SIZE) {
+		int order = get_order(size);
+
+		if (order < 3)
+			ptr = (void *)__get_free_pages(flags, order);
+		else
+			ptr = vmalloc(size);
+	} else
+		ptr = kmem_cache_alloc(get_slab(size), flags);
+
+	/* Check alignment; SLUB has gotten this wrong in the past,
+	 * and this can lead to user data corruption! */
+	BUG_ON(((unsigned long) ptr) & (size-1));
+
+	return ptr;
+}
+
+void jbd2_free(void *ptr, size_t size)
+{
+	if (size == PAGE_SIZE) {
+		free_pages((unsigned long)ptr, 0);
+		return;
+	}
+	if (size > PAGE_SIZE) {
+		int order = get_order(size);
+
+		if (order < 3)
+			free_pages((unsigned long)ptr, order);
+		else
+			vfree(ptr);
+		return;
+	}
+	kmem_cache_free(get_slab(size), ptr);
+};
+
+/*
  * Journal_head storage management
  */
 static struct kmem_cache *jbd2_journal_head_cache;
@@ -2204,6 +2334,7 @@ static void jbd2_journal_destroy_caches(void)
 	jbd2_journal_destroy_revoke_caches();
 	jbd2_journal_destroy_jbd2_journal_head_cache();
 	jbd2_journal_destroy_handle_cache();
+	jbd2_journal_destroy_slabs();
 }
 
 static int __init journal_init(void)
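The slab machinery added above maps a power-of-two block size of 2^(i+10) bytes to jbd2_slab[i], so 1k through 128k covers all JBD2_MAX_SLABS slots. A standalone userspace sketch of that index math (ilog2_u() stands in for the kernel's order_base_2(), which agrees with it on power-of-two sizes):

#include <stdio.h>

#define JBD2_MAX_SLABS 8

/* Integer log2; equivalent to order_base_2() for powers of two. */
static int ilog2_u(unsigned long v)
{
	int log = 0;

	while (v >>= 1)
		log++;
	return log;
}

int main(void)
{
	static const char *names[JBD2_MAX_SLABS] = {
		"jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k",
		"jbd2_16k", "jbd2_32k", "jbd2_64k", "jbd2_128k"
	};
	unsigned long size;

	for (size = 1024; size <= 131072; size <<= 1) {
		int i = ilog2_u(size) - 10;	/* order_base_2(size) - 10 */

		printf("%6lu bytes -> jbd2_slab[%d] (%s)\n", size, i, names[i]);
	}
	return 0;
}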
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 73063285b13f..049281b7cb89 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -20,7 +20,6 @@
 #include <linux/fs.h>
 #include <linux/jbd2.h>
 #include <linux/errno.h>
-#include <linux/slab.h>
 #include <linux/crc32.h>
 #endif
 
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index a0512700542f..b8e0806681bb 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -725,6 +725,9 @@ done:
 	page = jh2bh(jh)->b_page;
 	offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
 	source = kmap_atomic(page, KM_USER0);
+	/* Fire data frozen trigger just before we copy the data */
+	jbd2_buffer_frozen_trigger(jh, source + offset,
+				   jh->b_triggers);
 	memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
 	kunmap_atomic(source, KM_USER0);
 
@@ -963,15 +966,15 @@ void jbd2_journal_set_triggers(struct buffer_head *bh,
 	jh->b_triggers = type;
 }
 
-void jbd2_buffer_commit_trigger(struct journal_head *jh, void *mapped_data,
+void jbd2_buffer_frozen_trigger(struct journal_head *jh, void *mapped_data,
 				struct jbd2_buffer_trigger_type *triggers)
 {
 	struct buffer_head *bh = jh2bh(jh);
 
-	if (!triggers || !triggers->t_commit)
+	if (!triggers || !triggers->t_frozen)
 		return;
 
-	triggers->t_commit(triggers, bh, mapped_data, bh->b_size);
+	triggers->t_frozen(triggers, bh, mapped_data, bh->b_size);
 }
 
 void jbd2_buffer_abort_trigger(struct journal_head *jh,
@@ -1311,7 +1314,6 @@ int jbd2_journal_stop(handle_t *handle)
 	if (handle->h_sync)
 		transaction->t_synchronous_commit = 1;
 	current->journal_info = NULL;
-	spin_lock(&journal->j_state_lock);
 	spin_lock(&transaction->t_handle_lock);
 	transaction->t_outstanding_credits -= handle->h_buffer_credits;
 	transaction->t_updates--;
@@ -1340,8 +1342,7 @@ int jbd2_journal_stop(handle_t *handle)
 		jbd_debug(2, "transaction too old, requesting commit for "
 					"handle %p\n", handle);
 		/* This is non-blocking */
-		__jbd2_log_start_commit(journal, transaction->t_tid);
-		spin_unlock(&journal->j_state_lock);
+		jbd2_log_start_commit(journal, transaction->t_tid);
 
 		/*
 		 * Special case: JBD2_SYNC synchronous updates require us
@@ -1351,7 +1352,6 @@ int jbd2_journal_stop(handle_t *handle)
 			err = jbd2_log_wait_commit(journal, tid);
 	} else {
 		spin_unlock(&transaction->t_handle_lock);
-		spin_unlock(&journal->j_state_lock);
 	}
 
 	lock_map_release(&handle->h_lockdep_map);
@@ -1727,6 +1727,21 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
 	if (!jh)
 		goto zap_buffer_no_jh;
 
+	/*
+	 * We cannot remove the buffer from checkpoint lists until the
+	 * transaction adding inode to orphan list (let's call it T)
+	 * is committed.  Otherwise if the transaction changing the
+	 * buffer would be cleaned from the journal before T is
+	 * committed, a crash will cause that the correct contents of
+	 * the buffer will be lost.  On the other hand we have to
+	 * clear the buffer dirty bit at latest at the moment when the
+	 * transaction marking the buffer as freed in the filesystem
+	 * structures is committed because from that moment on the
+	 * buffer can be reallocated and used by a different page.
+	 * Since the block hasn't been freed yet but the inode has
+	 * already been added to orphan list, it is safe for us to add
+	 * the buffer to BJ_Forget list of the newest transaction.
+	 */
 	transaction = jh->b_transaction;
 	if (transaction == NULL) {
 		/* First case: not on any transaction.  If it
@@ -1783,16 +1798,15 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
 	} else if (transaction == journal->j_committing_transaction) {
 		JBUFFER_TRACE(jh, "on committing transaction");
 		/*
-		 * If it is committing, we simply cannot touch it.  We
-		 * can remove it's next_transaction pointer from the
-		 * running transaction if that is set, but nothing
-		 * else. */
+		 * The buffer is committing, we simply cannot touch
+		 * it. So we just set j_next_transaction to the
+		 * running transaction (if there is one) and mark
+		 * buffer as freed so that commit code knows it should
+		 * clear dirty bits when it is done with the buffer.
+		 */
 		set_buffer_freed(bh);
-		if (jh->b_next_transaction) {
-			J_ASSERT(jh->b_next_transaction ==
-					journal->j_running_transaction);
-			jh->b_next_transaction = NULL;
-		}
+		if (journal->j_running_transaction && buffer_jbddirty(bh))
+			jh->b_next_transaction = journal->j_running_transaction;
 		jbd2_journal_put_journal_head(jh);
 		spin_unlock(&journal->j_list_lock);
 		jbd_unlock_bh_state(bh);
@@ -1969,7 +1983,7 @@ void jbd2_journal_file_buffer(struct journal_head *jh,
  */
 void __jbd2_journal_refile_buffer(struct journal_head *jh)
 {
-	int was_dirty;
+	int was_dirty, jlist;
 	struct buffer_head *bh = jh2bh(jh);
 
 	J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
@@ -1991,8 +2005,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
 	__jbd2_journal_temp_unlink_buffer(jh);
 	jh->b_transaction = jh->b_next_transaction;
 	jh->b_next_transaction = NULL;
-	__jbd2_journal_file_buffer(jh, jh->b_transaction,
-				jh->b_modified ? BJ_Metadata : BJ_Reserved);
+	if (buffer_freed(bh))
+		jlist = BJ_Forget;
+	else if (jh->b_modified)
+		jlist = BJ_Metadata;
+	else
+		jlist = BJ_Reserved;
+	__jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
 	J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
 
 	if (was_dirty)
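The refile change above files a freed buffer on BJ_Forget so commit can clear its dirty bits, per the long comment added to journal_unmap_buffer(). A minimal sketch of the list-selection logic on its own (the function name is illustrative, not from the patch; the BJ_* constants and b_modified are real jbd2 names):

#include <linux/jbd2.h>

/* Illustrative helper, not part of the patch: pick the journaling
 * list for a buffer being refiled onto its next transaction. */
static int pick_jlist(struct journal_head *jh, struct buffer_head *bh)
{
	if (buffer_freed(bh))
		return BJ_Forget;	/* deleted block: forget at commit */
	else if (jh->b_modified)
		return BJ_Metadata;	/* dirtied metadata */
	else
		return BJ_Reserved;	/* credits reserved, not yet dirtied */
}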