author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2009-09-14 00:16:56 -0400
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2009-09-14 00:16:56 -0400
commit     fc8e1ead9314cf0e0f1922e661428b93d3a50d88 (patch)
tree       f3cb97c4769b74f6627a59769f1ed5c92a13c58a /fs/jbd2
parent     2bcaa6a4238094c5695d5b1943078388d82d3004 (diff)
parent     9de48cc300fb10f7d9faa978670becf5e352462a (diff)
Merge branch 'next' into for-linus
Diffstat (limited to 'fs/jbd2')
-rw-r--r--  fs/jbd2/checkpoint.c  |   5
-rw-r--r--  fs/jbd2/commit.c      |  13
-rw-r--r--  fs/jbd2/journal.c     |  98
-rw-r--r--  fs/jbd2/transaction.c | 117
4 files changed, 129 insertions(+), 104 deletions(-)
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 17159cacbd9e..5d70b3e6d49b 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -20,9 +20,9 @@
 #include <linux/time.h>
 #include <linux/fs.h>
 #include <linux/jbd2.h>
-#include <linux/marker.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
+#include <trace/events/jbd2.h>
 
 /*
  * Unlink a buffer from a transaction checkpoint list.
@@ -358,8 +358,7 @@ int jbd2_log_do_checkpoint(journal_t *journal)
          * journal straight away.
          */
         result = jbd2_cleanup_journal_tail(journal);
-        trace_mark(jbd2_checkpoint, "dev %s need_checkpoint %d",
-                   journal->j_devname, result);
+        trace_jbd2_checkpoint(journal, result);
         jbd_debug(1, "cleanup_journal_tail returned %d\n", result);
         if (result <= 0)
                 return result;
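The trace_mark() call removed above is replaced by a static tracepoint, trace_jbd2_checkpoint(), whose definition lives in the new <trace/events/jbd2.h> header (instantiated by the CREATE_TRACE_POINTS block added to journal.c below). As a rough sketch of what such a definition looks like (the field layout here is illustrative; the authoritative version is the one in include/trace/events/jbd2.h), the event could be declared along these lines:

TRACE_EVENT(jbd2_checkpoint,

        TP_PROTO(journal_t *journal, int result),

        TP_ARGS(journal, result),

        /* fields recorded in the trace ring buffer */
        TP_STRUCT__entry(
                __field(dev_t, dev)
                __field(int, result)
        ),

        /* filled in at the trace_jbd2_checkpoint() call site */
        TP_fast_assign(
                __entry->dev    = journal->j_fs_dev->bd_dev;
                __entry->result = result;
        ),

        /* rendered when the trace buffer is read */
        TP_printk("dev %s result %d",
                  jbd2_dev_to_name(__entry->dev), __entry->result)
);

Unlike the old marker, the tracepoint records typed fields instead of a printf-style string, and the call site compiles down to a cheap conditional that does nothing unless the jbd2_checkpoint event is enabled at runtime.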
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 0b7d3b8226fd..7b4088b2364d 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -16,7 +16,6 @@
 #include <linux/time.h>
 #include <linux/fs.h>
 #include <linux/jbd2.h>
-#include <linux/marker.h>
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
@@ -26,6 +25,7 @@
 #include <linux/writeback.h>
 #include <linux/backing-dev.h>
 #include <linux/bio.h>
+#include <trace/events/jbd2.h>
 
 /*
  * Default IO end handler for temporary BJ_IO buffer_heads.
@@ -253,6 +253,7 @@ static int journal_submit_data_buffers(journal_t *journal,
                  * block allocation with delalloc. We need to write
                  * only allocated blocks here.
                  */
+                trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
                 err = journal_submit_inode_data_buffers(mapping);
                 if (!ret)
                         ret = err;
@@ -394,8 +395,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
         commit_transaction = journal->j_running_transaction;
         J_ASSERT(commit_transaction->t_state == T_RUNNING);
 
-        trace_mark(jbd2_start_commit, "dev %s transaction %d",
-                   journal->j_devname, commit_transaction->t_tid);
+        trace_jbd2_start_commit(journal, commit_transaction);
         jbd_debug(1, "JBD: starting commit of transaction %d\n",
                         commit_transaction->t_tid);
 
@@ -409,6 +409,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
          */
         if (commit_transaction->t_synchronous_commit)
                 write_op = WRITE_SYNC_PLUG;
+        trace_jbd2_commit_locking(journal, commit_transaction);
         stats.u.run.rs_wait = commit_transaction->t_max_wait;
         stats.u.run.rs_locked = jiffies;
         stats.u.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
@@ -484,6 +485,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
          */
         jbd2_journal_switch_revoke_table(journal);
 
+        trace_jbd2_commit_flushing(journal, commit_transaction);
         stats.u.run.rs_flushing = jiffies;
         stats.u.run.rs_locked = jbd2_time_diff(stats.u.run.rs_locked,
                                                stats.u.run.rs_flushing);
@@ -520,6 +522,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
         commit_transaction->t_state = T_COMMIT;
         spin_unlock(&journal->j_state_lock);
 
+        trace_jbd2_commit_logging(journal, commit_transaction);
         stats.u.run.rs_logging = jiffies;
         stats.u.run.rs_flushing = jbd2_time_diff(stats.u.run.rs_flushing,
                                                  stats.u.run.rs_logging);
@@ -1054,9 +1057,7 @@ restart_loop:
         if (journal->j_commit_callback)
                 journal->j_commit_callback(journal, commit_transaction);
 
-        trace_mark(jbd2_end_commit, "dev %s transaction %d head %d",
-                   journal->j_devname, commit_transaction->t_tid,
-                   journal->j_tail_sequence);
+        trace_jbd2_end_commit(journal, commit_transaction);
         jbd_debug(1, "JBD: commit %d complete, head %d\n",
                   journal->j_commit_sequence, journal->j_tail_sequence);
         if (to_free)
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 58144102bf25..e378cb383979 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -38,6 +38,10 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/math64.h>
+#include <linux/hash.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/jbd2.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -293,6 +297,7 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
         unsigned int new_offset;
         struct buffer_head *bh_in = jh2bh(jh_in);
         struct jbd2_buffer_trigger_type *triggers;
+        journal_t *journal = transaction->t_journal;
 
         /*
          * The buffer really shouldn't be locked: only the current committing
@@ -306,6 +311,11 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
         J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
 
         new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
+        /* keep subsequent assertions sane */
+        new_bh->b_state = 0;
+        init_buffer(new_bh, NULL, NULL);
+        atomic_set(&new_bh->b_count, 1);
+        new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
 
         /*
          * If a new transaction has already done a buffer copy-out, then
@@ -384,14 +394,6 @@ repeat:
                 kunmap_atomic(mapped_data, KM_USER0);
         }
 
-        /* keep subsequent assertions sane */
-        new_bh->b_state = 0;
-        init_buffer(new_bh, NULL, NULL);
-        atomic_set(&new_bh->b_count, 1);
-        jbd_unlock_bh_state(bh_in);
-
-        new_jh = jbd2_journal_add_journal_head(new_bh); /* This sleeps */
-
         set_bh_page(new_bh, new_page, new_offset);
         new_jh->b_transaction = NULL;
         new_bh->b_size = jh2bh(jh_in)->b_size;
@@ -408,7 +410,11 @@ repeat:
          * copying is moved to the transaction's shadow queue.
          */
         JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
-        jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
+        spin_lock(&journal->j_list_lock);
+        __jbd2_journal_file_buffer(jh_in, transaction, BJ_Shadow);
+        spin_unlock(&journal->j_list_lock);
+        jbd_unlock_bh_state(bh_in);
+
         JBUFFER_TRACE(new_jh, "file as BJ_IO");
         jbd2_journal_file_buffer(new_jh, transaction, BJ_IO);
 
@@ -1781,7 +1787,7 @@ int jbd2_journal_wipe(journal_t *journal, int write)
  * Journal abort has very specific semantics, which we describe
  * for journal abort.
  *
- * Two internal function, which provide abort to te jbd layer
+ * Two internal functions, which provide abort to the jbd layer
  * itself are here.
  */
 
@@ -1879,7 +1885,7 @@ void jbd2_journal_abort(journal_t *journal, int errno)
  * int jbd2_journal_errno () - returns the journal's error state.
  * @journal: journal to examine.
  *
- * This is the errno numbet set with jbd2_journal_abort(), the last
+ * This is the errno number set with jbd2_journal_abort(), the last
  * time the journal was mounted - if the journal was stopped
  * without calling abort this will be 0.
  *
@@ -1903,7 +1909,7 @@ int jbd2_journal_errno(journal_t *journal)
  * int jbd2_journal_clear_err () - clears the journal's error state
  * @journal: journal to act on.
  *
- * An error must be cleared or Acked to take a FS out of readonly
+ * An error must be cleared or acked to take a FS out of readonly
  * mode.
  */
 int jbd2_journal_clear_err(journal_t *journal)
@@ -1923,7 +1929,7 @@ int jbd2_journal_clear_err(journal_t *journal)
  * void jbd2_journal_ack_err() - Ack journal err.
  * @journal: journal to act on.
  *
- * An error must be cleared or Acked to take a FS out of readonly
+ * An error must be cleared or acked to take a FS out of readonly
  * mode.
  */
 void jbd2_journal_ack_err(journal_t *journal)
@@ -2377,6 +2383,72 @@ static void __exit journal_exit(void)
         jbd2_journal_destroy_caches();
 }
 
+/*
+ * jbd2_dev_to_name is a utility function used by the jbd2 and ext4
+ * tracing infrastructure to map a dev_t to a device name.
+ *
+ * The caller should use rcu_read_lock() in order to make sure the
+ * device name stays valid until its done with it.  We use
+ * rcu_read_lock() as well to make sure we're safe in case the caller
+ * gets sloppy, and because rcu_read_lock() is cheap and can be safely
+ * nested.
+ */
+struct devname_cache {
+        struct rcu_head rcu;
+        dev_t           device;
+        char            devname[BDEVNAME_SIZE];
+};
+#define CACHE_SIZE_BITS 6
+static struct devname_cache *devcache[1 << CACHE_SIZE_BITS];
+static DEFINE_SPINLOCK(devname_cache_lock);
+
+static void free_devcache(struct rcu_head *rcu)
+{
+        kfree(rcu);
+}
+
+const char *jbd2_dev_to_name(dev_t device)
+{
+        int     i = hash_32(device, CACHE_SIZE_BITS);
+        char    *ret;
+        struct block_device *bd;
+        static struct devname_cache *new_dev;
+
+        rcu_read_lock();
+        if (devcache[i] && devcache[i]->device == device) {
+                ret = devcache[i]->devname;
+                rcu_read_unlock();
+                return ret;
+        }
+        rcu_read_unlock();
+
+        new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
+        if (!new_dev)
+                return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
+        spin_lock(&devname_cache_lock);
+        if (devcache[i]) {
+                if (devcache[i]->device == device) {
+                        kfree(new_dev);
+                        ret = devcache[i]->devname;
+                        spin_unlock(&devname_cache_lock);
+                        return ret;
+                }
+                call_rcu(&devcache[i]->rcu, free_devcache);
+        }
+        devcache[i] = new_dev;
+        devcache[i]->device = device;
+        bd = bdget(device);
+        if (bd) {
+                bdevname(bd, devcache[i]->devname);
+                bdput(bd);
+        } else
+                __bdevname(device, devcache[i]->devname);
+        ret = devcache[i]->devname;
+        spin_unlock(&devname_cache_lock);
+        return ret;
+}
+EXPORT_SYMBOL(jbd2_dev_to_name);
+
 MODULE_LICENSE("GPL");
 module_init(journal_init);
 module_exit(journal_exit);
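The comment on jbd2_dev_to_name() spells out its contract: stale cache entries are reclaimed through call_rcu(), so the returned string is only guaranteed to stay valid while the caller sits inside an RCU read-side section. A minimal caller following that contract might look like the sketch below; jbd2_print_journal_dev() is a hypothetical helper invented for illustration, only jbd2_dev_to_name() itself comes from this patch:

#include <linux/jbd2.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>

/* Hypothetical example: log the cached name of the journal's device.
 * rcu_read_lock() keeps the string returned by jbd2_dev_to_name() from
 * being freed by a concurrent cache replacement while it is in use. */
static void jbd2_print_journal_dev(journal_t *journal)
{
        rcu_read_lock();
        printk(KERN_INFO "jbd2: journal device is %s\n",
               jbd2_dev_to_name(journal->j_fs_dev->bd_dev));
        rcu_read_unlock();
}

In practice the intended consumers are the TP_printk() format strings in the ext4 and jbd2 trace headers, which use this helper to turn a recorded dev_t back into a readable device name when the trace buffer is read.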
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 996ffda06bf3..6213ac728f30 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -499,34 +499,15 @@ void jbd2_journal_unlock_updates (journal_t *journal)
         wake_up(&journal->j_wait_transaction_locked);
 }
 
-/*
- * Report any unexpected dirty buffers which turn up. Normally those
- * indicate an error, but they can occur if the user is running (say)
- * tune2fs to modify the live filesystem, so we need the option of
- * continuing as gracefully as possible. #
- *
- * The caller should already hold the journal lock and
- * j_list_lock spinlock: most callers will need those anyway
- * in order to probe the buffer's journaling state safely.
- */
-static void jbd_unexpected_dirty_buffer(struct journal_head *jh)
-{
-        int jlist;
-
-        /* If this buffer is one which might reasonably be dirty
-         * --- ie. data, or not part of this journal --- then
-         * we're OK to leave it alone, but otherwise we need to
-         * move the dirty bit to the journal's own internal
-         * JBDDirty bit. */
-        jlist = jh->b_jlist;
-
-        if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
-            jlist == BJ_Shadow || jlist == BJ_Forget) {
-                struct buffer_head *bh = jh2bh(jh);
-
-                if (test_clear_buffer_dirty(bh))
-                        set_buffer_jbddirty(bh);
-        }
+static void warn_dirty_buffer(struct buffer_head *bh)
+{
+        char b[BDEVNAME_SIZE];
+
+        printk(KERN_WARNING
+               "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
+               "There's a risk of filesystem corruption in case of system "
+               "crash.\n",
+               bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
 }
 
 /*
@@ -593,14 +574,16 @@ repeat:
                         if (jh->b_next_transaction)
                                 J_ASSERT_JH(jh, jh->b_next_transaction ==
                                                         transaction);
+                        warn_dirty_buffer(bh);
                 }
                 /*
                  * In any case we need to clean the dirty flag and we must
                  * do it under the buffer lock to be sure we don't race
                  * with running write-out.
                  */
-                JBUFFER_TRACE(jh, "Unexpected dirty buffer");
-                jbd_unexpected_dirty_buffer(jh);
+                JBUFFER_TRACE(jh, "Journalling dirty buffer");
+                clear_buffer_dirty(bh);
+                set_buffer_jbddirty(bh);
         }
 
         unlock_buffer(bh);
@@ -843,6 +826,15 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
         J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
 
         if (jh->b_transaction == NULL) {
+                /*
+                 * Previous jbd2_journal_forget() could have left the buffer
+                 * with jbddirty bit set because it was being committed. When
+                 * the commit finished, we've filed the buffer for
+                 * checkpointing and marked it dirty. Now we are reallocating
+                 * the buffer so the transaction freeing it must have
+                 * committed and so it's safe to clear the dirty bit.
+                 */
+                clear_buffer_dirty(jh2bh(jh));
                 jh->b_transaction = transaction;
 
                 /* first access by this transaction */
@@ -1547,36 +1539,6 @@ out:
         return;
 }
 
-/*
- * jbd2_journal_try_to_free_buffers() could race with
- * jbd2_journal_commit_transaction(). The later might still hold the
- * reference count to the buffers when inspecting them on
- * t_syncdata_list or t_locked_list.
- *
- * jbd2_journal_try_to_free_buffers() will call this function to
- * wait for the current transaction to finish syncing data buffers, before
- * try to free that buffer.
- *
- * Called with journal->j_state_lock hold.
- */
-static void jbd2_journal_wait_for_transaction_sync_data(journal_t *journal)
-{
-        transaction_t *transaction;
-        tid_t tid;
-
-        spin_lock(&journal->j_state_lock);
-        transaction = journal->j_committing_transaction;
-
-        if (!transaction) {
-                spin_unlock(&journal->j_state_lock);
-                return;
-        }
-
-        tid = transaction->t_tid;
-        spin_unlock(&journal->j_state_lock);
-        jbd2_log_wait_commit(journal, tid);
-}
-
 /**
  * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
  * @journal: journal for operation
@@ -1649,25 +1611,6 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
 
         ret = try_to_free_buffers(page);
 
-        /*
-         * There are a number of places where jbd2_journal_try_to_free_buffers()
-         * could race with jbd2_journal_commit_transaction(), the later still
-         * holds the reference to the buffers to free while processing them.
-         * try_to_free_buffers() failed to free those buffers. Some of the
-         * caller of releasepage() request page buffers to be dropped, otherwise
-         * treat the fail-to-free as errors (such as generic_file_direct_IO())
-         *
-         * So, if the caller of try_to_release_page() wants the synchronous
-         * behaviour(i.e make sure buffers are dropped upon return),
-         * let's wait for the current transaction to finish flush of
-         * dirty data buffers, then try to free those buffers again,
-         * with the journal locked.
-         */
-        if (ret == 0 && (gfp_mask & __GFP_WAIT) && (gfp_mask & __GFP_FS)) {
-                jbd2_journal_wait_for_transaction_sync_data(journal);
-                ret = try_to_free_buffers(page);
-        }
-
 busy:
         return ret;
 }
@@ -1693,8 +1636,13 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
 
         if (jh->b_cp_transaction) {
                 JBUFFER_TRACE(jh, "on running+cp transaction");
+                /*
+                 * We don't want to write the buffer anymore, clear the
+                 * bit so that we don't confuse checks in
+                 * __journal_file_buffer
+                 */
+                clear_buffer_dirty(bh);
                 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
-                clear_buffer_jbddirty(bh);
                 may_free = 0;
         } else {
                 JBUFFER_TRACE(jh, "on running transaction");
@@ -1945,12 +1893,17 @@ void __jbd2_journal_file_buffer(struct journal_head *jh,
         if (jh->b_transaction && jh->b_jlist == jlist)
                 return;
 
-        /* The following list of buffer states needs to be consistent
-         * with __jbd_unexpected_dirty_buffer()'s handling of dirty
-         * state. */
-
         if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
             jlist == BJ_Shadow || jlist == BJ_Forget) {
+                /*
+                 * For metadata buffers, we track dirty bit in buffer_jbddirty
+                 * instead of buffer_dirty. We should not see a dirty bit set
+                 * here because we clear it in do_get_write_access but e.g.
+                 * tune2fs can modify the sb and set the dirty bit at any time
+                 * so we try to gracefully handle that.
+                 */
+                if (buffer_dirty(bh))
+                        warn_dirty_buffer(bh);
                 if (test_clear_buffer_dirty(bh) ||
                     test_clear_buffer_jbddirty(bh))
                         was_dirty = 1;
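Taken together, the comments added in the last two hunks describe one convention from both ends: once a metadata buffer is under jbd2's control, its dirty state is tracked in the private buffer_jbddirty bit rather than the VFS buffer_dirty bit, so ordinary writeback cannot push the buffer to disk behind the journal's back. The transfer itself is the small pattern below (a sketch using the real buffer-head helpers; jbd2_take_over_dirty_bit() is a made-up name for illustration):

#include <linux/buffer_head.h>
#include <linux/jbd2.h>

/* Illustration only: move a buffer's VFS dirty bit to the journal's
 * private JBD-dirty bit, the same transfer that do_get_write_access()
 * and __jbd2_journal_file_buffer() perform for metadata buffers. */
static void jbd2_take_over_dirty_bit(struct buffer_head *bh)
{
        if (test_clear_buffer_dirty(bh))        /* clear BH_Dirty ... */
                set_buffer_jbddirty(bh);        /* ... and record it as BH_JBDDirty */
}

A buffer_dirty bit that is still set when a buffer is filed on one of the metadata lists is therefore unexpected, which is exactly the case warn_dirty_buffer() now reports; tune2fs modifying the superblock of a mounted filesystem is the known legitimate way to trigger it.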