Diffstat (limited to 'fs/jbd')
-rw-r--r--  fs/jbd/commit.c      | 24 ++++++++++++------------
-rw-r--r--  fs/jbd/journal.c     |  4 ++--
-rw-r--r--  fs/jbd/revoke.c      |  2 +-
-rw-r--r--  fs/jbd/transaction.c |  2 +-
4 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 34a4861c14b8..69b180459463 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -20,6 +20,7 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/bio.h>
+#include <linux/blkdev.h>
 
 /*
  * Default IO end handler for temporary BJ_IO buffer_heads.
@@ -294,7 +295,7 @@ void journal_commit_transaction(journal_t *journal)
 	int first_tag = 0;
 	int tag_flag;
 	int i;
-	int write_op = WRITE_SYNC;
+	struct blk_plug plug;
 
 	/*
 	 * First job: lock down the current transaction and wait for
@@ -327,13 +328,6 @@ void journal_commit_transaction(journal_t *journal)
 	spin_lock(&journal->j_state_lock);
 	commit_transaction->t_state = T_LOCKED;
 
-	/*
-	 * Use plugged writes here, since we want to submit several before
-	 * we unplug the device. We don't do explicit unplugging in here,
-	 * instead we rely on sync_buffer() doing the unplug for us.
-	 */
-	if (commit_transaction->t_synchronous_commit)
-		write_op = WRITE_SYNC_PLUG;
 	spin_lock(&commit_transaction->t_handle_lock);
 	while (commit_transaction->t_updates) {
 		DEFINE_WAIT(wait);
@@ -368,7 +362,7 @@ void journal_commit_transaction(journal_t *journal)
 	 * we do not require it to remember exactly which old buffers it
 	 * has reserved. This is consistent with the existing behaviour
 	 * that multiple journal_get_write_access() calls to the same
-	 * buffer are perfectly permissable.
+	 * buffer are perfectly permissible.
 	 */
 	while (commit_transaction->t_reserved_list) {
 		jh = commit_transaction->t_reserved_list;
@@ -418,8 +412,10 @@ void journal_commit_transaction(journal_t *journal)
 	 * Now start flushing things to disk, in the order they appear
 	 * on the transaction lists. Data blocks go first.
 	 */
+	blk_start_plug(&plug);
 	err = journal_submit_data_buffers(journal, commit_transaction,
-					  write_op);
+					  WRITE_SYNC);
+	blk_finish_plug(&plug);
 
 	/*
 	 * Wait for all previously submitted IO to complete.
@@ -480,7 +476,9 @@ void journal_commit_transaction(journal_t *journal)
 		err = 0;
 	}
 
-	journal_write_revoke_records(journal, commit_transaction, write_op);
+	blk_start_plug(&plug);
+
+	journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC);
 
 	/*
 	 * If we found any dirty or locked buffers, then we should have
@@ -650,7 +648,7 @@ start_journal_io:
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
 			bh->b_end_io = journal_end_buffer_io_sync;
-			submit_bh(write_op, bh);
+			submit_bh(WRITE_SYNC, bh);
 		}
 		cond_resched();
 
@@ -661,6 +659,8 @@ start_journal_io:
 		}
 	}
 
+	blk_finish_plug(&plug);
+
 	/* Lo and behold: we have just managed to send a transaction to
 	   the log. Before we can commit it, wait for the IO so far to
 	   complete. Control buffers being written are on the
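
The commit.c hunks above replace the old implicit plugging (WRITE_SYNC_PLUG, which relied on sync_buffer() to unplug the device) with the explicit on-stack plug API. As an illustration only, not jbd code, the pattern looks roughly like the sketch below; submit_batch() is a hypothetical helper standing in for the journal's submission paths:

#include <linux/blkdev.h>
#include <linux/buffer_head.h>

/*
 * Sketch of the explicit plugging pattern adopted above: queue several
 * buffer writes behind an on-stack plug so the block layer can merge
 * and dispatch them together when the plug is released.
 */
static void submit_batch(struct buffer_head **bhs, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bh(WRITE_SYNC, bhs[i]);
	blk_finish_plug(&plug);
}

Each submit_bh() call still uses plain WRITE_SYNC; dispatch to the driver is simply deferred until blk_finish_plug(), which is why the WRITE_SYNC_PLUG variant and the t_synchronous_commit special case removed above are no longer needed.
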
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index eb11601f2e00..b3713afaaa9e 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -770,7 +770,7 @@ journal_t * journal_init_dev(struct block_device *bdev,
 	journal->j_wbufsize = n;
 	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
 	if (!journal->j_wbuf) {
-		printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
+		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
 			__func__);
 		goto out_err;
 	}
@@ -831,7 +831,7 @@ journal_t * journal_init_inode (struct inode *inode)
 	journal->j_wbufsize = n;
 	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
 	if (!journal->j_wbuf) {
-		printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
+		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
 			__func__);
 		goto out_err;
 	}
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index d29018307e2e..305a90763154 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -71,7 +71,7 @@
  * switching hash tables under them. For operations on the lists of entries in
  * the hash table j_revoke_lock is used.
  *
- * Finally, also replay code uses the hash tables but at this moment noone else
+ * Finally, also replay code uses the hash tables but at this moment no one else
  * can touch them (filesystem isn't mounted yet) and hence no locking is
  * needed.
  */
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 5b2e4c30a2a1..60d2319651b2 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1392,7 +1392,7 @@ int journal_stop(handle_t *handle)
 	 * by 30x or more...
 	 *
 	 * We try and optimize the sleep time against what the underlying disk
-	 * can do, instead of having a static sleep time. This is usefull for
+	 * can do, instead of having a static sleep time. This is useful for
 	 * the case where our storage is so fast that it is more optimal to go
 	 * ahead and force a flush and wait for the transaction to be committed
 	 * than it is to wait for an arbitrary amount of time for new writers to
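
The comment fixed above describes journal_stop()'s adaptive wait: rather than sleeping for a fixed period before a sync handle forces a commit, jbd compares the running transaction's age with the journal's measured average commit time. A rough sketch of that idea follows, using the jbd fields j_average_commit_time and t_start_time but wrapped in a hypothetical wait_for_more_writers() helper rather than the exact journal_stop() code:

#include <linux/hrtimer.h>
#include <linux/jbd.h>
#include <linux/ktime.h>
#include <linux/sched.h>

/*
 * Sketch: sleep only while the transaction is still younger than the
 * journal's average commit time, so additional writers can join it;
 * on fast storage the sleep is skipped and the commit proceeds at once.
 */
static void wait_for_more_writers(journal_t *journal,
				  transaction_t *transaction)
{
	u64 commit_time, trans_time;

	spin_lock(&journal->j_state_lock);
	commit_time = journal->j_average_commit_time;
	spin_unlock(&journal->j_state_lock);

	trans_time = ktime_to_ns(ktime_sub(ktime_get(),
					   transaction->t_start_time));

	if (trans_time < commit_time) {
		ktime_t expires = ktime_add_ns(ktime_get(), commit_time);

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
	}
}
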