author     Stefan Richter <stefanr@s5r6.in-berlin.de>   2011-05-10 14:52:07 -0400
committer  Stefan Richter <stefanr@s5r6.in-berlin.de>   2011-05-10 16:50:41 -0400
commit     020abf03cd659388f94cb328e1e1df0656e0d7ff (patch)
tree       40d05011708ad1b4a05928d167eb120420581aa6 /fs/jbd2
parent     0ff8fbc61727c926883eec381fbd3d32d1fab504 (diff)
parent     693d92a1bbc9e42681c42ed190bd42b636ca876f (diff)
Merge tag 'v2.6.39-rc7'
in order to pull in changes in drivers/media/dvb/firewire/ and
sound/firewire/.
Diffstat (limited to 'fs/jbd2')
-rw-r--r--   fs/jbd2/commit.c       | 28
-rw-r--r--   fs/jbd2/journal.c      | 52
-rw-r--r--   fs/jbd2/recovery.c     |  2
-rw-r--r--   fs/jbd2/revoke.c       |  2
-rw-r--r--   fs/jbd2/transaction.c  | 31
5 files changed, 63 insertions, 52 deletions
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index f3ad1598b201..6e28000a4b21 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -105,6 +105,8 @@ static int journal_submit_commit_record(journal_t *journal,
 	int ret;
 	struct timespec now = current_kernel_time();
 
+	*cbh = NULL;
+
 	if (is_journal_aborted(journal))
 		return 0;
 
@@ -137,9 +139,9 @@ static int journal_submit_commit_record(journal_t *journal,
 	if (journal->j_flags & JBD2_BARRIER &&
 	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
 				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
-		ret = submit_bh(WRITE_SYNC_PLUG | WRITE_FLUSH_FUA, bh);
+		ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
 	else
-		ret = submit_bh(WRITE_SYNC_PLUG, bh);
+		ret = submit_bh(WRITE_SYNC, bh);
 
 	*cbh = bh;
 	return ret;
@@ -329,7 +331,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	int tag_bytes = journal_tag_bytes(journal);
 	struct buffer_head *cbh = NULL; /* For transactional checksums */
 	__u32 crc32_sum = ~0;
-	int write_op = WRITE_SYNC;
+	struct blk_plug plug;
 
 	/*
 	 * First job: lock down the current transaction and wait for
@@ -363,13 +365,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	write_lock(&journal->j_state_lock);
 	commit_transaction->t_state = T_LOCKED;
 
-	/*
-	 * Use plugged writes here, since we want to submit several before
-	 * we unplug the device. We don't do explicit unplugging in here,
-	 * instead we rely on sync_buffer() doing the unplug for us.
-	 */
-	if (commit_transaction->t_synchronous_commit)
-		write_op = WRITE_SYNC_PLUG;
 	trace_jbd2_commit_locking(journal, commit_transaction);
 	stats.run.rs_wait = commit_transaction->t_max_wait;
 	stats.run.rs_locked = jiffies;
@@ -410,7 +405,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	 * we do not require it to remember exactly which old buffers it
 	 * has reserved.  This is consistent with the existing behaviour
 	 * that multiple jbd2_journal_get_write_access() calls to the same
-	 * buffer are perfectly permissable.
+	 * buffer are perfectly permissible.
 	 */
 	while (commit_transaction->t_reserved_list) {
 		jh = commit_transaction->t_reserved_list;
@@ -469,8 +464,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	if (err)
 		jbd2_journal_abort(journal, err);
 
+	blk_start_plug(&plug);
 	jbd2_journal_write_revoke_records(journal, commit_transaction,
-					  write_op);
+					  WRITE_SYNC);
+	blk_finish_plug(&plug);
 
 	jbd_debug(3, "JBD: commit phase 2\n");
 
@@ -497,6 +494,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 	err = 0;
 	descriptor = NULL;
 	bufs = 0;
+	blk_start_plug(&plug);
 	while (commit_transaction->t_buffers) {
 
 		/* Find the next buffer to be journaled... */
@@ -658,7 +656,7 @@ start_journal_io:
 			clear_buffer_dirty(bh);
 			set_buffer_uptodate(bh);
 			bh->b_end_io = journal_end_buffer_io_sync;
-			submit_bh(write_op, bh);
+			submit_bh(WRITE_SYNC, bh);
 		}
 		cond_resched();
 		stats.run.rs_blocks_logged += bufs;
@@ -699,6 +697,8 @@ start_journal_io:
 		__jbd2_journal_abort_hard(journal);
 	}
 
+	blk_finish_plug(&plug);
+
 	/* Lo and behold: we have just managed to send a transaction to
 	   the log.  Before we can commit it, wait for the IO so far to
 	   complete.  Control buffers being written are on the
@@ -808,7 +808,7 @@ wait_for_iobuf:
 		if (err)
 			__jbd2_journal_abort_hard(journal);
 	}
-	if (!err && !is_journal_aborted(journal))
+	if (cbh)
 		err = journal_wait_on_commit_record(journal, cbh);
 	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
 				      JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
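Note: the commit.c hunks above retire the implicit WRITE_SYNC_PLUG convention in favour of explicit on-stack block plugging. As a rough sketch of that pattern (the helper and its arguments are illustrative, not part of this patch), batching several buffer-head submissions inside a plug looks like:

#include <linux/blkdev.h>
#include <linux/buffer_head.h>

/* Illustrative only -- not code from this patch.  I/O submitted between
 * blk_start_plug() and blk_finish_plug() is held on the on-stack plug
 * and dispatched as a batch when the plug is finished (or earlier if
 * the task blocks). */
static void submit_batch(struct buffer_head **bhs, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bh(WRITE_SYNC, bhs[i]);	/* plain WRITE_SYNC, no *_PLUG flag */
	blk_finish_plug(&plug);
}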
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f837ba953529..e0ec3db1c395 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -43,6 +43,7 @@
 #include <linux/vmalloc.h>
 #include <linux/backing-dev.h>
 #include <linux/bitops.h>
+#include <linux/ratelimit.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/jbd2.h>
@@ -93,6 +94,7 @@ EXPORT_SYMBOL(jbd2_journal_file_inode);
 EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
 EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
+EXPORT_SYMBOL(jbd2_inode_cache);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
@@ -471,7 +473,8 @@ int __jbd2_log_space_left(journal_t *journal)
 }
 
 /*
- * Called under j_state_lock.  Returns true if a transaction commit was started.
+ * Called with j_state_lock locked for writing.
+ * Returns true if a transaction commit was started.
  */
 int __jbd2_log_start_commit(journal_t *journal, tid_t target)
 {
@@ -518,11 +521,13 @@ int jbd2_journal_force_commit_nested(journal_t *journal)
 {
 	transaction_t *transaction = NULL;
 	tid_t tid;
+	int need_to_start = 0;
 
 	read_lock(&journal->j_state_lock);
 	if (journal->j_running_transaction && !current->journal_info) {
 		transaction = journal->j_running_transaction;
-		__jbd2_log_start_commit(journal, transaction->t_tid);
+		if (!tid_geq(journal->j_commit_request, transaction->t_tid))
+			need_to_start = 1;
 	} else if (journal->j_committing_transaction)
 		transaction = journal->j_committing_transaction;
 
@@ -533,6 +538,8 @@ int jbd2_journal_force_commit_nested(journal_t *journal)
 
 	tid = transaction->t_tid;
 	read_unlock(&journal->j_state_lock);
+	if (need_to_start)
+		jbd2_log_start_commit(journal, tid);
 	jbd2_log_wait_commit(journal, tid);
 	return 1;
 }
@@ -827,7 +834,7 @@ static journal_t * journal_init_common (void)
 
 	journal = kzalloc(sizeof(*journal), GFP_KERNEL);
 	if (!journal)
-		goto fail;
+		return NULL;
 
 	init_waitqueue_head(&journal->j_wait_transaction_locked);
 	init_waitqueue_head(&journal->j_wait_logspace);
@@ -852,14 +859,12 @@ static journal_t * journal_init_common (void)
 	err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
 	if (err) {
 		kfree(journal);
-		goto fail;
+		return NULL;
 	}
 
 	spin_lock_init(&journal->j_history_lock);
 
 	return journal;
-fail:
-	return NULL;
 }
 
 /* jbd2_journal_init_dev and jbd2_journal_init_inode:
@@ -912,7 +917,7 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
 	journal->j_wbufsize = n;
 	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
 	if (!journal->j_wbuf) {
-		printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
+		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
 			__func__);
 		goto out_err;
 	}
@@ -978,7 +983,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
 	journal->j_wbufsize = n;
 	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
 	if (!journal->j_wbuf) {
-		printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
+		printk(KERN_ERR "%s: Can't allocate bhs for commit thread\n",
 			__func__);
 		goto out_err;
 	}
@@ -986,7 +991,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
 	err = jbd2_journal_bmap(journal, 0, &blocknr);
 	/* If that failed, give up */
 	if (err) {
-		printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
+		printk(KERN_ERR "%s: Cannot locate journal superblock\n",
 			__func__);
 		goto out_err;
 	}
@@ -1982,7 +1987,6 @@ static void jbd2_journal_destroy_jbd2_journal_head_cache(void)
 static struct journal_head *journal_alloc_journal_head(void)
 {
 	struct journal_head *ret;
-	static unsigned long last_warning;
 
 #ifdef CONFIG_JBD2_DEBUG
 	atomic_inc(&nr_journal_heads);
@@ -1990,11 +1994,7 @@ static struct journal_head *journal_alloc_journal_head(void)
 	ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
 	if (!ret) {
 		jbd_debug(1, "out of memory for journal_head\n");
-		if (time_after(jiffies, last_warning + 5*HZ)) {
-			printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-			       __func__);
-			last_warning = jiffies;
-		}
+		pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
 		while (!ret) {
 			yield();
 			ret = kmem_cache_alloc(jbd2_journal_head_cache, GFP_NOFS);
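Note: the hunk above drops the hand-rolled jiffies/last_warning throttle in favour of pr_notice_ratelimited() from <linux/ratelimit.h>. A minimal sketch of the idiom (the surrounding function is hypothetical, not from jbd2):

#include <linux/ratelimit.h>
#include <linux/slab.h>

static void *alloc_with_warning(struct kmem_cache *cache)
{
	void *p = kmem_cache_alloc(cache, GFP_NOFS);

	/* The ratelimit machinery suppresses repeated messages internally,
	 * so no static timestamp bookkeeping is needed at the call site. */
	if (!p)
		pr_notice_ratelimited("ENOMEM in %s, retrying.\n", __func__);
	return p;
}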
@@ -2292,17 +2292,19 @@ static void __exit jbd2_remove_jbd_stats_proc_entry(void)
 
 #endif
 
-struct kmem_cache *jbd2_handle_cache;
+struct kmem_cache *jbd2_handle_cache, *jbd2_inode_cache;
 
 static int __init journal_init_handle_cache(void)
 {
-	jbd2_handle_cache = kmem_cache_create("jbd2_journal_handle",
-				sizeof(handle_t),
-				0,		/* offset */
-				SLAB_TEMPORARY,	/* flags */
-				NULL);		/* ctor */
+	jbd2_handle_cache = KMEM_CACHE(jbd2_journal_handle, SLAB_TEMPORARY);
 	if (jbd2_handle_cache == NULL) {
-		printk(KERN_EMERG "JBD: failed to create handle cache\n");
+		printk(KERN_EMERG "JBD2: failed to create handle cache\n");
+		return -ENOMEM;
+	}
+	jbd2_inode_cache = KMEM_CACHE(jbd2_inode, 0);
+	if (jbd2_inode_cache == NULL) {
+		printk(KERN_EMERG "JBD2: failed to create inode cache\n");
+		kmem_cache_destroy(jbd2_handle_cache);
 		return -ENOMEM;
 	}
 	return 0;
@@ -2312,6 +2314,9 @@ static void jbd2_journal_destroy_handle_cache(void)
 {
 	if (jbd2_handle_cache)
 		kmem_cache_destroy(jbd2_handle_cache);
+	if (jbd2_inode_cache)
+		kmem_cache_destroy(jbd2_inode_cache);
+
 }
 
 /*
@@ -2408,10 +2413,12 @@ const char *jbd2_dev_to_name(dev_t device)
 	new_dev = kmalloc(sizeof(struct devname_cache), GFP_KERNEL);
 	if (!new_dev)
 		return "NODEV-ALLOCFAILURE"; /* Something non-NULL */
+	bd = bdget(device);
 	spin_lock(&devname_cache_lock);
 	if (devcache[i]) {
 		if (devcache[i]->device == device) {
 			kfree(new_dev);
+			bdput(bd);
 			ret = devcache[i]->devname;
 			spin_unlock(&devname_cache_lock);
 			return ret;
@@ -2420,7 +2427,6 @@ const char *jbd2_dev_to_name(dev_t device)
 	}
 	devcache[i] = new_dev;
 	devcache[i]->device = device;
-	bd = bdget(device);
 	if (bd) {
 		bdevname(bd, devcache[i]->devname);
 		bdput(bd);
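Note: journal_init_handle_cache() above swaps an open-coded kmem_cache_create() for the KMEM_CACHE() helper, which derives the cache name, object size and alignment from the struct type, and it now also sets up (and on failure tears down) jbd2_inode_cache. A small sketch of the helper with a made-up struct, not taken from jbd2:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_record {			/* hypothetical type, for illustration */
	u64	key;
	void	*payload;
};

static struct kmem_cache *demo_cache;

static int __init demo_cache_init(void)
{
	/* Roughly equivalent to kmem_cache_create("demo_record",
	 * sizeof(struct demo_record), __alignof__(struct demo_record),
	 * 0, NULL). */
	demo_cache = KMEM_CACHE(demo_record, 0);
	return demo_cache ? 0 : -ENOMEM;
}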
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 2bc4d5f116f1..1cad869494f0 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -299,10 +299,10 @@ int jbd2_journal_skip_recovery(journal_t *journal)
 #ifdef CONFIG_JBD2_DEBUG
 		int dropped = info.end_transaction -
 			be32_to_cpu(journal->j_superblock->s_sequence);
-#endif
 		jbd_debug(1,
 			  "JBD: ignoring %d transaction%s from the journal.\n",
 			  dropped, (dropped == 1) ? "" : "s");
+#endif
 		journal->j_transaction_sequence = ++info.end_transaction;
 	}
 
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 9ad321fd63fd..69fd93588118 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -71,7 +71,7 @@
 * switching hash tables under them. For operations on the lists of entries in
 * the hash table j_revoke_lock is used.
 *
- * Finally, also replay code uses the hash tables but at this moment noone else
+ * Finally, also replay code uses the hash tables but at this moment no one else
 * can touch them (filesystem isn't mounted yet) and hence no locking is
 * needed.
 */
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 6bf0a242613e..05fa77a23711 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -117,10 +117,10 @@ static inline void update_t_max_wait(transaction_t *transaction)
 static int start_this_handle(journal_t *journal, handle_t *handle,
 			     int gfp_mask)
 {
-	transaction_t *transaction;
-	int needed;
-	int nblocks = handle->h_buffer_credits;
-	transaction_t *new_transaction = NULL;
+	transaction_t *transaction, *new_transaction = NULL;
+	tid_t tid;
+	int needed, need_to_start;
+	int nblocks = handle->h_buffer_credits;
 
 	if (nblocks > journal->j_max_transaction_buffers) {
 		printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
@@ -222,8 +222,11 @@ repeat:
 		atomic_sub(nblocks, &transaction->t_outstanding_credits);
 		prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
 				TASK_UNINTERRUPTIBLE);
-		__jbd2_log_start_commit(journal, transaction->t_tid);
+		tid = transaction->t_tid;
+		need_to_start = !tid_geq(journal->j_commit_request, tid);
 		read_unlock(&journal->j_state_lock);
+		if (need_to_start)
+			jbd2_log_start_commit(journal, tid);
 		schedule();
 		finish_wait(&journal->j_wait_transaction_locked, &wait);
 		goto repeat;
@@ -251,7 +254,7 @@ repeat:
 	 * the committing transaction.  Really, we only need to give it
 	 * committing_transaction->t_outstanding_credits plus "enough" for
 	 * the log control blocks.
-	 * Also, this test is inconsitent with the matching one in
+	 * Also, this test is inconsistent with the matching one in
 	 * jbd2_journal_extend().
 	 */
 	if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
@@ -340,9 +343,7 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask)
 		jbd2_free_handle(handle);
 		current->journal_info = NULL;
 		handle = ERR_PTR(err);
-		goto out;
 	}
-out:
 	return handle;
 }
 EXPORT_SYMBOL(jbd2__journal_start);
@@ -444,7 +445,8 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask)
 {
 	transaction_t *transaction = handle->h_transaction;
 	journal_t *journal = transaction->t_journal;
-	int ret;
+	tid_t tid;
+	int need_to_start, ret;
 
 	/* If we've had an abort of any type, don't even think about
 	 * actually doing the restart! */
@@ -467,8 +469,11 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask)
 	spin_unlock(&transaction->t_handle_lock);
 
 	jbd_debug(2, "restarting handle %p\n", handle);
-	__jbd2_log_start_commit(journal, transaction->t_tid);
+	tid = transaction->t_tid;
+	need_to_start = !tid_geq(journal->j_commit_request, tid);
 	read_unlock(&journal->j_state_lock);
+	if (need_to_start)
+		jbd2_log_start_commit(journal, tid);
 
 	lock_map_release(&handle->h_lockdep_map);
 	handle->h_buffer_credits = nblocks;
@@ -589,7 +594,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
 	transaction = handle->h_transaction;
 	journal = transaction->t_journal;
 
-	jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
+	jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
 
 	JBUFFER_TRACE(jh, "entry");
 repeat:
@@ -774,7 +779,7 @@ done:
 	J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
 		    "Possible IO failure.\n");
 	page = jh2bh(jh)->b_page;
-	offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
+	offset = offset_in_page(jh2bh(jh)->b_data);
 	source = kmap_atomic(page, KM_USER0);
 	/* Fire data frozen trigger just before we copy the data */
 	jbd2_buffer_frozen_trigger(jh, source + offset,
@@ -1398,7 +1403,7 @@ int jbd2_journal_stop(handle_t *handle)
 
 	/*
 	 * Once we drop t_updates, if it goes to zero the transaction
-	 * could start commiting on us and eventually disappear.  So
+	 * could start committing on us and eventually disappear.  So
 	 * once we do this, we must not dereference transaction
 	 * pointer again.
 	 */
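Note: the call sites changed above (two in transaction.c, one in journal.c) all follow the same pattern: __jbd2_log_start_commit() now expects j_state_lock held for writing, so a caller holding only the read lock records the tid, decides via tid_geq() whether a commit still needs to be requested, drops the read lock, and only then calls jbd2_log_start_commit(), which takes the write lock itself. Condensed restatement of that pattern (the wrapper function is hypothetical; its body mirrors the hunks above):

#include <linux/jbd2.h>

static void kick_commit_if_needed(journal_t *journal, transaction_t *transaction)
{
	tid_t tid;
	int need_to_start;

	read_lock(&journal->j_state_lock);
	tid = transaction->t_tid;
	/* Only request a commit if one is not already pending for this tid. */
	need_to_start = !tid_geq(journal->j_commit_request, tid);
	read_unlock(&journal->j_state_lock);

	if (need_to_start)
		jbd2_log_start_commit(journal, tid);	/* takes j_state_lock for writing */
}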