path: root/fs/ext4/super.c
author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>    2009-01-05 22:19:52 -0500
committer Theodore Ts'o <tytso@mit.edu>    2009-01-05 22:19:52 -0500
commit    5d1b1b3f492f8696ea18950a454a141381b0f926 (patch)
tree      e6277cd3e01c074403b9da7390de1daa6b9f248f /fs/ext4/super.c
parent    b7be019e80da4db96d283734d55366014509911c (diff)
ext4: fix BUG when calling ext4_error with locked block group
The mballoc code likes to call ext4_error() while it is holding locked block groups. This can cause a scheduling-in-atomic-context BUG. We can't simply unlock the block group and relock it after/if ext4_error() returns, since that might result in race conditions when the filesystem is set to continue after finding errors.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
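As a rough illustration of the intended usage, here is a minimal, hypothetical mballoc-style check; the function and parameter names are illustrative and not part of this commit. It reports corruption through the new ext4_grp_locked_error() helper added below while the group lock is still held:

/*
 * Illustrative only -- not part of this commit.  A hypothetical
 * mballoc-style check that finds on-disk corruption while the
 * block-group lock is held.  Calling ext4_error() here could sleep
 * (journal abort, superblock commit) in atomic context; the new
 * ext4_grp_locked_error() knows the lock is held and only drops it
 * on the ERRORS_RO path, where relocking is harmless because the
 * filesystem has already been marked read-only.
 */
static void sketch_validate_group(struct super_block *sb, ext4_group_t group,
				  unsigned int free_in_bitmap,
				  unsigned int free_in_desc)
{
	ext4_lock_group(sb, group);
	if (free_in_bitmap != free_in_desc)
		ext4_grp_locked_error(sb, group, __func__,
				      "bitmap and group descriptor disagree: "
				      "%u vs %u free blocks",
				      free_in_bitmap, free_in_desc);
	ext4_unlock_group(sb, group);
}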
Diffstat (limited to 'fs/ext4/super.c')
-rw-r--r--  fs/ext4/super.c | 45 +++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 43 insertions(+), 2 deletions(-)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index a9dd1170bfea..2415e2b09707 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -366,6 +366,44 @@ void ext4_warning(struct super_block *sb, const char *function,
 	va_end(args);
 }
 
+void ext4_grp_locked_error(struct super_block *sb, ext4_group_t grp,
+			   const char *function, const char *fmt, ...)
+__releases(bitlock)
+__acquires(bitlock)
+{
+	va_list args;
+	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
+
+	va_start(args, fmt);
+	printk(KERN_CRIT "EXT4-fs error (device %s): %s: ", sb->s_id, function);
+	vprintk(fmt, args);
+	printk("\n");
+	va_end(args);
+
+	if (test_opt(sb, ERRORS_CONT)) {
+		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
+		ext4_commit_super(sb, es, 0);
+		return;
+	}
+	ext4_unlock_group(sb, grp);
+	ext4_handle_error(sb);
+	/*
+	 * We only get here in the ERRORS_RO case; relocking the group
+	 * may be dangerous, but nothing bad will happen since the
+	 * filesystem will have already been marked read/only and the
+	 * journal has been aborted.  We return 1 as a hint to callers
+	 * who might want to use the return value from
+	 * ext4_grp_locked_error() to distinguish between the
+	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
+	 * aggressively from the ext4 function in question, with a
+	 * more appropriate error code.
+	 */
+	ext4_lock_group(sb, grp);
+	return;
+}
+
+
 void ext4_update_dynamic_rev(struct super_block *sb)
 {
 	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
@@ -2868,8 +2906,11 @@ static void ext4_commit_super(struct super_block *sb,
 		set_buffer_uptodate(sbh);
 	}
 	es->s_wtime = cpu_to_le32(get_seconds());
-	ext4_free_blocks_count_set(es, ext4_count_free_blocks(sb));
-	es->s_free_inodes_count = cpu_to_le32(ext4_count_free_inodes(sb));
+	ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
+					&EXT4_SB(sb)->s_freeblocks_counter));
+	es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive(
+					&EXT4_SB(sb)->s_freeinodes_counter));
+
 	BUFFER_TRACE(sbh, "marking dirty");
 	mark_buffer_dirty(sbh);
 	if (sync) {
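The second hunk above makes ext4_commit_super() read the in-memory percpu counters instead of recomputing the free-block and free-inode counts from the group descriptors, which keeps the superblock commit cheap and lock-light when it is reached from ext4_grp_locked_error() with a block group still locked. A minimal sketch of that counter-read pattern, as it would sit inside fs/ext4/super.c (the wrapper name is hypothetical):

/*
 * Illustrative only.  percpu_counter_sum_positive() folds the per-CPU
 * deltas into a single non-negative total under the counter's own
 * spinlock; it does not sleep and does not take any block-group lock,
 * unlike a full rescan via ext4_count_free_blocks().  The wrapper name
 * below is hypothetical.
 */
#include <linux/percpu_counter.h>

static ext4_fsblk_t sketch_cached_free_blocks(struct super_block *sb)
{
	return percpu_counter_sum_positive(
			&EXT4_SB(sb)->s_freeblocks_counter);
}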