diff options
author | Lukas Czerner <lczerner@redhat.com> | 2010-10-27 21:30:05 -0400 |
---|---|---|
committer | Theodore Ts'o <tytso@mit.edu> | 2010-10-27 21:30:05 -0400 |
commit | bfff68738f1cb5c93dab1114634cea02aae9e7ba (patch) | |
tree | b6cdf3f26e86464c7088cab62d837eb32f559fb9 /fs/ext4/super.c | |
parent | e6fa0be699449d28a20e815bfe9ce26725ec4962 (diff) |
ext4: add support for lazy inode table initialization
When the lazy_itable_init extended option is passed to mke2fs, it
considerably speeds up filesystem creation because inode tables are
not zeroed out. The fact that parts of the inode table are
uninitialized is not a problem so long as the block group descriptors,
which contain information regarding how much of the inode table has
been initialized, has not been corrupted However, if the block group
checksums are not valid, e2fsck must scan the entire inode table, and
the old, uninitialized data could potentially cause e2fsck to
report false problems.
Hence, it is important for the inode tables to be initialized as soon
as possible. This commit adds this feature so that mke2fs can safely
use the lazy inode table initialization feature to speed up formatting
file systems.
This is done via a new kernel thread called ext4lazyinit, which is
created on demand and destroyed, when it is no longer needed. There
is only one thread for all ext4 filesystems in the system. When the
first filesystem with the init_itable mount option is mounted, the ext4lazyinit
thread is created, then the filesystem can register its request in the
request list.
This thread then walks through the list of requests picking up
scheduled requests and invoking ext4_init_inode_table(). Next schedule
time for the request is computed by multiplying the time it took to
zero out last inode table with wait multiplier, which can be set with
the (init_itable=n) mount option (default is 10). We are doing
this so we do not take the whole I/O bandwidth. When the thread is no
longer necessary (request list is empty) it frees the appropriate
structures and exits (and can be created later by another
filesystem).
We do not disturb regular inode allocations in any way; they just do not
care whether the inode table is, or is not zeroed. But when zeroing, we
have to skip used inodes, obviously. Also we should prevent new inode
allocations from the group, while zeroing is on the way. For that we
take write alloc_sem lock in ext4_init_inode_table() and read alloc_sem
in the ext4_claim_inode, so when we are unlucky and allocator hits the
group which is currently being zeroed, it just has to wait.
This can be suppressed using the mount option noinit_itable.
Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4/super.c')
-rw-r--r-- | fs/ext4/super.c | 440 |
1 files changed, 437 insertions, 3 deletions
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 751997d2cefe..5066537e5a38 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -41,6 +41,9 @@ | |||
41 | #include <linux/crc16.h> | 41 | #include <linux/crc16.h> |
42 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
43 | 43 | ||
44 | #include <linux/kthread.h> | ||
45 | #include <linux/freezer.h> | ||
46 | |||
44 | #include "ext4.h" | 47 | #include "ext4.h" |
45 | #include "ext4_jbd2.h" | 48 | #include "ext4_jbd2.h" |
46 | #include "xattr.h" | 49 | #include "xattr.h" |
@@ -52,6 +55,8 @@ | |||
52 | 55 | ||
53 | struct proc_dir_entry *ext4_proc_root; | 56 | struct proc_dir_entry *ext4_proc_root; |
54 | static struct kset *ext4_kset; | 57 | static struct kset *ext4_kset; |
58 | struct ext4_lazy_init *ext4_li_info; | ||
59 | struct mutex ext4_li_mtx; | ||
55 | 60 | ||
56 | static int ext4_load_journal(struct super_block *, struct ext4_super_block *, | 61 | static int ext4_load_journal(struct super_block *, struct ext4_super_block *, |
57 | unsigned long journal_devnum); | 62 | unsigned long journal_devnum); |
@@ -70,6 +75,8 @@ static void ext4_write_super(struct super_block *sb); | |||
70 | static int ext4_freeze(struct super_block *sb); | 75 | static int ext4_freeze(struct super_block *sb); |
71 | static int ext4_get_sb(struct file_system_type *fs_type, int flags, | 76 | static int ext4_get_sb(struct file_system_type *fs_type, int flags, |
72 | const char *dev_name, void *data, struct vfsmount *mnt); | 77 | const char *dev_name, void *data, struct vfsmount *mnt); |
78 | static void ext4_destroy_lazyinit_thread(void); | ||
79 | static void ext4_unregister_li_request(struct super_block *sb); | ||
73 | 80 | ||
74 | #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) | 81 | #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) |
75 | static struct file_system_type ext3_fs_type = { | 82 | static struct file_system_type ext3_fs_type = { |
@@ -720,6 +727,7 @@ static void ext4_put_super(struct super_block *sb) | |||
720 | } | 727 | } |
721 | 728 | ||
722 | del_timer(&sbi->s_err_report); | 729 | del_timer(&sbi->s_err_report); |
730 | ext4_unregister_li_request(sb); | ||
723 | ext4_release_system_zone(sb); | 731 | ext4_release_system_zone(sb); |
724 | ext4_mb_release(sb); | 732 | ext4_mb_release(sb); |
725 | ext4_ext_release(sb); | 733 | ext4_ext_release(sb); |
@@ -1046,6 +1054,12 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
1046 | !(def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)) | 1054 | !(def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)) |
1047 | seq_puts(seq, ",block_validity"); | 1055 | seq_puts(seq, ",block_validity"); |
1048 | 1056 | ||
1057 | if (!test_opt(sb, INIT_INODE_TABLE)) | ||
1058 | seq_puts(seq, ",noinit_inode_table"); | ||
1059 | else if (sbi->s_li_wait_mult) | ||
1060 | seq_printf(seq, ",init_inode_table=%u", | ||
1061 | (unsigned) sbi->s_li_wait_mult); | ||
1062 | |||
1049 | ext4_show_quota_options(seq, sb); | 1063 | ext4_show_quota_options(seq, sb); |
1050 | 1064 | ||
1051 | return 0; | 1065 | return 0; |
@@ -1220,6 +1234,7 @@ enum { | |||
1220 | Opt_inode_readahead_blks, Opt_journal_ioprio, | 1234 | Opt_inode_readahead_blks, Opt_journal_ioprio, |
1221 | Opt_dioread_nolock, Opt_dioread_lock, | 1235 | Opt_dioread_nolock, Opt_dioread_lock, |
1222 | Opt_discard, Opt_nodiscard, | 1236 | Opt_discard, Opt_nodiscard, |
1237 | Opt_init_inode_table, Opt_noinit_inode_table, | ||
1223 | }; | 1238 | }; |
1224 | 1239 | ||
1225 | static const match_table_t tokens = { | 1240 | static const match_table_t tokens = { |
@@ -1290,6 +1305,9 @@ static const match_table_t tokens = { | |||
1290 | {Opt_dioread_lock, "dioread_lock"}, | 1305 | {Opt_dioread_lock, "dioread_lock"}, |
1291 | {Opt_discard, "discard"}, | 1306 | {Opt_discard, "discard"}, |
1292 | {Opt_nodiscard, "nodiscard"}, | 1307 | {Opt_nodiscard, "nodiscard"}, |
1308 | {Opt_init_inode_table, "init_itable=%u"}, | ||
1309 | {Opt_init_inode_table, "init_itable"}, | ||
1310 | {Opt_noinit_inode_table, "noinit_itable"}, | ||
1293 | {Opt_err, NULL}, | 1311 | {Opt_err, NULL}, |
1294 | }; | 1312 | }; |
1295 | 1313 | ||
@@ -1760,6 +1778,20 @@ set_qf_format: | |||
1760 | case Opt_dioread_lock: | 1778 | case Opt_dioread_lock: |
1761 | clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); | 1779 | clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK); |
1762 | break; | 1780 | break; |
1781 | case Opt_init_inode_table: | ||
1782 | set_opt(sbi->s_mount_opt, INIT_INODE_TABLE); | ||
1783 | if (args[0].from) { | ||
1784 | if (match_int(&args[0], &option)) | ||
1785 | return 0; | ||
1786 | } else | ||
1787 | option = EXT4_DEF_LI_WAIT_MULT; | ||
1788 | if (option < 0) | ||
1789 | return 0; | ||
1790 | sbi->s_li_wait_mult = option; | ||
1791 | break; | ||
1792 | case Opt_noinit_inode_table: | ||
1793 | clear_opt(sbi->s_mount_opt, INIT_INODE_TABLE); | ||
1794 | break; | ||
1763 | default: | 1795 | default: |
1764 | ext4_msg(sb, KERN_ERR, | 1796 | ext4_msg(sb, KERN_ERR, |
1765 | "Unrecognized mount option \"%s\" " | 1797 | "Unrecognized mount option \"%s\" " |
@@ -1943,7 +1975,8 @@ int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group, | |||
1943 | } | 1975 | } |
1944 | 1976 | ||
1945 | /* Called at mount-time, super-block is locked */ | 1977 | /* Called at mount-time, super-block is locked */ |
1946 | static int ext4_check_descriptors(struct super_block *sb) | 1978 | static int ext4_check_descriptors(struct super_block *sb, |
1979 | ext4_group_t *first_not_zeroed) | ||
1947 | { | 1980 | { |
1948 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 1981 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
1949 | ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); | 1982 | ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); |
@@ -1952,7 +1985,7 @@ static int ext4_check_descriptors(struct super_block *sb) | |||
1952 | ext4_fsblk_t inode_bitmap; | 1985 | ext4_fsblk_t inode_bitmap; |
1953 | ext4_fsblk_t inode_table; | 1986 | ext4_fsblk_t inode_table; |
1954 | int flexbg_flag = 0; | 1987 | int flexbg_flag = 0; |
1955 | ext4_group_t i; | 1988 | ext4_group_t i, grp = sbi->s_groups_count; |
1956 | 1989 | ||
1957 | if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) | 1990 | if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) |
1958 | flexbg_flag = 1; | 1991 | flexbg_flag = 1; |
@@ -1968,6 +2001,10 @@ static int ext4_check_descriptors(struct super_block *sb) | |||
1968 | last_block = first_block + | 2001 | last_block = first_block + |
1969 | (EXT4_BLOCKS_PER_GROUP(sb) - 1); | 2002 | (EXT4_BLOCKS_PER_GROUP(sb) - 1); |
1970 | 2003 | ||
2004 | if ((grp == sbi->s_groups_count) && | ||
2005 | !(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) | ||
2006 | grp = i; | ||
2007 | |||
1971 | block_bitmap = ext4_block_bitmap(sb, gdp); | 2008 | block_bitmap = ext4_block_bitmap(sb, gdp); |
1972 | if (block_bitmap < first_block || block_bitmap > last_block) { | 2009 | if (block_bitmap < first_block || block_bitmap > last_block) { |
1973 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " | 2010 | ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " |
@@ -2005,6 +2042,8 @@ static int ext4_check_descriptors(struct super_block *sb) | |||
2005 | if (!flexbg_flag) | 2042 | if (!flexbg_flag) |
2006 | first_block += EXT4_BLOCKS_PER_GROUP(sb); | 2043 | first_block += EXT4_BLOCKS_PER_GROUP(sb); |
2007 | } | 2044 | } |
2045 | if (NULL != first_not_zeroed) | ||
2046 | *first_not_zeroed = grp; | ||
2008 | 2047 | ||
2009 | ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb)); | 2048 | ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb)); |
2010 | sbi->s_es->s_free_inodes_count =cpu_to_le32(ext4_count_free_inodes(sb)); | 2049 | sbi->s_es->s_free_inodes_count =cpu_to_le32(ext4_count_free_inodes(sb)); |
@@ -2543,6 +2582,378 @@ static void print_daily_error_info(unsigned long arg) | |||
2543 | mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ | 2582 | mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ |
2544 | } | 2583 | } |
2545 | 2584 | ||
2585 | static void ext4_lazyinode_timeout(unsigned long data) | ||
2586 | { | ||
2587 | struct task_struct *p = (struct task_struct *)data; | ||
2588 | wake_up_process(p); | ||
2589 | } | ||
2590 | |||
2591 | /* Find next suitable group and run ext4_init_inode_table */ | ||
2592 | static int ext4_run_li_request(struct ext4_li_request *elr) | ||
2593 | { | ||
2594 | struct ext4_group_desc *gdp = NULL; | ||
2595 | ext4_group_t group, ngroups; | ||
2596 | struct super_block *sb; | ||
2597 | unsigned long timeout = 0; | ||
2598 | int ret = 0; | ||
2599 | |||
2600 | sb = elr->lr_super; | ||
2601 | ngroups = EXT4_SB(sb)->s_groups_count; | ||
2602 | |||
2603 | for (group = elr->lr_next_group; group < ngroups; group++) { | ||
2604 | gdp = ext4_get_group_desc(sb, group, NULL); | ||
2605 | if (!gdp) { | ||
2606 | ret = 1; | ||
2607 | break; | ||
2608 | } | ||
2609 | |||
2610 | if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) | ||
2611 | break; | ||
2612 | } | ||
2613 | |||
2614 | if (group == ngroups) | ||
2615 | ret = 1; | ||
2616 | |||
2617 | if (!ret) { | ||
2618 | timeout = jiffies; | ||
2619 | ret = ext4_init_inode_table(sb, group, | ||
2620 | elr->lr_timeout ? 0 : 1); | ||
2621 | if (elr->lr_timeout == 0) { | ||
2622 | timeout = jiffies - timeout; | ||
2623 | if (elr->lr_sbi->s_li_wait_mult) | ||
2624 | timeout *= elr->lr_sbi->s_li_wait_mult; | ||
2625 | else | ||
2626 | timeout *= 20; | ||
2627 | elr->lr_timeout = timeout; | ||
2628 | } | ||
2629 | elr->lr_next_sched = jiffies + elr->lr_timeout; | ||
2630 | elr->lr_next_group = group + 1; | ||
2631 | } | ||
2632 | |||
2633 | return ret; | ||
2634 | } | ||
2635 | |||
2636 | /* | ||
2637 | * Remove lr_request from the list_request and free the | ||
2638 | * request tructure. Should be called with li_list_mtx held | ||
2639 | */ | ||
2640 | static void ext4_remove_li_request(struct ext4_li_request *elr) | ||
2641 | { | ||
2642 | struct ext4_sb_info *sbi; | ||
2643 | |||
2644 | if (!elr) | ||
2645 | return; | ||
2646 | |||
2647 | sbi = elr->lr_sbi; | ||
2648 | |||
2649 | list_del(&elr->lr_request); | ||
2650 | sbi->s_li_request = NULL; | ||
2651 | kfree(elr); | ||
2652 | } | ||
2653 | |||
2654 | static void ext4_unregister_li_request(struct super_block *sb) | ||
2655 | { | ||
2656 | struct ext4_li_request *elr = EXT4_SB(sb)->s_li_request; | ||
2657 | |||
2658 | if (!ext4_li_info) | ||
2659 | return; | ||
2660 | |||
2661 | mutex_lock(&ext4_li_info->li_list_mtx); | ||
2662 | ext4_remove_li_request(elr); | ||
2663 | mutex_unlock(&ext4_li_info->li_list_mtx); | ||
2664 | } | ||
2665 | |||
2666 | /* | ||
2667 | * This is the function where ext4lazyinit thread lives. It walks | ||
2668 | * through the request list searching for next scheduled filesystem. | ||
2669 | * When such a fs is found, run the lazy initialization request | ||
2670 | * (ext4_rn_li_request) and keep track of the time spend in this | ||
2671 | * function. Based on that time we compute next schedule time of | ||
2672 | * the request. When walking through the list is complete, compute | ||
2673 | * next waking time and put itself into sleep. | ||
2674 | */ | ||
2675 | static int ext4_lazyinit_thread(void *arg) | ||
2676 | { | ||
2677 | struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg; | ||
2678 | struct list_head *pos, *n; | ||
2679 | struct ext4_li_request *elr; | ||
2680 | unsigned long next_wakeup; | ||
2681 | DEFINE_WAIT(wait); | ||
2682 | int ret; | ||
2683 | |||
2684 | BUG_ON(NULL == eli); | ||
2685 | |||
2686 | eli->li_timer.data = (unsigned long)current; | ||
2687 | eli->li_timer.function = ext4_lazyinode_timeout; | ||
2688 | |||
2689 | eli->li_task = current; | ||
2690 | wake_up(&eli->li_wait_task); | ||
2691 | |||
2692 | cont_thread: | ||
2693 | while (true) { | ||
2694 | next_wakeup = MAX_JIFFY_OFFSET; | ||
2695 | |||
2696 | mutex_lock(&eli->li_list_mtx); | ||
2697 | if (list_empty(&eli->li_request_list)) { | ||
2698 | mutex_unlock(&eli->li_list_mtx); | ||
2699 | goto exit_thread; | ||
2700 | } | ||
2701 | |||
2702 | list_for_each_safe(pos, n, &eli->li_request_list) { | ||
2703 | elr = list_entry(pos, struct ext4_li_request, | ||
2704 | lr_request); | ||
2705 | |||
2706 | if (time_after_eq(jiffies, elr->lr_next_sched)) | ||
2707 | ret = ext4_run_li_request(elr); | ||
2708 | |||
2709 | if (ret) { | ||
2710 | ret = 0; | ||
2711 | ext4_remove_li_request(elr); | ||
2712 | continue; | ||
2713 | } | ||
2714 | |||
2715 | if (time_before(elr->lr_next_sched, next_wakeup)) | ||
2716 | next_wakeup = elr->lr_next_sched; | ||
2717 | } | ||
2718 | mutex_unlock(&eli->li_list_mtx); | ||
2719 | |||
2720 | if (freezing(current)) | ||
2721 | refrigerator(); | ||
2722 | |||
2723 | if (time_after_eq(jiffies, next_wakeup)) { | ||
2724 | cond_resched(); | ||
2725 | continue; | ||
2726 | } | ||
2727 | |||
2728 | eli->li_timer.expires = next_wakeup; | ||
2729 | add_timer(&eli->li_timer); | ||
2730 | prepare_to_wait(&eli->li_wait_daemon, &wait, | ||
2731 | TASK_INTERRUPTIBLE); | ||
2732 | if (time_before(jiffies, next_wakeup)) | ||
2733 | schedule(); | ||
2734 | finish_wait(&eli->li_wait_daemon, &wait); | ||
2735 | } | ||
2736 | |||
2737 | exit_thread: | ||
2738 | /* | ||
2739 | * It looks like the request list is empty, but we need | ||
2740 | * to check it under the li_list_mtx lock, to prevent any | ||
2741 | * additions into it, and of course we should lock ext4_li_mtx | ||
2742 | * to atomically free the list and ext4_li_info, because at | ||
2743 | * this point another ext4 filesystem could be registering | ||
2744 | * new one. | ||
2745 | */ | ||
2746 | mutex_lock(&ext4_li_mtx); | ||
2747 | mutex_lock(&eli->li_list_mtx); | ||
2748 | if (!list_empty(&eli->li_request_list)) { | ||
2749 | mutex_unlock(&eli->li_list_mtx); | ||
2750 | mutex_unlock(&ext4_li_mtx); | ||
2751 | goto cont_thread; | ||
2752 | } | ||
2753 | mutex_unlock(&eli->li_list_mtx); | ||
2754 | del_timer_sync(&ext4_li_info->li_timer); | ||
2755 | eli->li_task = NULL; | ||
2756 | wake_up(&eli->li_wait_task); | ||
2757 | |||
2758 | kfree(ext4_li_info); | ||
2759 | ext4_li_info = NULL; | ||
2760 | mutex_unlock(&ext4_li_mtx); | ||
2761 | |||
2762 | return 0; | ||
2763 | } | ||
2764 | |||
2765 | static void ext4_clear_request_list(void) | ||
2766 | { | ||
2767 | struct list_head *pos, *n; | ||
2768 | struct ext4_li_request *elr; | ||
2769 | |||
2770 | mutex_lock(&ext4_li_info->li_list_mtx); | ||
2771 | if (list_empty(&ext4_li_info->li_request_list)) | ||
2772 | return; | ||
2773 | |||
2774 | list_for_each_safe(pos, n, &ext4_li_info->li_request_list) { | ||
2775 | elr = list_entry(pos, struct ext4_li_request, | ||
2776 | lr_request); | ||
2777 | ext4_remove_li_request(elr); | ||
2778 | } | ||
2779 | mutex_unlock(&ext4_li_info->li_list_mtx); | ||
2780 | } | ||
2781 | |||
2782 | static int ext4_run_lazyinit_thread(void) | ||
2783 | { | ||
2784 | struct task_struct *t; | ||
2785 | |||
2786 | t = kthread_run(ext4_lazyinit_thread, ext4_li_info, "ext4lazyinit"); | ||
2787 | if (IS_ERR(t)) { | ||
2788 | int err = PTR_ERR(t); | ||
2789 | ext4_clear_request_list(); | ||
2790 | del_timer_sync(&ext4_li_info->li_timer); | ||
2791 | kfree(ext4_li_info); | ||
2792 | ext4_li_info = NULL; | ||
2793 | printk(KERN_CRIT "EXT4: error %d creating inode table " | ||
2794 | "initialization thread\n", | ||
2795 | err); | ||
2796 | return err; | ||
2797 | } | ||
2798 | ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING; | ||
2799 | |||
2800 | wait_event(ext4_li_info->li_wait_task, ext4_li_info->li_task != NULL); | ||
2801 | return 0; | ||
2802 | } | ||
2803 | |||
2804 | /* | ||
2805 | * Check whether it make sense to run itable init. thread or not. | ||
2806 | * If there is at least one uninitialized inode table, return | ||
2807 | * corresponding group number, else the loop goes through all | ||
2808 | * groups and return total number of groups. | ||
2809 | */ | ||
2810 | static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) | ||
2811 | { | ||
2812 | ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; | ||
2813 | struct ext4_group_desc *gdp = NULL; | ||
2814 | |||
2815 | for (group = 0; group < ngroups; group++) { | ||
2816 | gdp = ext4_get_group_desc(sb, group, NULL); | ||
2817 | if (!gdp) | ||
2818 | continue; | ||
2819 | |||
2820 | if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) | ||
2821 | break; | ||
2822 | } | ||
2823 | |||
2824 | return group; | ||
2825 | } | ||
2826 | |||
2827 | static int ext4_li_info_new(void) | ||
2828 | { | ||
2829 | struct ext4_lazy_init *eli = NULL; | ||
2830 | |||
2831 | eli = kzalloc(sizeof(*eli), GFP_KERNEL); | ||
2832 | if (!eli) | ||
2833 | return -ENOMEM; | ||
2834 | |||
2835 | eli->li_task = NULL; | ||
2836 | INIT_LIST_HEAD(&eli->li_request_list); | ||
2837 | mutex_init(&eli->li_list_mtx); | ||
2838 | |||
2839 | init_waitqueue_head(&eli->li_wait_daemon); | ||
2840 | init_waitqueue_head(&eli->li_wait_task); | ||
2841 | init_timer(&eli->li_timer); | ||
2842 | eli->li_state |= EXT4_LAZYINIT_QUIT; | ||
2843 | |||
2844 | ext4_li_info = eli; | ||
2845 | |||
2846 | return 0; | ||
2847 | } | ||
2848 | |||
2849 | static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, | ||
2850 | ext4_group_t start) | ||
2851 | { | ||
2852 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
2853 | struct ext4_li_request *elr; | ||
2854 | unsigned long rnd; | ||
2855 | |||
2856 | elr = kzalloc(sizeof(*elr), GFP_KERNEL); | ||
2857 | if (!elr) | ||
2858 | return NULL; | ||
2859 | |||
2860 | elr->lr_super = sb; | ||
2861 | elr->lr_sbi = sbi; | ||
2862 | elr->lr_next_group = start; | ||
2863 | |||
2864 | /* | ||
2865 | * Randomize first schedule time of the request to | ||
2866 | * spread the inode table initialization requests | ||
2867 | * better. | ||
2868 | */ | ||
2869 | get_random_bytes(&rnd, sizeof(rnd)); | ||
2870 | elr->lr_next_sched = jiffies + (unsigned long)rnd % | ||
2871 | (EXT4_DEF_LI_MAX_START_DELAY * HZ); | ||
2872 | |||
2873 | return elr; | ||
2874 | } | ||
2875 | |||
2876 | static int ext4_register_li_request(struct super_block *sb, | ||
2877 | ext4_group_t first_not_zeroed) | ||
2878 | { | ||
2879 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
2880 | struct ext4_li_request *elr; | ||
2881 | ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; | ||
2882 | int ret = 0; | ||
2883 | |||
2884 | if (sbi->s_li_request != NULL) | ||
2885 | goto out; | ||
2886 | |||
2887 | if (first_not_zeroed == ngroups || | ||
2888 | (sb->s_flags & MS_RDONLY) || | ||
2889 | !test_opt(sb, INIT_INODE_TABLE)) { | ||
2890 | sbi->s_li_request = NULL; | ||
2891 | goto out; | ||
2892 | } | ||
2893 | |||
2894 | if (first_not_zeroed == ngroups) { | ||
2895 | sbi->s_li_request = NULL; | ||
2896 | goto out; | ||
2897 | } | ||
2898 | |||
2899 | elr = ext4_li_request_new(sb, first_not_zeroed); | ||
2900 | if (!elr) { | ||
2901 | ret = -ENOMEM; | ||
2902 | goto out; | ||
2903 | } | ||
2904 | |||
2905 | mutex_lock(&ext4_li_mtx); | ||
2906 | |||
2907 | if (NULL == ext4_li_info) { | ||
2908 | ret = ext4_li_info_new(); | ||
2909 | if (ret) | ||
2910 | goto out; | ||
2911 | } | ||
2912 | |||
2913 | mutex_lock(&ext4_li_info->li_list_mtx); | ||
2914 | list_add(&elr->lr_request, &ext4_li_info->li_request_list); | ||
2915 | mutex_unlock(&ext4_li_info->li_list_mtx); | ||
2916 | |||
2917 | sbi->s_li_request = elr; | ||
2918 | |||
2919 | if (!(ext4_li_info->li_state & EXT4_LAZYINIT_RUNNING)) { | ||
2920 | ret = ext4_run_lazyinit_thread(); | ||
2921 | if (ret) | ||
2922 | goto out; | ||
2923 | } | ||
2924 | |||
2925 | mutex_unlock(&ext4_li_mtx); | ||
2926 | |||
2927 | out: | ||
2928 | if (ret) { | ||
2929 | mutex_unlock(&ext4_li_mtx); | ||
2930 | kfree(elr); | ||
2931 | } | ||
2932 | return ret; | ||
2933 | } | ||
2934 | |||
2935 | /* | ||
2936 | * We do not need to lock anything since this is called on | ||
2937 | * module unload. | ||
2938 | */ | ||
2939 | static void ext4_destroy_lazyinit_thread(void) | ||
2940 | { | ||
2941 | /* | ||
2942 | * If thread exited earlier | ||
2943 | * there's nothing to be done. | ||
2944 | */ | ||
2945 | if (!ext4_li_info) | ||
2946 | return; | ||
2947 | |||
2948 | ext4_clear_request_list(); | ||
2949 | |||
2950 | while (ext4_li_info->li_task) { | ||
2951 | wake_up(&ext4_li_info->li_wait_daemon); | ||
2952 | wait_event(ext4_li_info->li_wait_task, | ||
2953 | ext4_li_info->li_task == NULL); | ||
2954 | } | ||
2955 | } | ||
2956 | |||
2546 | static int ext4_fill_super(struct super_block *sb, void *data, int silent) | 2957 | static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
2547 | __releases(kernel_lock) | 2958 | __releases(kernel_lock) |
2548 | __acquires(kernel_lock) | 2959 | __acquires(kernel_lock) |
@@ -2568,6 +2979,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
2568 | __u64 blocks_count; | 2979 | __u64 blocks_count; |
2569 | int err; | 2980 | int err; |
2570 | unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; | 2981 | unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; |
2982 | ext4_group_t first_not_zeroed; | ||
2571 | 2983 | ||
2572 | sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); | 2984 | sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); |
2573 | if (!sbi) | 2985 | if (!sbi) |
@@ -2630,6 +3042,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
2630 | 3042 | ||
2631 | /* Set defaults before we parse the mount options */ | 3043 | /* Set defaults before we parse the mount options */ |
2632 | def_mount_opts = le32_to_cpu(es->s_default_mount_opts); | 3044 | def_mount_opts = le32_to_cpu(es->s_default_mount_opts); |
3045 | set_opt(sbi->s_mount_opt, INIT_INODE_TABLE); | ||
2633 | if (def_mount_opts & EXT4_DEFM_DEBUG) | 3046 | if (def_mount_opts & EXT4_DEFM_DEBUG) |
2634 | set_opt(sbi->s_mount_opt, DEBUG); | 3047 | set_opt(sbi->s_mount_opt, DEBUG); |
2635 | if (def_mount_opts & EXT4_DEFM_BSDGROUPS) { | 3048 | if (def_mount_opts & EXT4_DEFM_BSDGROUPS) { |
@@ -2909,7 +3322,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
2909 | goto failed_mount2; | 3322 | goto failed_mount2; |
2910 | } | 3323 | } |
2911 | } | 3324 | } |
2912 | if (!ext4_check_descriptors(sb)) { | 3325 | if (!ext4_check_descriptors(sb, &first_not_zeroed)) { |
2913 | ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); | 3326 | ext4_msg(sb, KERN_ERR, "group descriptors corrupted!"); |
2914 | goto failed_mount2; | 3327 | goto failed_mount2; |
2915 | } | 3328 | } |
@@ -3130,6 +3543,10 @@ no_journal: | |||
3130 | goto failed_mount4; | 3543 | goto failed_mount4; |
3131 | } | 3544 | } |
3132 | 3545 | ||
3546 | err = ext4_register_li_request(sb, first_not_zeroed); | ||
3547 | if (err) | ||
3548 | goto failed_mount4; | ||
3549 | |||
3133 | sbi->s_kobj.kset = ext4_kset; | 3550 | sbi->s_kobj.kset = ext4_kset; |
3134 | init_completion(&sbi->s_kobj_unregister); | 3551 | init_completion(&sbi->s_kobj_unregister); |
3135 | err = kobject_init_and_add(&sbi->s_kobj, &ext4_ktype, NULL, | 3552 | err = kobject_init_and_add(&sbi->s_kobj, &ext4_ktype, NULL, |
@@ -3847,6 +4264,19 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) | |||
3847 | enable_quota = 1; | 4264 | enable_quota = 1; |
3848 | } | 4265 | } |
3849 | } | 4266 | } |
4267 | |||
4268 | /* | ||
4269 | * Reinitialize lazy itable initialization thread based on | ||
4270 | * current settings | ||
4271 | */ | ||
4272 | if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE)) | ||
4273 | ext4_unregister_li_request(sb); | ||
4274 | else { | ||
4275 | ext4_group_t first_not_zeroed; | ||
4276 | first_not_zeroed = ext4_has_uninit_itable(sb); | ||
4277 | ext4_register_li_request(sb, first_not_zeroed); | ||
4278 | } | ||
4279 | |||
3850 | ext4_setup_system_zone(sb); | 4280 | ext4_setup_system_zone(sb); |
3851 | if (sbi->s_journal == NULL) | 4281 | if (sbi->s_journal == NULL) |
3852 | ext4_commit_super(sb, 1); | 4282 | ext4_commit_super(sb, 1); |
@@ -4317,6 +4747,9 @@ static int __init init_ext4_fs(void) | |||
4317 | err = register_filesystem(&ext4_fs_type); | 4747 | err = register_filesystem(&ext4_fs_type); |
4318 | if (err) | 4748 | if (err) |
4319 | goto out; | 4749 | goto out; |
4750 | |||
4751 | ext4_li_info = NULL; | ||
4752 | mutex_init(&ext4_li_mtx); | ||
4320 | return 0; | 4753 | return 0; |
4321 | out: | 4754 | out: |
4322 | unregister_as_ext2(); | 4755 | unregister_as_ext2(); |
@@ -4336,6 +4769,7 @@ out4: | |||
4336 | 4769 | ||
4337 | static void __exit exit_ext4_fs(void) | 4770 | static void __exit exit_ext4_fs(void) |
4338 | { | 4771 | { |
4772 | ext4_destroy_lazyinit_thread(); | ||
4339 | unregister_as_ext2(); | 4773 | unregister_as_ext2(); |
4340 | unregister_as_ext3(); | 4774 | unregister_as_ext3(); |
4341 | unregister_filesystem(&ext4_fs_type); | 4775 | unregister_filesystem(&ext4_fs_type); |