author		Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2009-09-09 23:47:46 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2009-09-09 23:47:46 -0400
commit		b6a758ec3af3ec236dbfdcf6a06b84ac8f94957e (patch)
tree		188505222c7fd0d92c18435aaf96e07c9f0ebdb6
parent		91ac6f43317c0bf99969665f98016548011dfa38 (diff)

ext4: move ext4_mb_init_group() function earlier in the mballoc.c
This moves the function around so that it can be called from
ext4_mb_load_buddy().

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
-rw-r--r--	fs/ext4/mballoc.c	182
1 file changed, 91 insertions(+), 91 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 5ef6daf0cdc6..fed5ac699141 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -910,6 +910,97 @@ out:
 	return err;
 }
 
+static noinline_for_stack
+int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
+{
+
+	int ret = 0;
+	void *bitmap;
+	int blocks_per_page;
+	int block, pnum, poff;
+	int num_grp_locked = 0;
+	struct ext4_group_info *this_grp;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
+	struct inode *inode = sbi->s_buddy_cache;
+	struct page *page = NULL, *bitmap_page = NULL;
+
+	mb_debug(1, "init group %u\n", group);
+	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+	this_grp = ext4_get_group_info(sb, group);
+	/*
+	 * This ensures we don't add group
+	 * to this buddy cache via resize
+	 */
+	num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group);
+	if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
+		/*
+		 * somebody initialized the group
+		 * return without doing anything
+		 */
+		ret = 0;
+		goto err;
+	}
+	/*
+	 * the buddy cache inode stores the block bitmap
+	 * and buddy information in consecutive blocks.
+	 * So for each group we need two blocks.
+	 */
+	block = group * 2;
+	pnum = block / blocks_per_page;
+	poff = block % blocks_per_page;
+	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
+	if (page) {
+		BUG_ON(page->mapping != inode->i_mapping);
+		ret = ext4_mb_init_cache(page, NULL);
+		if (ret) {
+			unlock_page(page);
+			goto err;
+		}
+		unlock_page(page);
+	}
+	if (page == NULL || !PageUptodate(page)) {
+		ret = -EIO;
+		goto err;
+	}
+	mark_page_accessed(page);
+	bitmap_page = page;
+	bitmap = page_address(page) + (poff * sb->s_blocksize);
+
+	/* init buddy cache */
+	block++;
+	pnum = block / blocks_per_page;
+	poff = block % blocks_per_page;
+	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
+	if (page == bitmap_page) {
+		/*
+		 * If both the bitmap and buddy are in
+		 * the same page we don't need to force
+		 * init the buddy
+		 */
+		unlock_page(page);
+	} else if (page) {
+		BUG_ON(page->mapping != inode->i_mapping);
+		ret = ext4_mb_init_cache(page, bitmap);
+		if (ret) {
+			unlock_page(page);
+			goto err;
+		}
+		unlock_page(page);
+	}
+	if (page == NULL || !PageUptodate(page)) {
+		ret = -EIO;
+		goto err;
+	}
+	mark_page_accessed(page);
+err:
+	ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
+	if (bitmap_page)
+		page_cache_release(bitmap_page);
+	if (page)
+		page_cache_release(page);
+	return ret;
+}
+
 static noinline_for_stack int
 ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
 					struct ext4_buddy *e4b)
@@ -1839,97 +1930,6 @@ void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
 
 }
 
-static noinline_for_stack
-int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
-{
-
-	int ret;
-	void *bitmap;
-	int blocks_per_page;
-	int block, pnum, poff;
-	int num_grp_locked = 0;
-	struct ext4_group_info *this_grp;
-	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	struct inode *inode = sbi->s_buddy_cache;
-	struct page *page = NULL, *bitmap_page = NULL;
-
-	mb_debug(1, "init group %u\n", group);
-	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-	this_grp = ext4_get_group_info(sb, group);
-	/*
-	 * This ensures we don't add group
-	 * to this buddy cache via resize
-	 */
-	num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group);
-	if (!EXT4_MB_GRP_NEED_INIT(this_grp)) {
-		/*
-		 * somebody initialized the group
-		 * return without doing anything
-		 */
-		ret = 0;
-		goto err;
-	}
-	/*
-	 * the buddy cache inode stores the block bitmap
-	 * and buddy information in consecutive blocks.
-	 * So for each group we need two blocks.
-	 */
-	block = group * 2;
-	pnum = block / blocks_per_page;
-	poff = block % blocks_per_page;
-	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-	if (page) {
-		BUG_ON(page->mapping != inode->i_mapping);
-		ret = ext4_mb_init_cache(page, NULL);
-		if (ret) {
-			unlock_page(page);
-			goto err;
-		}
-		unlock_page(page);
-	}
-	if (page == NULL || !PageUptodate(page)) {
-		ret = -EIO;
-		goto err;
-	}
-	mark_page_accessed(page);
-	bitmap_page = page;
-	bitmap = page_address(page) + (poff * sb->s_blocksize);
-
-	/* init buddy cache */
-	block++;
-	pnum = block / blocks_per_page;
-	poff = block % blocks_per_page;
-	page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
-	if (page == bitmap_page) {
-		/*
-		 * If both the bitmap and buddy are in
-		 * the same page we don't need to force
-		 * init the buddy
-		 */
-		unlock_page(page);
-	} else if (page) {
-		BUG_ON(page->mapping != inode->i_mapping);
-		ret = ext4_mb_init_cache(page, bitmap);
-		if (ret) {
-			unlock_page(page);
-			goto err;
-		}
-		unlock_page(page);
-	}
-	if (page == NULL || !PageUptodate(page)) {
-		ret = -EIO;
-		goto err;
-	}
-	mark_page_accessed(page);
-err:
-	ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked);
-	if (bitmap_page)
-		page_cache_release(bitmap_page);
-	if (page)
-		page_cache_release(page);
-	return ret;
-}
-
 static noinline_for_stack int
 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 {
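
The hunks above only relocate ext4_mb_init_group(); this patch does not itself add a caller. As a rough sketch of the call pattern the commit message implies (not part of this diff; the guard, locals, and surrounding logic are assumptions, and the real follow-up change may differ), ext4_mb_load_buddy() would check the group's NEED_INIT flag and build the buddy cache on demand now that the function is defined earlier in the file and therefore visible at that point:

/*
 * Hypothetical sketch only: how ext4_mb_load_buddy() could invoke the
 * relocated ext4_mb_init_group().  Identifiers such as ext4_mb_init_group,
 * ext4_get_group_info and EXT4_MB_GRP_NEED_INIT come from the diff above;
 * everything else here is illustrative.
 */
static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	int ret;

	/*
	 * If this group's bitmap/buddy pages have never been built,
	 * initialize them before taking references on the buddy cache.
	 */
	if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
		ret = ext4_mb_init_group(sb, group);
		if (ret)
			return ret;
	}

	/* ... existing buddy-load logic continues here ... */
	return 0;
}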