author     Eric Sandeen <sandeen@redhat.com>    2010-10-27 21:30:15 -0400
committer  Theodore Ts'o <tytso@mit.edu>        2010-10-27 21:30:15 -0400
commit     eee4adc709afe40d8c02fa154c63dbeb55d911e3 (patch)
tree       e65d8235eb7cf745a1d9dc2ac9ab1c62bdd45758
parent     61d08673de1fe68bfba86203258377bf39f234b6 (diff)
ext4: move ext4_mb_{get,put}_buddy_cache_lock and make them static
These functions are only used within fs/ext4/mballoc.c, so move them so that
they are defined before they are used, and make them static.

Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
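In C, a function whose definition appears above its only callers in the same
translation unit needs no separate prototype, which is why the extern
declarations can be dropped from ext4.h once the definitions are moved up and
given internal linkage. A minimal sketch of that pattern (sketch.c and
helper_twice are hypothetical names, not part of this patch):

/* sketch.c -- hypothetical example, not part of the patch */
#include <stdio.h>

/*
 * Defined before its only caller in this file, so no forward
 * declaration or header prototype is needed and the function can
 * carry internal linkage via "static".
 */
static int helper_twice(int x)
{
	return 2 * x;
}

int main(void)
{
	printf("%d\n", helper_twice(21));	/* prints 42 */
	return 0;
}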
-rw-r--r--  fs/ext4/ext4.h    |   3
-rw-r--r--  fs/ext4/mballoc.c | 157
2 files changed, 79 insertions(+), 81 deletions(-)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 202668c5607d..8b5dd6369f82 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1684,9 +1684,6 @@ extern void ext4_free_blocks(handle_t *handle, struct inode *inode,
 				unsigned long count, int flags);
 extern int ext4_mb_add_groupinfo(struct super_block *sb,
 			ext4_group_t i, struct ext4_group_desc *desc);
-extern int ext4_mb_get_buddy_cache_lock(struct super_block *, ext4_group_t);
-extern void ext4_mb_put_buddy_cache_lock(struct super_block *,
-						ext4_group_t, int);
 extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
 
 /* inode.c */
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 381ac565786a..328ea9cec57b 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -947,6 +947,85 @@ out:
 }
 
 /*
+ * lock the group_info alloc_sem of all the groups
+ * belonging to the same buddy cache page. This
+ * make sure other parallel operation on the buddy
+ * cache doesn't happen whild holding the buddy cache
+ * lock
+ */
+static int ext4_mb_get_buddy_cache_lock(struct super_block *sb,
+					ext4_group_t group)
+{
+	int i;
+	int block, pnum;
+	int blocks_per_page;
+	int groups_per_page;
+	ext4_group_t ngroups = ext4_get_groups_count(sb);
+	ext4_group_t first_group;
+	struct ext4_group_info *grp;
+
+	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+	/*
+	 * the buddy cache inode stores the block bitmap
+	 * and buddy information in consecutive blocks.
+	 * So for each group we need two blocks.
+	 */
+	block = group * 2;
+	pnum = block / blocks_per_page;
+	first_group = pnum * blocks_per_page / 2;
+
+	groups_per_page = blocks_per_page >> 1;
+	if (groups_per_page == 0)
+		groups_per_page = 1;
+	/* read all groups the page covers into the cache */
+	for (i = 0; i < groups_per_page; i++) {
+
+		if ((first_group + i) >= ngroups)
+			break;
+		grp = ext4_get_group_info(sb, first_group + i);
+		/* take all groups write allocation
+		 * semaphore. This make sure there is
+		 * no block allocation going on in any
+		 * of that groups
+		 */
+		down_write_nested(&grp->alloc_sem, i);
+	}
+	return i;
+}
+
+static void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
+					ext4_group_t group, int locked_group)
+{
+	int i;
+	int block, pnum;
+	int blocks_per_page;
+	ext4_group_t first_group;
+	struct ext4_group_info *grp;
+
+	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
+	/*
+	 * the buddy cache inode stores the block bitmap
+	 * and buddy information in consecutive blocks.
+	 * So for each group we need two blocks.
+	 */
+	block = group * 2;
+	pnum = block / blocks_per_page;
+	first_group = pnum * blocks_per_page / 2;
+	/* release locks on all the groups */
+	for (i = 0; i < locked_group; i++) {
+
+		grp = ext4_get_group_info(sb, first_group + i);
+		/* take all groups write allocation
+		 * semaphore. This make sure there is
+		 * no block allocation going on in any
+		 * of that groups
+		 */
+		up_write(&grp->alloc_sem);
+	}
+
+}
+
+/*
  * Locking note: This routine calls ext4_mb_init_cache(), which takes the
  * block group lock of all groups for this page; do not hold the BG lock when
  * calling this routine!
@@ -1923,84 +2002,6 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
 	return 0;
 }
 
-/*
- * lock the group_info alloc_sem of all the groups
- * belonging to the same buddy cache page. This
- * make sure other parallel operation on the buddy
- * cache doesn't happen whild holding the buddy cache
- * lock
- */
-int ext4_mb_get_buddy_cache_lock(struct super_block *sb, ext4_group_t group)
-{
-	int i;
-	int block, pnum;
-	int blocks_per_page;
-	int groups_per_page;
-	ext4_group_t ngroups = ext4_get_groups_count(sb);
-	ext4_group_t first_group;
-	struct ext4_group_info *grp;
-
-	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-	/*
-	 * the buddy cache inode stores the block bitmap
-	 * and buddy information in consecutive blocks.
-	 * So for each group we need two blocks.
-	 */
-	block = group * 2;
-	pnum = block / blocks_per_page;
-	first_group = pnum * blocks_per_page / 2;
-
-	groups_per_page = blocks_per_page >> 1;
-	if (groups_per_page == 0)
-		groups_per_page = 1;
-	/* read all groups the page covers into the cache */
-	for (i = 0; i < groups_per_page; i++) {
-
-		if ((first_group + i) >= ngroups)
-			break;
-		grp = ext4_get_group_info(sb, first_group + i);
-		/* take all groups write allocation
-		 * semaphore. This make sure there is
-		 * no block allocation going on in any
-		 * of that groups
-		 */
-		down_write_nested(&grp->alloc_sem, i);
-	}
-	return i;
-}
-
-void ext4_mb_put_buddy_cache_lock(struct super_block *sb,
-					ext4_group_t group, int locked_group)
-{
-	int i;
-	int block, pnum;
-	int blocks_per_page;
-	ext4_group_t first_group;
-	struct ext4_group_info *grp;
-
-	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
-	/*
-	 * the buddy cache inode stores the block bitmap
-	 * and buddy information in consecutive blocks.
-	 * So for each group we need two blocks.
-	 */
-	block = group * 2;
-	pnum = block / blocks_per_page;
-	first_group = pnum * blocks_per_page / 2;
-	/* release locks on all the groups */
-	for (i = 0; i < locked_group; i++) {
-
-		grp = ext4_get_group_info(sb, first_group + i);
-		/* take all groups write allocation
-		 * semaphore. This make sure there is
-		 * no block allocation going on in any
-		 * of that groups
-		 */
-		up_write(&grp->alloc_sem);
-	}
-
-}
-
 static noinline_for_stack int
 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 {
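For reference, the group-to-page arithmetic that both helpers repeat
(block = group * 2; pnum = block / blocks_per_page;
first_group = pnum * blocks_per_page / 2) can be checked outside the kernel.
A standalone sketch assuming a 4096-byte page and a 1024-byte block size,
values chosen only for illustration (PAGE_SIZE_EXAMPLE and BLOCKSIZE_EXAMPLE
are stand-ins, not kernel macros):

/* buddy_page_math.c -- standalone illustration, not kernel code */
#include <stdio.h>

#define PAGE_SIZE_EXAMPLE	4096	/* stand-in for PAGE_CACHE_SIZE */
#define BLOCKSIZE_EXAMPLE	1024	/* stand-in for sb->s_blocksize */

int main(void)
{
	int group = 5;						/* group of interest */
	int blocks_per_page = PAGE_SIZE_EXAMPLE / BLOCKSIZE_EXAMPLE;	/* 4 */
	int block = group * 2;					/* 10: bitmap + buddy block */
	int pnum = block / blocks_per_page;			/* 2: page holding them */
	int first_group = pnum * blocks_per_page / 2;		/* 4: first group on that page */
	int groups_per_page = blocks_per_page >> 1;		/* 2 */

	if (groups_per_page == 0)
		groups_per_page = 1;

	/* Prints "page 2 covers groups 4..5": both alloc_sems get taken. */
	printf("page %d covers groups %d..%d\n",
	       pnum, first_group, first_group + groups_per_page - 1);
	return 0;
}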