author     Theodore Ts'o <tytso@mit.edu>    2012-02-20 17:52:46 -0500
committer  Theodore Ts'o <tytso@mit.edu>    2012-02-20 17:52:46 -0500
commit     813e57276fd909f7d5a816ef7ca706fca491ee61 (patch)
tree       cc2d262c0f6524f1a18d2a168d03e2ae3eb624ab /fs/ext4/mballoc.c
parent     119c0d4460b001e44b41dcf73dc6ee794b98bd31 (diff)
ext4: fix race when setting bitmap_uptodate flag
In ext4_read_{inode,block}_bitmap() we were setting bitmap_uptodate()
before submitting the buffer for read. This is bad, since we check
bitmap_uptodate() without locking the buffer, so a process racing with
us could see the bitmap as uptodate even though the read had not yet
completed, potentially resulting in inodes and blocks being allocated
more than once if we got really unlucky.

Addresses-Google-Bug: 2828254

Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
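The fix moves the set_bitmap_uptodate() call out of the submission path
and into the read-completion callback (the new helpers live in
fs/ext4/balloc.c, outside the diffstat shown below), so the flag can
only become visible once the bitmap data is actually valid. A minimal
sketch of that ordering follows; bitmap_end_read() and
bitmap_read_nowait() are illustrative stand-ins modeled on the commit's
helpers, not the exact kernel code:

/* Completion callback: runs when the read finishes, buffer still locked. */
static void bitmap_end_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
		set_bitmap_uptodate(bh);	/* safe: the data is in memory now */
	}
	unlock_buffer(bh);
	put_bh(bh);
}

static struct buffer_head *bitmap_read_nowait(struct super_block *sb,
					      sector_t bitmap_blk)
{
	struct buffer_head *bh = sb_getblk(sb, bitmap_blk);

	if (!bh)
		return NULL;
	if (bitmap_uptodate(bh))	/* flag now implies valid contents */
		return bh;
	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {	/* re-check under the buffer lock */
		unlock_buffer(bh);
		return bh;
	}
	if (buffer_uptodate(bh)) {	/* block already read; flag it here */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		return bh;
	}
	get_bh(bh);			/* reference dropped in bitmap_end_read() */
	bh->b_end_io = bitmap_end_read;
	submit_bh(READ, bh);		/* flag deliberately NOT set here */
	return bh;
}

The buggy code set the flag just before submit_bh(), which was only
safe for readers that checked it under the buffer lock; the unlocked
fast-path check above is what made that ordering a race.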
Diffstat (limited to 'fs/ext4/mballoc.c')
-rw-r--r--  fs/ext4/mballoc.c  79
1 file changed, 15 insertions(+), 64 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index cb990b21c698..545fa0256606 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -782,7 +782,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
 	int groups_per_page;
 	int err = 0;
 	int i;
-	ext4_group_t first_group;
+	ext4_group_t first_group, group;
 	int first_block;
 	struct super_block *sb;
 	struct buffer_head *bhs;
@@ -806,24 +806,23 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
 
 	/* allocate buffer_heads to read bitmaps */
 	if (groups_per_page > 1) {
-		err = -ENOMEM;
 		i = sizeof(struct buffer_head *) * groups_per_page;
 		bh = kzalloc(i, GFP_NOFS);
-		if (bh == NULL)
+		if (bh == NULL) {
+			err = -ENOMEM;
 			goto out;
+		}
 	} else
 		bh = &bhs;
 
 	first_group = page->index * blocks_per_page / 2;
 
 	/* read all groups the page covers into the cache */
-	for (i = 0; i < groups_per_page; i++) {
-		struct ext4_group_desc *desc;
-
-		if (first_group + i >= ngroups)
+	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
+		if (group >= ngroups)
 			break;
 
-		grinfo = ext4_get_group_info(sb, first_group + i);
+		grinfo = ext4_get_group_info(sb, group);
 		/*
 		 * If page is uptodate then we came here after online resize
 		 * which added some new uninitialized group info structs, so
@@ -834,69 +833,21 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
 			bh[i] = NULL;
 			continue;
 		}
-
-		err = -EIO;
-		desc = ext4_get_group_desc(sb, first_group + i, NULL);
-		if (desc == NULL)
-			goto out;
-
-		err = -ENOMEM;
-		bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
-		if (bh[i] == NULL)
+		if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group))) {
+			err = -ENOMEM;
 			goto out;
-
-		if (bitmap_uptodate(bh[i]))
-			continue;
-
-		lock_buffer(bh[i]);
-		if (bitmap_uptodate(bh[i])) {
-			unlock_buffer(bh[i]);
-			continue;
 		}
-		ext4_lock_group(sb, first_group + i);
-		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
-			ext4_init_block_bitmap(sb, bh[i],
-					       first_group + i, desc);
-			set_bitmap_uptodate(bh[i]);
-			set_buffer_uptodate(bh[i]);
-			ext4_unlock_group(sb, first_group + i);
-			unlock_buffer(bh[i]);
-			continue;
-		}
-		ext4_unlock_group(sb, first_group + i);
-		if (buffer_uptodate(bh[i])) {
-			/*
-			 * if not uninit if bh is uptodate,
-			 * bitmap is also uptodate
-			 */
-			set_bitmap_uptodate(bh[i]);
-			unlock_buffer(bh[i]);
-			continue;
-		}
-		get_bh(bh[i]);
-		/*
-		 * submit the buffer_head for read. We can
-		 * safely mark the bitmap as uptodate now.
-		 * We do it here so the bitmap uptodate bit
-		 * get set with buffer lock held.
-		 */
-		set_bitmap_uptodate(bh[i]);
-		bh[i]->b_end_io = end_buffer_read_sync;
-		submit_bh(READ, bh[i]);
-		mb_debug(1, "read bitmap for group %u\n", first_group + i);
+		mb_debug(1, "read bitmap for group %u\n", group);
 	}
 
 	/* wait for I/O completion */
-	for (i = 0; i < groups_per_page; i++)
-		if (bh[i])
-			wait_on_buffer(bh[i]);
-
-	err = -EIO;
-	for (i = 0; i < groups_per_page; i++)
-		if (bh[i] && !buffer_uptodate(bh[i]))
+	for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
+		if (bh[i] && ext4_wait_block_bitmap(sb, group, bh[i])) {
+			err = -EIO;
 			goto out;
+		}
+	}
 
-	err = 0;
 	first_block = page->index * blocks_per_page;
 	for (i = 0; i < blocks_per_page; i++) {
 		int group;
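Taken together, the rewritten loops above split each bitmap read into a
submit phase and a wait phase, so the I/O for every group on the page
proceeds in parallel. Condensed to its shape (error handling and the
uptodate/incore special cases trimmed; a sketch rather than the full
function):

static int read_group_bitmaps(struct super_block *sb,
			      ext4_group_t first_group,
			      struct buffer_head **bh, int nr)
{
	ext4_group_t group;
	int i;

	/* phase 1: kick off every read without blocking */
	for (i = 0, group = first_group; i < nr; i++, group++)
		if (!(bh[i] = ext4_read_block_bitmap_nowait(sb, group)))
			return -ENOMEM;

	/* phase 2: wait for each read and validate the result */
	for (i = 0, group = first_group; i < nr; i++, group++)
		if (ext4_wait_block_bitmap(sb, group, bh[i]))
			return -EIO;
	return 0;
}

This also centralizes the locking-sensitive bitmap_uptodate() handling
inside the two helpers, which is why the 64 deleted lines collapse to
15 insertions here.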