path: root/fs/ext4/mballoc.h
author    Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>    2009-01-05 22:19:52 -0500
committer Theodore Ts'o <tytso@mit.edu>    2009-01-05 22:19:52 -0500
commit    5d1b1b3f492f8696ea18950a454a141381b0f926
tree      e6277cd3e01c074403b9da7390de1daa6b9f248f    /fs/ext4/mballoc.h
parent    b7be019e80da4db96d283734d55366014509911c
ext4: fix BUG when calling ext4_error with locked block group
The mballoc code likes to call ext4_error while it is holding locked block groups. This can cause a scheduling-in-atomic-context BUG. We can't just unlock the block group and relock it after/if ext4_error returns, since that might result in race conditions in the case where the filesystem is set to continue after finding errors.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
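To make the failure mode concrete, here is a minimal sketch (illustrative only; the call site and the bitmap corruption check are hypothetical, not copied from mballoc.c). ext4_lock_group(), whose old definition is removed below, takes the group lock with bit_spin_lock(), which puts the CPU in atomic context; ext4_error() may end up sleeping (for example while writing the superblock or remounting read-only), so calling it under that lock triggers the BUG. The Diffstat below is limited to fs/ext4/mballoc.h, so only the removals from this header appear here.

	ext4_lock_group(sb, group);       /* bit_spin_lock(): atomic context from here on */
	if (bitmap_looks_corrupt)         /* hypothetical corruption check */
		ext4_error(sb, __func__,  /* may sleep -> "scheduling while atomic" BUG */
			   "corrupt bitmap in block group %lu",
			   (unsigned long) group);
	ext4_unlock_group(sb, group);     /* unlocking before ext4_error() instead would
					   * open the race described above when the
					   * filesystem is set to continue after errors */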
Diffstat (limited to 'fs/ext4/mballoc.h')
-rw-r--r--    fs/ext4/mballoc.h    47
1 file changed, 0 insertions, 47 deletions
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 997f78fff129..95d4c7f29a8a 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -118,27 +118,6 @@ struct ext4_free_data {
 	tid_t t_tid;
 };
 
-struct ext4_group_info {
-	unsigned long bb_state;
-	struct rb_root bb_free_root;
-	unsigned short bb_first_free;
-	unsigned short bb_free;
-	unsigned short bb_fragments;
-	struct list_head bb_prealloc_list;
-#ifdef DOUBLE_CHECK
-	void *bb_bitmap;
-#endif
-	struct rw_semaphore alloc_sem;
-	unsigned short bb_counters[];
-};
-
-#define EXT4_GROUP_INFO_NEED_INIT_BIT	0
-#define EXT4_GROUP_INFO_LOCKED_BIT	1
-
-#define EXT4_MB_GRP_NEED_INIT(grp)	\
-	(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
-
-
 struct ext4_prealloc_space {
 	struct list_head pa_inode_list;
 	struct list_head pa_group_list;
@@ -264,32 +243,6 @@ static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
 #define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
 
 struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);
-
-
-static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
-{
-	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-	bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
-}
-
-static inline void ext4_unlock_group(struct super_block *sb,
-					ext4_group_t group)
-{
-	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-	bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
-}
-
-static inline int ext4_is_group_locked(struct super_block *sb,
-					ext4_group_t group)
-{
-	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-	return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
-						&(grinfo->bb_state));
-}
-
 static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
 					struct ext4_free_extent *fex)
 {