about summary refs log tree commit diff stats
path: root/fs/logfs/readwrite.c
diff options
context:
space:
mode:
authorJoern Engel <joern@logfs.org>2010-04-13 11:46:37 -0400
committerJoern Engel <joern@logfs.org>2010-04-13 11:46:37 -0400
commit032d8f7268444a0f5d4ee02d9513d682d5b8edfc (patch)
tree57cd841514abb9ffe7df7d2569513663f551f960 /fs/logfs/readwrite.c
parente05c378f4973674a16d5b9636f2310cf88aca5f2 (diff)
[LogFS] Prevent memory corruption on large deletes
Removing sufficiently large files would create aliases for a large number of segments. This in turn results in a large number of journal entries and an overflow of s_je_array. The cheap fix is to add a BUG_ON, turning memory corruption into something annoying, but less dangerous. The real fix is to count the number of affected segments and prevent the problem completely. Signed-off-by: Joern Engel <joern@logfs.org>
Diffstat (limited to 'fs/logfs/readwrite.c')
-rw-r--r--fs/logfs/readwrite.c14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 3659c37fbd72..7e0c39c49719 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1219,6 +1219,18 @@ static void free_shadow(struct inode *inode, struct logfs_shadow *shadow)
1219 mempool_free(shadow, super->s_shadow_pool); 1219 mempool_free(shadow, super->s_shadow_pool);
1220} 1220}
1221 1221
1222static void mark_segment(struct shadow_tree *tree, u32 segno)
1223{
1224 int err;
1225
1226 if (!btree_lookup32(&tree->segment_map, segno)) {
1227 err = btree_insert32(&tree->segment_map, segno, (void *)1,
1228 GFP_NOFS);
1229 BUG_ON(err);
1230 tree->no_shadowed_segments++;
1231 }
1232}
1233
1222/** 1234/**
1223 * fill_shadow_tree - Propagate shadow tree changes due to a write 1235 * fill_shadow_tree - Propagate shadow tree changes due to a write
1224 * @inode: Inode owning the page 1236 * @inode: Inode owning the page
@@ -1266,6 +1278,8 @@ static void fill_shadow_tree(struct inode *inode, struct page *page,
1266 1278
1267 super->s_dirty_used_bytes += shadow->new_len; 1279 super->s_dirty_used_bytes += shadow->new_len;
1268 super->s_dirty_free_bytes += shadow->old_len; 1280 super->s_dirty_free_bytes += shadow->old_len;
1281 mark_segment(tree, shadow->old_ofs >> super->s_segshift);
1282 mark_segment(tree, shadow->new_ofs >> super->s_segshift);
1269 } 1283 }
1270} 1284}
1271 1285