aboutsummaryrefslogtreecommitdiffstats
path: root/fs
diff options
context:
space:
mode:
authorEvgeniy Dushistov <dushistov@mail.ru>2007-01-05 19:37:04 -0500
committerLinus Torvalds <torvalds@woody.osdl.org>2007-01-06 02:55:29 -0500
commitd63b70902befe189ba2672925f28ec3f4db41352 (patch)
tree727b00bf66ebb3d1de20d630f310429fa5063325 /fs
parent7ba3485947ee7bc89a17f86250fe9b692a615dff (diff)
[PATCH] fix garbage instead of zeroes in UFS
Looks like this is the problem which Al Viro pointed out some time ago: ufs's get_block callback allocates 16k of disk at a time, and links that entire 16k into the file's metadata. But because get_block is called for only a single buffer_head (a 2k buffer_head in this case?) we are only able to tell the VFS that this 2k is buffer_new(). So when ufs_getfrag_block() is later called to map some more data in the file, and when that data resides within the remaining 14k of this fragment, ufs_getfrag_block() will incorrectly return a !buffer_new() buffer_head. I don't see a _right_ way to do nullification of the whole block: if we use the inode page cache, some pages may be outside of the inode limits (inode size), and will be lost; if we use the blockdev page cache it is possible to zero real data, if the inode page cache is used later. The simplest way, as I can see, is to use the block device page cache, but not only mark the buffers dirty — also sync them during "nullification". I use my simple test collection, which I used to check that create, open, write, read, close work on ufs, and I see that this patch makes the ufs code 18% slower than before. Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs')
-rw-r--r--fs/ufs/balloc.c25
-rw-r--r--fs/ufs/inode.c41
2 files changed, 30 insertions, 36 deletions
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index b82381475779..2e0021e8f366 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -275,6 +275,25 @@ static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,
275 UFSD("EXIT\n"); 275 UFSD("EXIT\n");
276} 276}
277 277
278static void ufs_clear_frags(struct inode *inode, sector_t beg, unsigned int n,
279 int sync)
280{
281 struct buffer_head *bh;
282 sector_t end = beg + n;
283
284 for (; beg < end; ++beg) {
285 bh = sb_getblk(inode->i_sb, beg);
286 lock_buffer(bh);
287 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
288 set_buffer_uptodate(bh);
289 mark_buffer_dirty(bh);
290 unlock_buffer(bh);
291 if (IS_SYNC(inode) || sync)
292 sync_dirty_buffer(bh);
293 brelse(bh);
294 }
295}
296
278unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment, 297unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
279 unsigned goal, unsigned count, int * err, struct page *locked_page) 298 unsigned goal, unsigned count, int * err, struct page *locked_page)
280{ 299{
@@ -350,6 +369,8 @@ unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
350 *p = cpu_to_fs32(sb, result); 369 *p = cpu_to_fs32(sb, result);
351 *err = 0; 370 *err = 0;
352 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 371 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
372 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
373 locked_page != NULL);
353 } 374 }
354 unlock_super(sb); 375 unlock_super(sb);
355 UFSD("EXIT, result %u\n", result); 376 UFSD("EXIT, result %u\n", result);
@@ -363,6 +384,8 @@ unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
363 if (result) { 384 if (result) {
364 *err = 0; 385 *err = 0;
365 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 386 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
387 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
388 locked_page != NULL);
366 unlock_super(sb); 389 unlock_super(sb);
367 UFSD("EXIT, result %u\n", result); 390 UFSD("EXIT, result %u\n", result);
368 return result; 391 return result;
@@ -398,6 +421,8 @@ unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
398 *p = cpu_to_fs32(sb, result); 421 *p = cpu_to_fs32(sb, result);
399 *err = 0; 422 *err = 0;
400 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); 423 UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
424 ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
425 locked_page != NULL);
401 unlock_super(sb); 426 unlock_super(sb);
402 if (newcount < request) 427 if (newcount < request)
403 ufs_free_fragments (inode, result + newcount, request - newcount); 428 ufs_free_fragments (inode, result + newcount, request - newcount);
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index ee1eaa6f4ec2..2fbab0aab688 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -156,36 +156,6 @@ out:
156 return ret; 156 return ret;
157} 157}
158 158
159static void ufs_clear_frag(struct inode *inode, struct buffer_head *bh)
160{
161 lock_buffer(bh);
162 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
163 set_buffer_uptodate(bh);
164 mark_buffer_dirty(bh);
165 unlock_buffer(bh);
166 if (IS_SYNC(inode))
167 sync_dirty_buffer(bh);
168}
169
170static struct buffer_head *
171ufs_clear_frags(struct inode *inode, sector_t beg,
172 unsigned int n, sector_t want)
173{
174 struct buffer_head *res = NULL, *bh;
175 sector_t end = beg + n;
176
177 for (; beg < end; ++beg) {
178 bh = sb_getblk(inode->i_sb, beg);
179 ufs_clear_frag(inode, bh);
180 if (want != beg)
181 brelse(bh);
182 else
183 res = bh;
184 }
185 BUG_ON(!res);
186 return res;
187}
188
189/** 159/**
190 * ufs_inode_getfrag() - allocate new fragment(s) 160 * ufs_inode_getfrag() - allocate new fragment(s)
191 * @inode - pointer to inode 161 * @inode - pointer to inode
@@ -302,7 +272,7 @@ repeat:
302 } 272 }
303 273
304 if (!phys) { 274 if (!phys) {
305 result = ufs_clear_frags(inode, tmp, required, tmp + blockoff); 275 result = sb_getblk(sb, tmp + blockoff);
306 } else { 276 } else {
307 *phys = tmp + blockoff; 277 *phys = tmp + blockoff;
308 result = NULL; 278 result = NULL;
@@ -403,8 +373,7 @@ repeat:
403 373
404 374
405 if (!phys) { 375 if (!phys) {
406 result = ufs_clear_frags(inode, tmp, uspi->s_fpb, 376 result = sb_getblk(sb, tmp + blockoff);
407 tmp + blockoff);
408 } else { 377 } else {
409 *phys = tmp + blockoff; 378 *phys = tmp + blockoff;
410 *new = 1; 379 *new = 1;
@@ -471,13 +440,13 @@ int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head
471#define GET_INODE_DATABLOCK(x) \ 440#define GET_INODE_DATABLOCK(x) \
472 ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new, bh_result->b_page) 441 ufs_inode_getfrag(inode, x, fragment, 1, &err, &phys, &new, bh_result->b_page)
473#define GET_INODE_PTR(x) \ 442#define GET_INODE_PTR(x) \
474 ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL, bh_result->b_page) 443 ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, NULL, NULL, NULL)
475#define GET_INDIRECT_DATABLOCK(x) \ 444#define GET_INDIRECT_DATABLOCK(x) \
476 ufs_inode_getblock(inode, bh, x, fragment, \ 445 ufs_inode_getblock(inode, bh, x, fragment, \
477 &err, &phys, &new, bh_result->b_page); 446 &err, &phys, &new, bh_result->b_page)
478#define GET_INDIRECT_PTR(x) \ 447#define GET_INDIRECT_PTR(x) \
479 ufs_inode_getblock(inode, bh, x, fragment, \ 448 ufs_inode_getblock(inode, bh, x, fragment, \
480 &err, NULL, NULL, bh_result->b_page); 449 &err, NULL, NULL, NULL)
481 450
482 if (ptr < UFS_NDIR_FRAGMENT) { 451 if (ptr < UFS_NDIR_FRAGMENT) {
483 bh = GET_INODE_DATABLOCK(ptr); 452 bh = GET_INODE_DATABLOCK(ptr);