author	Evgeniy Dushistov <dushistov@mail.ru>	2007-01-29 16:19:56 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-01-30 11:26:45 -0500
commit	efee2b812645d10824bf6cb247789910bcb66881 (patch)
tree	b4b4c54b0064bc3b17aa778807f2f914a60848b8 /fs
parent	8682164a66325cab07620082eb7f413b547f4b4a (diff)
[PATCH] ufs: reallocation fix
The block reallocation function sometimes does not update some of the buffer_head::b_blocknr fields, which may cause data damage.

Signed-off-by: Evgeniy Dushistov <dushistov@mail.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
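For context, the patch replaces the old fixed-stride page walk in ufs_change_blocknr() with mask arithmetic: with mask = blocks-per-page - 1, `beg & mask` is the block's offset inside its page and `(beg | mask) + 1` jumps to the first block of the next page. A minimal user-space sketch of that arithmetic (the shift value and the block range below are made-up example values, not taken from the patch):

#include <stdio.h>

int main(void)
{
	/* Stand-in for PAGE_CACHE_SHIFT - inode->i_blkbits: 4 blocks per page. */
	const unsigned shift = 2;
	const unsigned mask = (1u << shift) - 1;

	unsigned beg = 5, count = 7;	/* example block range, made up */
	unsigned end = beg + count;

	for (; beg < end; beg = (beg | mask) + 1) {
		unsigned index = beg >> shift;	/* page index holding block `beg` */
		unsigned pos = beg & mask;	/* offset of `beg` within that page */
		printf("page %u: start at in-page buffer %u\n", index, pos);
	}
	return 0;
}

With shift = 2 and blocks 5..11, the loop visits page 1 starting at in-page buffer 1, then page 2 starting at buffer 0.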
Diffstat (limited to 'fs')
-rw-r--r--	fs/ufs/balloc.c | 41
1 file changed, 26 insertions(+), 15 deletions(-)
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 96ca8453bab6..638f4c585e89 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -227,14 +227,14 @@ failed:
  * We can come here from ufs_writepage or ufs_prepare_write,
  * locked_page is argument of these functions, so we already lock it.
  */
-static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,
+static void ufs_change_blocknr(struct inode *inode, unsigned int beg,
 			       unsigned int count, unsigned int oldb,
 			       unsigned int newb, struct page *locked_page)
 {
-	unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
-	struct address_space *mapping = inode->i_mapping;
+	const unsigned mask = (1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1;
+	struct address_space * const mapping = inode->i_mapping;
 	pgoff_t index, cur_index;
-	unsigned int i, j;
+	unsigned end, pos, j;
 	struct page *page;
 	struct buffer_head *head, *bh;
 
@@ -246,8 +246,8 @@ static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,
 
 	cur_index = locked_page->index;
 
-	for (i = 0; i < count; i += blk_per_page) {
-		index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	for (end = count + beg; beg < end; beg = (beg | mask) + 1) {
+		index = beg >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
 
 		if (likely(cur_index != index)) {
 			page = ufs_get_locked_page(mapping, index);
@@ -256,21 +256,32 @@ static void ufs_change_blocknr(struct inode *inode, unsigned int baseblk,
 		} else
 			page = locked_page;
 
-		j = i;
 		head = page_buffers(page);
 		bh = head;
+		pos = beg & mask;
+		for (j = 0; j < pos; ++j)
+			bh = bh->b_this_page;
+		j = 0;
 		do {
-			if (likely(bh->b_blocknr == j + oldb && j < count)) {
-				unmap_underlying_metadata(bh->b_bdev,
-							  bh->b_blocknr);
-				bh->b_blocknr = newb + j++;
-				mark_buffer_dirty(bh);
+			if (buffer_mapped(bh)) {
+				pos = bh->b_blocknr - oldb;
+				if (pos < count) {
+					UFSD(" change from %llu to %llu\n",
+					     (unsigned long long)pos + oldb,
+					     (unsigned long long)pos + newb);
+					bh->b_blocknr = newb + pos;
+					unmap_underlying_metadata(bh->b_bdev,
+								  bh->b_blocknr);
+					mark_buffer_dirty(bh);
+					++j;
+				}
 			}
 
 			bh = bh->b_this_page;
 		} while (bh != head);
 
-		set_page_dirty(page);
+		if (j)
+			set_page_dirty(page);
 
 		if (likely(cur_index != index))
 			ufs_put_locked_page(page);
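The reworked inner loop above no longer assumes the page's buffers are numbered contiguously from oldb; it skips unmapped buffers, derives each buffer's offset from its current b_blocknr, and only dirties the page when at least one buffer actually changed. A user-space model of that logic (toy types and example block numbers, not the real buffer_head API):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for a buffer_head: only the fields the loop cares about. */
struct toy_bh {
	bool mapped;
	unsigned long long blocknr;
};

int main(void)
{
	/* One page worth of buffers; block numbers are example values. */
	struct toy_bh page[4] = {
		{ true, 100 }, { true, 101 }, { false, 0 }, { true, 500 },
	};
	const unsigned long long oldb = 100, newb = 200;
	const unsigned count = 2;
	unsigned j = 0;	/* how many buffers were actually re-pointed */

	for (unsigned i = 0; i < 4; i++) {
		struct toy_bh *bh = &page[i];
		if (!bh->mapped)
			continue;	/* analogous to !buffer_mapped(bh) */
		unsigned long long pos = bh->blocknr - oldb;
		if (pos < count) {
			printf("change from %llu to %llu\n", pos + oldb, pos + newb);
			bh->blocknr = newb + pos;	/* re-point the buffer */
			++j;
		}
	}
	if (j)
		printf("page dirtied (%u buffers updated)\n", j);
	return 0;
}

Only the two buffers whose block numbers fall inside [oldb, oldb + count) are re-pointed; the unmapped buffer and the out-of-range one are left alone, and the page is dirtied because j is non-zero.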
@@ -418,14 +429,14 @@ unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
 	}
 	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
 	if (result) {
+		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
+				locked_page != NULL);
 		ufs_change_blocknr(inode, fragment - oldcount, oldcount, tmp,
 				   result, locked_page);
 
 		*p = cpu_to_fs32(sb, result);
 		*err = 0;
 		UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count);
-		ufs_clear_frags(inode, result + oldcount, newcount - oldcount,
-				locked_page != NULL);
 		unlock_super(sb);
 		if (newcount < request)
 			ufs_free_fragments (inode, result + newcount, request - newcount);
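The second hunk in ufs_new_fragments() is purely a reordering: the newly allocated fragments are now cleared before ufs_change_blocknr() re-points the buffers at them, rather than afterwards. A stub-only sketch of that call-order change (stand-in functions, not the real UFS helpers):

#include <stdio.h>

/* Stand-ins for the real UFS helpers, used only to show the call order. */
static void clear_frags(void)    { printf("ufs_clear_frags\n"); }
static void change_blocknr(void) { printf("ufs_change_blocknr\n"); }

int main(void)
{
	/* Order after the patch: clear the new fragments first, then
	 * switch the buffers' block numbers over to them. Before the
	 * patch, clear_frags() ran after change_blocknr(). */
	clear_frags();
	change_blocknr();
	return 0;
}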