author    Evgeniy Dushistov <dushistov@mail.ru>  2007-03-16 17:38:08 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-03-16 22:25:03 -0400
commit    5431bf97ce69065ed07de1ff12543d0800817b83 (patch)
tree      b3c0753824a360282bb07adefd61956829d6999a /fs/ufs/balloc.c
parent    2189850f42beff23af32d847bd043cd1d1811a80 (diff)
[PATCH] ufs: prepare write + change blocks on the fly
This fixes "change block numbers on the fly" for the case when "prepare
write page" is in the call chain. In that case some buffers may be
neither uptodate nor mapped, so we must take care to map them and load
them from disk.

This patch was tested with:
- simple ufs regression tests
- fsx-linux
- ltp(20060306)
- untar and build kernel

Signed-off-by: Evgeniy Dushistov <dushistov@mail.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
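At the heart of the change is the pattern of mapping a buffer and reading it in from disk before its block number is rewritten. A minimal sketch of that pattern, using the 2.6-era buffer-head API (the helper name read_buffer_if_needed is hypothetical; sb and phys stand in for the inode->i_sb and oldb + pos values the patch actually uses):

	/*
	 * Hypothetical helper illustrating the map-and-read pattern
	 * the patch introduces; not part of the patch itself.
	 */
	static int read_buffer_if_needed(struct buffer_head *bh,
					 struct super_block *sb,
					 sector_t phys)
	{
		if (!buffer_mapped(bh))
			map_bh(bh, sb, phys);	/* attach bdev + block number */
		if (!buffer_uptodate(bh)) {
			ll_rw_block(READ, 1, &bh);	/* submit the read */
			wait_on_buffer(bh);		/* wait for I/O */
			if (!buffer_uptodate(bh))
				return -EIO;		/* read failed */
		}
		return 0;
	}

The patch inlines this logic directly in the do/while loop rather than splitting out a helper.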
Diffstat (limited to 'fs/ufs/balloc.c')
-rw-r--r--  fs/ufs/balloc.c | 81
1 file changed, 53 insertions(+), 28 deletions(-)
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index bcc44084e004..b8fa34af87cc 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -244,62 +244,87 @@ failed:
  * We can come here from ufs_writepage or ufs_prepare_write,
  * locked_page is argument of these functions, so we already lock it.
  */
-static void ufs_change_blocknr(struct inode *inode, unsigned int beg,
-			       unsigned int count, unsigned int oldb,
-			       unsigned int newb, struct page *locked_page)
+static void ufs_change_blocknr(struct inode *inode, sector_t beg,
+			       unsigned int count, sector_t oldb,
+			       sector_t newb, struct page *locked_page)
 {
-	const unsigned mask = (1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1;
+	const unsigned blks_per_page =
+		1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	const unsigned mask = blks_per_page - 1;
 	struct address_space * const mapping = inode->i_mapping;
-	pgoff_t index, cur_index;
-	unsigned end, pos, j;
+	pgoff_t index, cur_index, last_index;
+	unsigned pos, j, lblock;
+	sector_t end, i;
 	struct page *page;
 	struct buffer_head *head, *bh;

-	UFSD("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
-	     inode->i_ino, count, oldb, newb);
+	UFSD("ENTER, ino %lu, count %u, oldb %llu, newb %llu\n",
+	     inode->i_ino, count,
+	     (unsigned long long)oldb, (unsigned long long)newb);

 	BUG_ON(!locked_page);
 	BUG_ON(!PageLocked(locked_page));

 	cur_index = locked_page->index;
-
-	for (end = count + beg; beg < end; beg = (beg | mask) + 1) {
-		index = beg >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	end = count + beg;
+	last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	for (i = beg; i < end; i = (i | mask) + 1) {
+		index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits);

 		if (likely(cur_index != index)) {
 			page = ufs_get_locked_page(mapping, index);
-			if (!page || IS_ERR(page)) /* it was truncated or EIO */
+			if (!page) /* it was truncated */
+				continue;
+			if (IS_ERR(page)) { /* or EIO */
+				ufs_error(inode->i_sb, __FUNCTION__,
+					  "read of page %llu failed\n",
+					  (unsigned long long)index);
 				continue;
+			}
 		} else
 			page = locked_page;

 		head = page_buffers(page);
 		bh = head;
-		pos = beg & mask;
+		pos = i & mask;
 		for (j = 0; j < pos; ++j)
 			bh = bh->b_this_page;
-		j = 0;
+
+
+		if (unlikely(index == last_index))
+			lblock = end & mask;
+		else
+			lblock = blks_per_page;
+
 		do {
-			if (buffer_mapped(bh)) {
-				pos = bh->b_blocknr - oldb;
-				if (pos < count) {
-					UFSD(" change from %llu to %llu\n",
-					     (unsigned long long)pos + oldb,
-					     (unsigned long long)pos + newb);
-					bh->b_blocknr = newb + pos;
-					unmap_underlying_metadata(bh->b_bdev,
-								  bh->b_blocknr);
-					mark_buffer_dirty(bh);
-					++j;
+			if (j >= lblock)
+				break;
+			pos = (i - beg) + j;
+
+			if (!buffer_mapped(bh))
+				map_bh(bh, inode->i_sb, oldb + pos);
+			if (!buffer_uptodate(bh)) {
+				ll_rw_block(READ, 1, &bh);
+				wait_on_buffer(bh);
+				if (!buffer_uptodate(bh)) {
+					ufs_error(inode->i_sb, __FUNCTION__,
+						  "read of block failed\n");
+					break;
 				}
 			}

+			UFSD(" change from %llu to %llu, pos %u\n",
+			     (unsigned long long)pos + oldb,
+			     (unsigned long long)pos + newb, pos);
+
+			bh->b_blocknr = newb + pos;
+			unmap_underlying_metadata(bh->b_bdev,
+						  bh->b_blocknr);
+			mark_buffer_dirty(bh);
+			++j;
 			bh = bh->b_this_page;
 		} while (bh != head);

-		if (j)
-			set_page_dirty(page);
-
 		if (likely(cur_index != index))
 			ufs_put_locked_page(page);
 	}
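A note on the outer-loop stepping the patch keeps: with mask = blks_per_page - 1, the expression (i | mask) + 1 rounds i up to the first block of the next page, so each pass of the loop handles exactly one page worth of buffers. A standalone illustration with hypothetical values (4K pages, 1K blocks, so blks_per_page = 4 and mask = 3):

	/* Hypothetical illustration of the (i | mask) + 1 stepping;
	 * blocks 5..13 span pages 1, 2 and 3. */
	#include <stdio.h>

	int main(void)
	{
		const unsigned mask = 3;		/* blks_per_page - 1 */
		unsigned long long beg = 5, end = 14;

		for (unsigned long long i = beg; i < end; i = (i | mask) + 1)
			printf("page %llu: first block %llu\n", i >> 2, i);
		/* prints: page 1 block 5, page 2 block 8, page 3 block 12 */
		return 0;
	}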