author	Evgeniy Dushistov <dushistov@mail.ru>	2006-06-25 08:47:20 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-25 13:01:01 -0400
commit	6ef4d6bf86a82965896eaa1a189177239ec2bbab (patch)
tree	3217c5601d8cf6701f8783ec776aa96d0dd75d4a /fs/ufs/balloc.c
parent	c9a27b5dca52bbd0955e065e49e56eb313d02c34 (diff)
[PATCH] ufs: change block number on the fly
First of all, some necessary notes about UFS itself: to avoid wasting disk
space, the tail of a file consists not of blocks (which are ordinarily
rather large, usually 16K) but of fragments (ordinarily 2K). As a file
grows, its tail occupies 1 fragment, 2 fragments... At some point the
decision is made to allocate a whole block, and all the tail fragments are
moved into one block.

How this situation was handled before:

ufs_prepare_write
->block_prepare_write
  ->ufs_getfrag_block
    ->...
      ->ufs_new_fragments:

	bh = sb_bread(sb, tmp + i);
	bh->b_blocknr = result + i;
	mark_buffer_dirty(bh);

This is the wrong solution, because:

- it does not take into account that there is another cache, the inode
  page cache;

- sb_getblk does not use b_blocknr to find a certain block (it uses
  page->index), so this breaks sb_getblk.

How this situation is handled now: we walk through the inode page cache;
if a page is not in the cache, we load it into the cache, and we change
b_blocknr on its buffers.

Signed-off-by: Evgeniy Dushistov <dushistov@mail.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
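For concreteness, the sb_getblk point above follows from the page-index
arithmetic the patch itself uses (index = blk >> (PAGE_CACHE_SHIFT -
inode->i_blkbits)): the page cache addresses data by file-relative page
index, not by on-disk block number, so rewriting b_blocknr on a buffer
read via sb_bread leaves the cached pages stale. A minimal userspace
sketch of that mapping (the 4K-page/2K-fragment constants are
illustrative assumptions, not taken from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned page_shift = 12;	/* assume 4K page cache pages */
		unsigned blkbits    = 11;	/* assume 2K UFS fragments    */
		unsigned frag;

		/* mirrors: index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits) */
		for (frag = 0; frag < 6; frag++)
			printf("file fragment %u is cached in page index %u\n",
			       frag, frag >> (page_shift - blkbits));
		return 0;
	}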
Diffstat (limited to 'fs/ufs/balloc.c')
-rw-r--r--	fs/ufs/balloc.c	137
1 file changed, 113 insertions, 24 deletions
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index cc0c8f15d8fd..06f970d02e3d 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -39,7 +39,8 @@ static void ufs_clusteracct(struct super_block *, struct ufs_cg_private_info *,
 /*
  * Free 'count' fragments from fragment number 'fragment'
  */
-void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count) {
+void ufs_free_fragments(struct inode *inode, unsigned fragment, unsigned count)
+{
 	struct super_block * sb;
 	struct ufs_sb_private_info * uspi;
 	struct ufs_super_block_first * usb1;
@@ -134,7 +135,8 @@ failed:
 /*
  * Free 'count' fragments from fragment number 'fragment' (free whole blocks)
  */
-void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
+void ufs_free_blocks(struct inode *inode, unsigned fragment, unsigned count)
+{
 	struct super_block * sb;
 	struct ufs_sb_private_info * uspi;
 	struct ufs_super_block_first * usb1;
@@ -222,15 +224,118 @@ failed:
 	return;
 }
 
+static struct page *ufs_get_locked_page(struct address_space *mapping,
+				unsigned long index)
+{
+	struct page *page;
+
+try_again:
+	page = find_lock_page(mapping, index);
+	if (!page) {
+		page = read_cache_page(mapping, index,
+				       (filler_t*)mapping->a_ops->readpage,
+				       NULL);
+		if (IS_ERR(page)) {
+			printk(KERN_ERR "ufs_change_blocknr: "
+			       "read_cache_page error: ino %lu, index: %lu\n",
+			       mapping->host->i_ino, index);
+			goto out;
+		}
+
+		lock_page(page);
+
+		if (!PageUptodate(page) || PageError(page)) {
+			unlock_page(page);
+			page_cache_release(page);
+
+			printk(KERN_ERR "ufs_change_blocknr: "
+			       "can not read page: ino %lu, index: %lu\n",
+			       mapping->host->i_ino, index);
+
+			page = ERR_PTR(-EIO);
+			goto out;
+		}
+	}
+
+	if (unlikely(!page->mapping || !page_has_buffers(page))) {
+		unlock_page(page);
+		page_cache_release(page);
+		goto try_again;/*we really need these buffers*/
+	}
+out:
+	return page;
+}
+
+/*
+ * Modify inode page cache in such way:
+ * have - blocks with b_blocknr equal to oldb...oldb+count-1
+ * get - blocks with b_blocknr equal to newb...newb+count-1
+ * also we suppose that oldb...oldb+count-1 blocks
+ * situated at the end of file.
+ *
+ * We can come here from ufs_writepage or ufs_prepare_write,
+ * locked_page is argument of these functions, so we already lock it.
+ */
+static void ufs_change_blocknr(struct inode *inode, unsigned int count,
+			       unsigned int oldb, unsigned int newb,
+			       struct page *locked_page)
+{
+	unsigned int blk_per_page = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);
+	sector_t baseblk;
+	struct address_space *mapping = inode->i_mapping;
+	pgoff_t index, cur_index = locked_page->index;
+	unsigned int i, j;
+	struct page *page;
+	struct buffer_head *head, *bh;
+
+	baseblk = ((i_size_read(inode) - 1) >> inode->i_blkbits) + 1 - count;
+
+	UFSD(("ENTER, ino %lu, count %u, oldb %u, newb %u\n",
+	      inode->i_ino, count, oldb, newb));
+
+	BUG_ON(!PageLocked(locked_page));
+
+	for (i = 0; i < count; i += blk_per_page) {
+		index = (baseblk+i) >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
+
+		if (likely(cur_index != index)) {
+			page = ufs_get_locked_page(mapping, index);
+			if (IS_ERR(page))
+				continue;
+		} else
+			page = locked_page;
+
+		j = i;
+		head = page_buffers(page);
+		bh = head;
+		do {
+			if (likely(bh->b_blocknr == j + oldb && j < count)) {
+				unmap_underlying_metadata(bh->b_bdev,
+							  bh->b_blocknr);
+				bh->b_blocknr = newb + j++;
+				mark_buffer_dirty(bh);
+			}
+
+			bh = bh->b_this_page;
+		} while (bh != head);
+
+		set_page_dirty(page);
 
-unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
-	unsigned goal, unsigned count, int * err )
+		if (likely(cur_index != index)) {
+			unlock_page(page);
+			page_cache_release(page);
+		}
+	}
+	UFSD(("EXIT\n"));
+}
+
+unsigned ufs_new_fragments(struct inode * inode, __fs32 * p, unsigned fragment,
+	   unsigned goal, unsigned count, int * err, struct page *locked_page)
 {
 	struct super_block * sb;
 	struct ufs_sb_private_info * uspi;
 	struct ufs_super_block_first * usb1;
-	struct buffer_head * bh;
-	unsigned cgno, oldcount, newcount, tmp, request, i, result;
+	unsigned cgno, oldcount, newcount, tmp, request, result;
 	
 	UFSD(("ENTER, ino %lu, fragment %u, goal %u, count %u\n", inode->i_ino, fragment, goal, count))
 	
@@ -343,24 +448,8 @@ unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
 	}
 	result = ufs_alloc_fragments (inode, cgno, goal, request, err);
 	if (result) {
-		for (i = 0; i < oldcount; i++) {
-			bh = sb_bread(sb, tmp + i);
-			if(bh)
-			{
-				clear_buffer_dirty(bh);
-				bh->b_blocknr = result + i;
-				mark_buffer_dirty (bh);
-				if (IS_SYNC(inode))
-					sync_dirty_buffer(bh);
-				brelse (bh);
-			}
-			else
-			{
-				printk(KERN_ERR "ufs_new_fragments: bread fail\n");
-				unlock_super(sb);
-				return 0;
-			}
-		}
+		ufs_change_blocknr(inode, oldcount, tmp, result, locked_page);
+
 		*p = cpu_to_fs32(sb, result);
 		*err = 0;
 		inode->i_blocks += count << uspi->s_nspfshift;
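A side note on the last context line above: i_blocks counts 512-byte
sectors, and s_nspfshift is the log2 of the number of sectors per
fragment, so "count << uspi->s_nspfshift" converts the newly allocated
fragments into sectors. A worked example, again with an assumed
2K-fragment configuration (the constants are illustrative, not taken
from the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned frag_size   = 2048;	/* assumed UFS fragment size   */
		unsigned sector_size = 512;	/* unit that i_blocks counts   */
		unsigned s_nspfshift = 2;	/* log2(frag_size/sector_size) */
		unsigned count       = 3;	/* fragments just allocated    */

		/* mirrors: inode->i_blocks += count << uspi->s_nspfshift; */
		printf("allocating %u fragments adds %u sectors to i_blocks\n",
		       count, count << s_nspfshift);
		printf("check: %u * (%u / %u) = %u\n",
		       count, frag_size, sector_size,
		       count * (frag_size / sector_size));
		return 0;
	}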