diff options
author | Evgeniy Dushistov <dushistov@mail.ru> | 2006-06-25 08:47:20 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-06-25 13:01:01 -0400 |
commit | 6ef4d6bf86a82965896eaa1a189177239ec2bbab (patch) | |
tree | 3217c5601d8cf6701f8783ec776aa96d0dd75d4a /fs/ufs/inode.c | |
parent | c9a27b5dca52bbd0955e065e49e56eb313d02c34 (diff) |
[PATCH] ufs: change block number on the fly
First, some necessary notes about UFS itself: to avoid wasting disk
space, the tail of a file consists not of blocks (which are ordinarily big enough,
usually 16K), but of fragments (which are ordinarily 2K). As a file
grows, its tail occupies 1 fragment, then 2 fragments, and so on. At some stage the decision to
allocate a whole block is made, and all the fragments are moved into one block.
How this situation was handled before:
ufs_prepare_write
->block_prepare_write
->ufs_getfrag_block
->...
->ufs_new_fragments:
bh = sb_bread
bh->b_blocknr = result + i;
mark_buffer_dirty (bh);
This is the wrong solution, because:
- it did not take into consideration that there is another cache: the "inode page
cache"
- since sb_getblk uses page->index, not b_blocknr, to find
a certain block, this breaks sb_getblk.
How this situation is handled now: we go through the whole "inode page cache"; if
such a page is not in the cache, we load it into the cache, and then change b_blocknr.
Signed-off-by: Evgeniy Dushistov <dushistov@mail.ru>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'fs/ufs/inode.c')
-rw-r--r-- | fs/ufs/inode.c | 44 |
1 files changed, 25 insertions, 19 deletions
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 2b2366360e5a..ea2267316a72 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
@@ -172,9 +172,10 @@ static void ufs_clear_block(struct inode *inode, struct buffer_head *bh) | |||
172 | sync_dirty_buffer(bh); | 172 | sync_dirty_buffer(bh); |
173 | } | 173 | } |
174 | 174 | ||
175 | static struct buffer_head * ufs_inode_getfrag (struct inode *inode, | 175 | static struct buffer_head *ufs_inode_getfrag(struct inode *inode, |
176 | unsigned int fragment, unsigned int new_fragment, | 176 | unsigned int fragment, unsigned int new_fragment, |
177 | unsigned int required, int *err, int metadata, long *phys, int *new) | 177 | unsigned int required, int *err, int metadata, |
178 | long *phys, int *new, struct page *locked_page) | ||
178 | { | 179 | { |
179 | struct ufs_inode_info *ufsi = UFS_I(inode); | 180 | struct ufs_inode_info *ufsi = UFS_I(inode); |
180 | struct super_block * sb; | 181 | struct super_block * sb; |
@@ -232,7 +233,8 @@ repeat: | |||
232 | if (lastblockoff) { | 233 | if (lastblockoff) { |
233 | p2 = ufsi->i_u1.i_data + lastblock; | 234 | p2 = ufsi->i_u1.i_data + lastblock; |
234 | tmp = ufs_new_fragments (inode, p2, lastfrag, | 235 | tmp = ufs_new_fragments (inode, p2, lastfrag, |
235 | fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff, err); | 236 | fs32_to_cpu(sb, *p2), uspi->s_fpb - lastblockoff, |
237 | err, locked_page); | ||
236 | if (!tmp) { | 238 | if (!tmp) { |
237 | if (lastfrag != ufsi->i_lastfrag) | 239 | if (lastfrag != ufsi->i_lastfrag) |
238 | goto repeat; | 240 | goto repeat; |
@@ -244,14 +246,16 @@ repeat: | |||
244 | } | 246 | } |
245 | goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb; | 247 | goal = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock]) + uspi->s_fpb; |
246 | tmp = ufs_new_fragments (inode, p, fragment - blockoff, | 248 | tmp = ufs_new_fragments (inode, p, fragment - blockoff, |
247 | goal, required + blockoff, err); | 249 | goal, required + blockoff, |
250 | err, locked_page); | ||
248 | } | 251 | } |
249 | /* | 252 | /* |
250 | * We will extend last allocated block | 253 | * We will extend last allocated block |
251 | */ | 254 | */ |
252 | else if (lastblock == block) { | 255 | else if (lastblock == block) { |
253 | tmp = ufs_new_fragments (inode, p, fragment - (blockoff - lastblockoff), | 256 | tmp = ufs_new_fragments(inode, p, fragment - (blockoff - lastblockoff), |
254 | fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), err); | 257 | fs32_to_cpu(sb, *p), required + (blockoff - lastblockoff), |
258 | err, locked_page); | ||
255 | } | 259 | } |
256 | /* | 260 | /* |
257 | * We will allocate new block before last allocated block | 261 | * We will allocate new block before last allocated block |
@@ -259,8 +263,8 @@ repeat: | |||
259 | else /* (lastblock > block) */ { | 263 | else /* (lastblock > block) */ { |
260 | if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1]))) | 264 | if (lastblock && (tmp = fs32_to_cpu(sb, ufsi->i_u1.i_data[lastblock-1]))) |
261 | goal = tmp + uspi->s_fpb; | 265 | goal = tmp + uspi->s_fpb; |
262 | tmp = ufs_new_fragments (inode, p, fragment - blockoff, | 266 | tmp = ufs_new_fragments(inode, p, fragment - blockoff, |
263 | goal, uspi->s_fpb, err); | 267 | goal, uspi->s_fpb, err, locked_page); |
264 | } | 268 | } |
265 | if (!tmp) { | 269 | if (!tmp) { |
266 | if ((!blockoff && *p) || | 270 | if ((!blockoff && *p) || |
@@ -303,9 +307,10 @@ repeat2: | |||
303 | */ | 307 | */ |
304 | } | 308 | } |
305 | 309 | ||
306 | static struct buffer_head * ufs_block_getfrag (struct inode *inode, | 310 | static struct buffer_head *ufs_block_getfrag(struct inode *inode, struct buffer_head *bh, |
307 | struct buffer_head *bh, unsigned int fragment, unsigned int new_fragment, | 311 | unsigned int fragment, unsigned int new_fragment, |
308 | unsigned int blocksize, int * err, int metadata, long *phys, int *new) | 312 | unsigned int blocksize, int * err, int metadata, |
313 | long *phys, int *new, struct page *locked_page) | ||
309 | { | 314 | { |
310 | struct super_block * sb; | 315 | struct super_block * sb; |
311 | struct ufs_sb_private_info * uspi; | 316 | struct ufs_sb_private_info * uspi; |
@@ -350,7 +355,8 @@ repeat: | |||
350 | goal = tmp + uspi->s_fpb; | 355 | goal = tmp + uspi->s_fpb; |
351 | else | 356 | else |
352 | goal = bh->b_blocknr + uspi->s_fpb; | 357 | goal = bh->b_blocknr + uspi->s_fpb; |
353 | tmp = ufs_new_fragments (inode, p, ufs_blknum(new_fragment), goal, uspi->s_fpb, err); | 358 | tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment), goal, |
359 | uspi->s_fpb, err, locked_page); | ||
354 | if (!tmp) { | 360 | if (!tmp) { |
355 | if (fs32_to_cpu(sb, *p)) | 361 | if (fs32_to_cpu(sb, *p)) |
356 | goto repeat; | 362 | goto repeat; |
@@ -424,15 +430,15 @@ int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_hea | |||
424 | * it much more readable: | 430 | * it much more readable: |
425 | */ | 431 | */ |
426 | #define GET_INODE_DATABLOCK(x) \ | 432 | #define GET_INODE_DATABLOCK(x) \ |
427 | ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new) | 433 | ufs_inode_getfrag(inode, x, fragment, 1, &err, 0, &phys, &new, bh_result->b_page) |
428 | #define GET_INODE_PTR(x) \ | 434 | #define GET_INODE_PTR(x) \ |
429 | ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL) | 435 | ufs_inode_getfrag(inode, x, fragment, uspi->s_fpb, &err, 1, NULL, NULL, bh_result->b_page) |
430 | #define GET_INDIRECT_DATABLOCK(x) \ | 436 | #define GET_INDIRECT_DATABLOCK(x) \ |
431 | ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \ | 437 | ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \ |
432 | &err, 0, &phys, &new); | 438 | &err, 0, &phys, &new, bh_result->b_page); |
433 | #define GET_INDIRECT_PTR(x) \ | 439 | #define GET_INDIRECT_PTR(x) \ |
434 | ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \ | 440 | ufs_block_getfrag(inode, bh, x, fragment, sb->s_blocksize, \ |
435 | &err, 1, NULL, NULL); | 441 | &err, 1, NULL, NULL, bh_result->b_page); |
436 | 442 | ||
437 | if (ptr < UFS_NDIR_FRAGMENT) { | 443 | if (ptr < UFS_NDIR_FRAGMENT) { |
438 | bh = GET_INODE_DATABLOCK(ptr); | 444 | bh = GET_INODE_DATABLOCK(ptr); |