| author | Theodore Ts'o <tytso@mit.edu> | 2014-09-04 18:06:25 -0400 |
|---|---|---|
| committer | Theodore Ts'o <tytso@mit.edu> | 2014-09-04 18:06:25 -0400 |
| commit | a521100231f816f8cdd9c8e77da14ff1e42c2b17 (patch) | |
| tree | 66dccba9ffc883f1a47fa9683f055bcdae54131e | |
| parent | eb68d0e2fc5a4e5c06324ea5f485fccbae626d05 (diff) | |
ext4: pass allocation_request struct to ext4_(alloc,splice)_branch
Instead of initializing the allocation_request structure in
ext4_alloc_branch(), set it up in ext4_ind_map_blocks(), and then pass
it to ext4_alloc_branch() and ext4_splice_branch().
This allows ext4_ind_map_blocks to pass flags in the allocation
request structure without having to add Yet Another argument to
ext4_alloc_branch().
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Reviewed-by: Jan Kara <jack@suse.cz>
| -rw-r--r-- | fs/ext4/indirect.c | 82 |
1 file changed, 38 insertions(+), 44 deletions(-)
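The heart of the change is that ext4_ind_map_blocks() now builds the ext4_allocation_request once and hands it to both helpers. As a rough sketch (condensed from the hunks below, with the block lookup and error paths omitted, so not a standalone function), the allocating path ends up reading:

```c
/* Sketch only: the caller fills in the allocation request once... */
struct ext4_allocation_request ar;

memset(&ar, 0, sizeof(ar));
ar.inode = inode;
ar.logical = map->m_lblk;
if (S_ISREG(inode->i_mode))
	ar.flags = EXT4_MB_HINT_DATA;
ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
ar.len = ext4_blks_to_allocate(partial, indirect_blks,
			       map->m_len, blocks_to_boundary);

/* ...and both helpers take it instead of separate iblock/goal/blks arguments. */
err = ext4_alloc_branch(handle, &ar, indirect_blks,
			offsets + (partial - chain), partial);
if (!err)
	err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
```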
```diff
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index e75f840000a0..69af0cd64724 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -318,34 +318,22 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
  * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
  * as described above and return 0.
  */
-static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
-			     ext4_lblk_t iblock, int indirect_blks,
-			     int *blks, ext4_fsblk_t goal,
-			     ext4_lblk_t *offsets, Indirect *branch)
+static int ext4_alloc_branch(handle_t *handle,
+			     struct ext4_allocation_request *ar,
+			     int indirect_blks, ext4_lblk_t *offsets,
+			     Indirect *branch)
 {
-	struct ext4_allocation_request ar;
 	struct buffer_head * bh;
 	ext4_fsblk_t b, new_blocks[4];
 	__le32 *p;
 	int i, j, err, len = 1;
 
-	/*
-	 * Set up for the direct block allocation
-	 */
-	memset(&ar, 0, sizeof(ar));
-	ar.inode = inode;
-	ar.len = *blks;
-	ar.logical = iblock;
-	if (S_ISREG(inode->i_mode))
-		ar.flags = EXT4_MB_HINT_DATA;
-
 	for (i = 0; i <= indirect_blks; i++) {
 		if (i == indirect_blks) {
-			ar.goal = goal;
-			new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
+			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
 		} else
-			goal = new_blocks[i] = ext4_new_meta_blocks(handle, inode,
-							goal, 0, NULL, &err);
+			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
+					ar->inode, ar->goal, 0, NULL, &err);
 		if (err) {
 			i--;
 			goto failed;
@@ -354,7 +342,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
 		if (i == 0)
 			continue;
 
-		bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]);
+		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
 		if (unlikely(!bh)) {
 			err = -ENOMEM;
 			goto failed;
@@ -372,7 +360,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
 		b = new_blocks[i];
 
 		if (i == indirect_blks)
-			len = ar.len;
+			len = ar->len;
 		for (j = 0; j < len; j++)
 			*p++ = cpu_to_le32(b++);
 
@@ -381,11 +369,10 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
 		unlock_buffer(bh);
 
 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-		err = ext4_handle_dirty_metadata(handle, inode, bh);
+		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
 		if (err)
 			goto failed;
 	}
-	*blks = ar.len;
 	return 0;
 failed:
 	for (; i >= 0; i--) {
@@ -396,10 +383,10 @@ failed:
 		 * existing before ext4_alloc_branch() was called.
 		 */
 		if (i > 0 && i != indirect_blks && branch[i].bh)
-			ext4_forget(handle, 1, inode, branch[i].bh,
+			ext4_forget(handle, 1, ar->inode, branch[i].bh,
 				    branch[i].bh->b_blocknr);
-		ext4_free_blocks(handle, inode, NULL, new_blocks[i],
-				 (i == indirect_blks) ? ar.len : 1, 0);
+		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
+				 (i == indirect_blks) ? ar->len : 1, 0);
 	}
 	return err;
 }
@@ -419,9 +406,9 @@ failed:
  * inode (->i_blocks, etc.). In case of success we end up with the full
  * chain to new block and return 0.
  */
-static int ext4_splice_branch(handle_t *handle, struct inode *inode,
-			      ext4_lblk_t block, Indirect *where, int num,
-			      int blks)
+static int ext4_splice_branch(handle_t *handle,
+			      struct ext4_allocation_request *ar,
+			      Indirect *where, int num)
 {
 	int i;
 	int err = 0;
@@ -446,9 +433,9 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
 	 * Update the host buffer_head or inode to point to more just allocated
 	 * direct blocks blocks
 	 */
-	if (num == 0 && blks > 1) {
+	if (num == 0 && ar->len > 1) {
 		current_block = le32_to_cpu(where->key) + 1;
-		for (i = 1; i < blks; i++)
+		for (i = 1; i < ar->len; i++)
 			*(where->p + i) = cpu_to_le32(current_block++);
 	}
 
@@ -465,14 +452,14 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
 		 */
 		jbd_debug(5, "splicing indirect only\n");
 		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
-		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
+		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
 		if (err)
 			goto err_out;
 	} else {
 		/*
 		 * OK, we spliced it into the inode itself on a direct block.
 		 */
-		ext4_mark_inode_dirty(handle, inode);
+		ext4_mark_inode_dirty(handle, ar->inode);
 		jbd_debug(5, "splicing direct\n");
 	}
 	return err;
@@ -484,11 +471,11 @@ err_out:
 		 * need to revoke the block, which is why we don't
 		 * need to set EXT4_FREE_BLOCKS_METADATA.
 		 */
-		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
+		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
 				 EXT4_FREE_BLOCKS_FORGET);
 	}
-	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
-			 blks, 0);
+	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
+			 ar->len, 0);
 
 	return err;
 }
@@ -525,11 +512,11 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 			struct ext4_map_blocks *map,
 			int flags)
 {
+	struct ext4_allocation_request ar;
 	int err = -EIO;
 	ext4_lblk_t offsets[4];
 	Indirect chain[4];
 	Indirect *partial;
-	ext4_fsblk_t goal;
 	int indirect_blks;
 	int blocks_to_boundary = 0;
 	int depth;
@@ -579,7 +566,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 		return -ENOSPC;
 	}
 
-	goal = ext4_find_goal(inode, map->m_lblk, partial);
+	/* Set up for the direct block allocation */
+	memset(&ar, 0, sizeof(ar));
+	ar.inode = inode;
+	ar.logical = map->m_lblk;
+	if (S_ISREG(inode->i_mode))
+		ar.flags = EXT4_MB_HINT_DATA;
+
+	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
 
 	/* the number of blocks need to allocate for [d,t]indirect blocks */
 	indirect_blks = (chain + depth) - partial - 1;
@@ -588,13 +582,13 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 	 * Next look up the indirect map to count the totoal number of
 	 * direct blocks to allocate for this branch.
 	 */
-	count = ext4_blks_to_allocate(partial, indirect_blks,
-				      map->m_len, blocks_to_boundary);
+	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
+				       map->m_len, blocks_to_boundary);
+
 	/*
 	 * Block out ext4_truncate while we alter the tree
 	 */
-	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
-				&count, goal,
+	err = ext4_alloc_branch(handle, &ar, indirect_blks,
 				offsets + (partial - chain), partial);
 
 	/*
@@ -605,14 +599,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 	 * may need to return -EAGAIN upwards in the worst case. --sct
 	 */
 	if (!err)
-		err = ext4_splice_branch(handle, inode, map->m_lblk,
-					 partial, indirect_blks, count);
+		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
 	if (err)
 		goto cleanup;
 
 	map->m_flags |= EXT4_MAP_NEW;
 
 	ext4_update_inode_fsync_trans(handle, inode, 1);
+	count = ar.len;
 got_it:
 	map->m_flags |= EXT4_MAP_MAPPED;
 	map->m_pblk = le32_to_cpu(chain[depth-1].key);
```
