Diffstat (limited to 'fs/ext4/indirect.c')
-rw-r--r--	fs/ext4/indirect.c	86
1 file changed, 42 insertions, 44 deletions
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index e75f840000a0..36b369697a13 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -318,34 +318,24 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
  *	ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
  *	as described above and return 0.
  */
-static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
-			     ext4_lblk_t iblock, int indirect_blks,
-			     int *blks, ext4_fsblk_t goal,
-			     ext4_lblk_t *offsets, Indirect *branch)
+static int ext4_alloc_branch(handle_t *handle,
+			     struct ext4_allocation_request *ar,
+			     int indirect_blks, ext4_lblk_t *offsets,
+			     Indirect *branch)
 {
-	struct ext4_allocation_request	ar;
 	struct buffer_head *		bh;
 	ext4_fsblk_t			b, new_blocks[4];
 	__le32				*p;
 	int				i, j, err, len = 1;
 
-	/*
-	 * Set up for the direct block allocation
-	 */
-	memset(&ar, 0, sizeof(ar));
-	ar.inode = inode;
-	ar.len = *blks;
-	ar.logical = iblock;
-	if (S_ISREG(inode->i_mode))
-		ar.flags = EXT4_MB_HINT_DATA;
-
 	for (i = 0; i <= indirect_blks; i++) {
 		if (i == indirect_blks) {
-			ar.goal = goal;
-			new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
+			new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
 		} else
-			goal = new_blocks[i] = ext4_new_meta_blocks(handle, inode,
-							goal, 0, NULL, &err);
+			ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
+					ar->inode, ar->goal,
+					ar->flags & EXT4_MB_DELALLOC_RESERVED,
+					NULL, &err);
 		if (err) {
 			i--;
 			goto failed;
@@ -354,7 +344,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
 		if (i == 0)
 			continue;
 
-		bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]);
+		bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
 		if (unlikely(!bh)) {
 			err = -ENOMEM;
 			goto failed;
@@ -372,7 +362,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
 		b = new_blocks[i];
 
 		if (i == indirect_blks)
-			len = ar.len;
+			len = ar->len;
 		for (j = 0; j < len; j++)
 			*p++ = cpu_to_le32(b++);
 
@@ -381,11 +371,10 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
 		unlock_buffer(bh);
 
 		BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-		err = ext4_handle_dirty_metadata(handle, inode, bh);
+		err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
 		if (err)
 			goto failed;
 	}
-	*blks = ar.len;
 	return 0;
 failed:
 	for (; i >= 0; i--) {
@@ -396,10 +385,10 @@ failed:
 		 * existing before ext4_alloc_branch() was called.
 		 */
 		if (i > 0 && i != indirect_blks && branch[i].bh)
-			ext4_forget(handle, 1, inode, branch[i].bh,
+			ext4_forget(handle, 1, ar->inode, branch[i].bh,
 				    branch[i].bh->b_blocknr);
-		ext4_free_blocks(handle, inode, NULL, new_blocks[i],
-				 (i == indirect_blks) ? ar.len : 1, 0);
+		ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
+				 (i == indirect_blks) ? ar->len : 1, 0);
 	}
 	return err;
 }
@@ -419,9 +408,9 @@ failed:
  * inode (->i_blocks, etc.). In case of success we end up with the full
  * chain to new block and return 0.
  */
-static int ext4_splice_branch(handle_t *handle, struct inode *inode,
-			      ext4_lblk_t block, Indirect *where, int num,
-			      int blks)
+static int ext4_splice_branch(handle_t *handle,
+			      struct ext4_allocation_request *ar,
+			      Indirect *where, int num)
 {
 	int i;
 	int err = 0;
@@ -446,9 +435,9 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
 	 * Update the host buffer_head or inode to point to more just allocated
 	 * direct blocks blocks
 	 */
-	if (num == 0 && blks > 1) {
+	if (num == 0 && ar->len > 1) {
 		current_block = le32_to_cpu(where->key) + 1;
-		for (i = 1; i < blks; i++)
+		for (i = 1; i < ar->len; i++)
 			*(where->p + i) = cpu_to_le32(current_block++);
 	}
 
@@ -465,14 +454,14 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
 		 */
 		jbd_debug(5, "splicing indirect only\n");
 		BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
-		err = ext4_handle_dirty_metadata(handle, inode, where->bh);
+		err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
 		if (err)
 			goto err_out;
 	} else {
 		/*
 		 * OK, we spliced it into the inode itself on a direct block.
 		 */
-		ext4_mark_inode_dirty(handle, inode);
+		ext4_mark_inode_dirty(handle, ar->inode);
 		jbd_debug(5, "splicing direct\n");
 	}
 	return err;
@@ -484,11 +473,11 @@ err_out:
 		 * need to revoke the block, which is why we don't
 		 * need to set EXT4_FREE_BLOCKS_METADATA.
 		 */
-		ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
+		ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
 				 EXT4_FREE_BLOCKS_FORGET);
 	}
-	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
-			 blks, 0);
+	ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
+			 ar->len, 0);
 
 	return err;
 }
@@ -525,11 +514,11 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 			struct ext4_map_blocks *map,
 			int flags)
 {
+	struct ext4_allocation_request ar;
 	int err = -EIO;
 	ext4_lblk_t offsets[4];
 	Indirect chain[4];
 	Indirect *partial;
-	ext4_fsblk_t goal;
 	int indirect_blks;
 	int blocks_to_boundary = 0;
 	int depth;
@@ -579,7 +568,16 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 		return -ENOSPC;
 	}
 
-	goal = ext4_find_goal(inode, map->m_lblk, partial);
+	/* Set up for the direct block allocation */
+	memset(&ar, 0, sizeof(ar));
+	ar.inode = inode;
+	ar.logical = map->m_lblk;
+	if (S_ISREG(inode->i_mode))
+		ar.flags = EXT4_MB_HINT_DATA;
+	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+		ar.flags |= EXT4_MB_DELALLOC_RESERVED;
+
+	ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
 
 	/* the number of blocks need to allocate for [d,t]indirect blocks */
 	indirect_blks = (chain + depth) - partial - 1;
@@ -588,13 +586,13 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 	 * Next look up the indirect map to count the totoal number of
 	 * direct blocks to allocate for this branch.
 	 */
-	count = ext4_blks_to_allocate(partial, indirect_blks,
-				      map->m_len, blocks_to_boundary);
+	ar.len = ext4_blks_to_allocate(partial, indirect_blks,
+				       map->m_len, blocks_to_boundary);
+
 	/*
 	 * Block out ext4_truncate while we alter the tree
 	 */
-	err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks,
-				&count, goal,
-				offsets + (partial - chain), partial);
+	err = ext4_alloc_branch(handle, &ar, indirect_blks,
+				offsets + (partial - chain), partial);
 
 	/*
@@ -605,14 +603,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
 	 * may need to return -EAGAIN upwards in the worst case. --sct
 	 */
 	if (!err)
-		err = ext4_splice_branch(handle, inode, map->m_lblk,
-					 partial, indirect_blks, count);
+		err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
 	if (err)
 		goto cleanup;
 
 	map->m_flags |= EXT4_MAP_NEW;
 
 	ext4_update_inode_fsync_trans(handle, inode, 1);
+	count = ar.len;
 got_it:
 	map->m_flags |= EXT4_MAP_MAPPED;
 	map->m_pblk = le32_to_cpu(chain[depth-1].key);
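The change in these hunks replaces a long scalar argument list (inode, iblock, goal, and an in/out *blks count) with a single struct ext4_allocation_request that ext4_ind_map_blocks() fills once and passes to both ext4_alloc_branch() and ext4_splice_branch(); the callees read ar->inode, ar->goal and ar->len, and the number of blocks actually allocated comes back through ar.len. The standalone C sketch below illustrates the same "bundle the parameters in a request struct" pattern under simplified assumptions; the names blk_request, alloc_branch and splice_branch are hypothetical stand-ins for illustration only, not ext4 APIs.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for struct ext4_allocation_request: one bundle of
 * allocation parameters that callees read (and may update) in place. */
struct blk_request {
	unsigned long logical;	/* logical block being mapped */
	unsigned long goal;	/* preferred physical block */
	unsigned int len;	/* number of blocks requested / allocated */
	unsigned int flags;
};

/* Callee takes the request instead of several scalar parameters and can
 * update fields (here: len) that the caller consumes afterwards. */
static int alloc_branch(struct blk_request *ar, int indirect_blks)
{
	printf("allocating %u block(s) at goal %lu (%d indirect)\n",
	       ar->len, ar->goal, indirect_blks);
	if (ar->len > 4)	/* pretend the allocator returned fewer blocks */
		ar->len = 4;
	return 0;
}

static int splice_branch(const struct blk_request *ar)
{
	printf("splicing %u block(s) for logical %lu\n", ar->len, ar->logical);
	return 0;
}

int main(void)
{
	struct blk_request ar;

	/* Caller fills the request once, mirroring the new ext4_ind_map_blocks(). */
	memset(&ar, 0, sizeof(ar));
	ar.logical = 1234;
	ar.goal = 8192;
	ar.len = 8;

	if (alloc_branch(&ar, 2) == 0)
		splice_branch(&ar);

	/* ar.len now holds the count actually allocated, replacing the old
	 * "int *blks" in/out parameter of ext4_alloc_branch(). */
	printf("mapped %u block(s)\n", ar.len);
	return 0;
}

One visible side effect of passing the request struct down is in the first hunk: ext4_new_meta_blocks() now receives ar->flags & EXT4_MB_DELALLOC_RESERVED for its flags argument, where the old code always passed 0.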