Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--	fs/xfs/xfs_buf.c	86
1 file changed, 46 insertions(+), 40 deletions(-)
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index f463dce42515..d3a1974c91d5 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -172,8 +172,8 @@ xfs_buf_stale(
 struct xfs_buf *
 xfs_buf_alloc(
 	struct xfs_buftarg	*target,
-	xfs_off_t		range_base,
-	size_t			range_length,
+	xfs_daddr_t		blkno,
+	size_t			numblks,
 	xfs_buf_flags_t		flags)
 {
 	struct xfs_buf		*bp;
@@ -196,14 +196,21 @@ xfs_buf_alloc(
 	sema_init(&bp->b_sema, 0); /* held, no waiters */
 	XB_SET_OWNER(bp);
 	bp->b_target = target;
-	bp->b_file_offset = range_base;
+	bp->b_file_offset = blkno << BBSHIFT;
 	/*
 	 * Set buffer_length and count_desired to the same value initially.
 	 * I/O routines should use count_desired, which will be the same in
 	 * most cases but may be reset (e.g. XFS recovery).
 	 */
-	bp->b_buffer_length = bp->b_count_desired = range_length;
+	bp->b_buffer_length = bp->b_count_desired = numblks << BBSHIFT;
 	bp->b_flags = flags;
+
+	/*
+	 * We do not set the block number here in the buffer because we have not
+	 * finished initialising the buffer. We insert the buffer into the cache
+	 * in this state, so this ensures that we are unable to do IO on a
+	 * buffer that hasn't been fully initialised.
+	 */
 	bp->b_bn = XFS_BUF_DADDR_NULL;
 	atomic_set(&bp->b_pin_count, 0);
 	init_waitqueue_head(&bp->b_waiters);
@@ -426,29 +433,29 @@ _xfs_buf_map_pages(
  */
 xfs_buf_t *
 _xfs_buf_find(
-	xfs_buftarg_t		*btp,	/* block device target */
-	xfs_off_t		ioff,	/* starting offset of range */
-	size_t			isize,	/* length of range */
+	struct xfs_buftarg	*btp,
+	xfs_daddr_t		blkno,
+	size_t			numblks,
 	xfs_buf_flags_t		flags,
 	xfs_buf_t		*new_bp)
 {
-	xfs_off_t		range_base;
-	size_t			range_length;
+	xfs_off_t		offset;
+	size_t			numbytes;
 	struct xfs_perag	*pag;
 	struct rb_node		**rbp;
 	struct rb_node		*parent;
 	xfs_buf_t		*bp;
 
-	range_base = (ioff << BBSHIFT);
-	range_length = (isize << BBSHIFT);
+	offset = BBTOB(blkno);
+	numbytes = BBTOB(numblks);
 
 	/* Check for IOs smaller than the sector size / not sector aligned */
-	ASSERT(!(range_length < (1 << btp->bt_sshift)));
-	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
+	ASSERT(!(numbytes < (1 << btp->bt_sshift)));
+	ASSERT(!(offset & (xfs_off_t)btp->bt_smask));
 
 	/* get tree root */
 	pag = xfs_perag_get(btp->bt_mount,
-				xfs_daddr_to_agno(btp->bt_mount, ioff));
+				xfs_daddr_to_agno(btp->bt_mount, blkno));
 
 	/* walk tree */
 	spin_lock(&pag->pag_buf_lock);
@@ -459,9 +466,9 @@ _xfs_buf_find(
 		parent = *rbp;
 		bp = rb_entry(parent, struct xfs_buf, b_rbnode);
 
-		if (range_base < bp->b_file_offset)
+		if (offset < bp->b_file_offset)
 			rbp = &(*rbp)->rb_left;
-		else if (range_base > bp->b_file_offset)
+		else if (offset > bp->b_file_offset)
 			rbp = &(*rbp)->rb_right;
 		else {
 			/*
@@ -472,7 +479,7 @@ _xfs_buf_find(
 			 * reallocating a busy extent. Skip this buffer and
 			 * continue searching to the right for an exact match.
 			 */
-			if (bp->b_buffer_length != range_length) {
+			if (bp->b_buffer_length != numbytes) {
 				ASSERT(bp->b_flags & XBF_STALE);
 				rbp = &(*rbp)->rb_right;
 				continue;
@@ -532,21 +539,20 @@ found:
  */
 struct xfs_buf *
 xfs_buf_get(
-	xfs_buftarg_t		*target,/* target for buffer */
-	xfs_off_t		ioff,	/* starting offset of range */
-	size_t			isize,	/* length of range */
+	xfs_buftarg_t		*target,
+	xfs_daddr_t		blkno,
+	size_t			numblks,
 	xfs_buf_flags_t		flags)
 {
 	struct xfs_buf		*bp;
 	struct xfs_buf		*new_bp;
 	int			error = 0;
 
-	bp = _xfs_buf_find(target, ioff, isize, flags, NULL);
+	bp = _xfs_buf_find(target, blkno, numblks, flags, NULL);
 	if (likely(bp))
 		goto found;
 
-	new_bp = xfs_buf_alloc(target, ioff << BBSHIFT, isize << BBSHIFT,
-			flags);
+	new_bp = xfs_buf_alloc(target, blkno, numblks, flags);
 	if (unlikely(!new_bp))
 		return NULL;
 
@@ -556,7 +562,7 @@ xfs_buf_get(
 		return NULL;
 	}
 
-	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
+	bp = _xfs_buf_find(target, blkno, numblks, flags, new_bp);
 	if (!bp) {
 		xfs_buf_free(new_bp);
 		return NULL;
@@ -569,7 +575,7 @@ xfs_buf_get(
 	 * Now we have a workable buffer, fill in the block number so
 	 * that we can do IO on it.
 	 */
-	bp->b_bn = ioff;
+	bp->b_bn = blkno;
 	bp->b_count_desired = bp->b_buffer_length;
 
 found:
@@ -613,15 +619,15 @@ _xfs_buf_read(
 xfs_buf_t *
 xfs_buf_read(
 	xfs_buftarg_t		*target,
-	xfs_off_t		ioff,
-	size_t			isize,
+	xfs_daddr_t		blkno,
+	size_t			numblks,
 	xfs_buf_flags_t		flags)
 {
 	xfs_buf_t		*bp;
 
 	flags |= XBF_READ;
 
-	bp = xfs_buf_get(target, ioff, isize, flags);
+	bp = xfs_buf_get(target, blkno, numblks, flags);
 	if (bp) {
 		trace_xfs_buf_read(bp, flags, _RET_IP_);
 
@@ -656,13 +662,13 @@ xfs_buf_read(
 void
 xfs_buf_readahead(
 	xfs_buftarg_t		*target,
-	xfs_off_t		ioff,
-	size_t			isize)
+	xfs_daddr_t		blkno,
+	size_t			numblks)
 {
 	if (bdi_read_congested(target->bt_bdi))
 		return;
 
-	xfs_buf_read(target, ioff, isize,
+	xfs_buf_read(target, blkno, numblks,
 		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
 }
 
@@ -672,16 +678,15 @@ xfs_buf_readahead(
  */
 struct xfs_buf *
 xfs_buf_read_uncached(
-	struct xfs_mount	*mp,
 	struct xfs_buftarg	*target,
 	xfs_daddr_t		daddr,
-	size_t			length,
+	size_t			numblks,
 	int			flags)
 {
 	xfs_buf_t		*bp;
 	int			error;
 
-	bp = xfs_buf_get_uncached(target, length, flags);
+	bp = xfs_buf_get_uncached(target, numblks, flags);
 	if (!bp)
 		return NULL;
 
@@ -689,7 +694,7 @@ xfs_buf_read_uncached(
 	XFS_BUF_SET_ADDR(bp, daddr);
 	XFS_BUF_READ(bp);
 
-	xfsbdstrat(mp, bp);
+	xfsbdstrat(target->bt_mount, bp);
 	error = xfs_buf_iowait(bp);
 	if (error) {
 		xfs_buf_relse(bp);
@@ -705,7 +710,7 @@ xfs_buf_read_uncached(
 void
 xfs_buf_set_empty(
 	struct xfs_buf		*bp,
-	size_t			len)
+	size_t			numblks)
 {
 	if (bp->b_pages)
 		_xfs_buf_free_pages(bp);
@@ -714,7 +719,7 @@ xfs_buf_set_empty(
 	bp->b_page_count = 0;
 	bp->b_addr = NULL;
 	bp->b_file_offset = 0;
-	bp->b_buffer_length = bp->b_count_desired = len;
+	bp->b_buffer_length = bp->b_count_desired = numblks << BBSHIFT;
 	bp->b_bn = XFS_BUF_DADDR_NULL;
 	bp->b_flags &= ~XBF_MAPPED;
 }
@@ -776,17 +781,18 @@ xfs_buf_associate_memory(
 xfs_buf_t *
 xfs_buf_get_uncached(
 	struct xfs_buftarg	*target,
-	size_t			len,
+	size_t			numblks,
 	int			flags)
 {
-	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
+	unsigned long		page_count;
 	int			error, i;
 	xfs_buf_t		*bp;
 
-	bp = xfs_buf_alloc(target, 0, len, 0);
+	bp = xfs_buf_alloc(target, 0, numblks, 0);
 	if (unlikely(bp == NULL))
 		goto fail;
 
+	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
 	error = _xfs_buf_get_pages(bp, page_count, 0);
 	if (error)
 		goto fail_free_buf;
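
Note: the converted interfaces take a starting disk address (xfs_daddr_t blkno) and a length (numblks), both in 512-byte basic blocks, and the buffer cache converts them to byte offsets/lengths internally via BBSHIFT and BBTOB, as the hunks above show. The following is a minimal standalone sketch of that unit conversion; the macro values mirror the XFS definitions, but the program itself is illustrative and not part of the patch.

/* Standalone sketch of the basic-block to byte conversion used above. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define BBSHIFT		9				/* log2(512) */
#define BBTOB(blks)	((uint64_t)(blks) << BBSHIFT)	/* basic blocks -> bytes */

int main(void)
{
	int64_t	blkno = 128;	/* hypothetical starting disk address, in basic blocks */
	size_t	numblks = 16;	/* hypothetical length, in basic blocks */

	/* same arithmetic as _xfs_buf_find(): offset = BBTOB(blkno), numbytes = BBTOB(numblks) */
	printf("offset = %llu bytes, length = %llu bytes\n",
	       (unsigned long long)BBTOB(blkno),
	       (unsigned long long)BBTOB(numblks));
	return 0;
}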