Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--   fs/xfs/linux-2.6/xfs_buf.c   240
1 file changed, 125 insertions(+), 115 deletions(-)
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 286e36e21dae..aa1d353def29 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -188,8 +188,8 @@ _xfs_buf_initialize(
 	atomic_set(&bp->b_hold, 1);
 	init_completion(&bp->b_iowait);
 	INIT_LIST_HEAD(&bp->b_list);
-	INIT_LIST_HEAD(&bp->b_hash_list);
-	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
+	RB_CLEAR_NODE(&bp->b_rbnode);
+	sema_init(&bp->b_sema, 0); /* held, no waiters */
 	XB_SET_OWNER(bp);
 	bp->b_target = target;
 	bp->b_file_offset = range_base;
@@ -262,8 +262,6 @@ xfs_buf_free(
 {
 	trace_xfs_buf_free(bp, _RET_IP_);
 
-	ASSERT(list_empty(&bp->b_hash_list));
-
 	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
 		uint		i;
 
@@ -422,8 +420,10 @@ _xfs_buf_find(
 {
 	xfs_off_t		range_base;
 	size_t			range_length;
-	xfs_bufhash_t		*hash;
-	xfs_buf_t		*bp, *n;
+	struct xfs_perag	*pag;
+	struct rb_node		**rbp;
+	struct rb_node		*parent;
+	xfs_buf_t		*bp;
 
 	range_base = (ioff << BBSHIFT);
 	range_length = (isize << BBSHIFT);
@@ -432,14 +432,37 @@ _xfs_buf_find(
 	ASSERT(!(range_length < (1 << btp->bt_sshift)));
 	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
 
-	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
-
-	spin_lock(&hash->bh_lock);
-
-	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
-		ASSERT(btp == bp->b_target);
-		if (bp->b_file_offset == range_base &&
-		    bp->b_buffer_length == range_length) {
+	/* get tree root */
+	pag = xfs_perag_get(btp->bt_mount,
+				xfs_daddr_to_agno(btp->bt_mount, ioff));
+
+	/* walk tree */
+	spin_lock(&pag->pag_buf_lock);
+	rbp = &pag->pag_buf_tree.rb_node;
+	parent = NULL;
+	bp = NULL;
+	while (*rbp) {
+		parent = *rbp;
+		bp = rb_entry(parent, struct xfs_buf, b_rbnode);
+
+		if (range_base < bp->b_file_offset)
+			rbp = &(*rbp)->rb_left;
+		else if (range_base > bp->b_file_offset)
+			rbp = &(*rbp)->rb_right;
+		else {
+			/*
+			 * found a block offset match. If the range doesn't
+			 * match, the only way this is allowed is if the buffer
+			 * in the cache is stale and the transaction that made
+			 * it stale has not yet committed. i.e. we are
+			 * reallocating a busy extent. Skip this buffer and
+			 * continue searching to the right for an exact match.
+			 */
+			if (bp->b_buffer_length != range_length) {
+				ASSERT(bp->b_flags & XBF_STALE);
+				rbp = &(*rbp)->rb_right;
+				continue;
+			}
 			atomic_inc(&bp->b_hold);
 			goto found;
 		}
@@ -449,17 +472,21 @@ _xfs_buf_find(
 	if (new_bp) {
 		_xfs_buf_initialize(new_bp, btp, range_base,
 				range_length, flags);
-		new_bp->b_hash = hash;
-		list_add(&new_bp->b_hash_list, &hash->bh_list);
+		rb_link_node(&new_bp->b_rbnode, parent, rbp);
+		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
+		/* the buffer keeps the perag reference until it is freed */
+		new_bp->b_pag = pag;
+		spin_unlock(&pag->pag_buf_lock);
 	} else {
 		XFS_STATS_INC(xb_miss_locked);
+		spin_unlock(&pag->pag_buf_lock);
+		xfs_perag_put(pag);
 	}
-
-	spin_unlock(&hash->bh_lock);
 	return new_bp;
 
 found:
-	spin_unlock(&hash->bh_lock);
+	spin_unlock(&pag->pag_buf_lock);
+	xfs_perag_put(pag);
 
 	/* Attempt to get the semaphore without sleeping,
 	 * if this does not work then we need to drop the
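Note (not part of the patch): the two hunks above follow the kernel's standard rbtree search-then-link idiom from <linux/rbtree.h>: walk from the root keeping the last visited node as parent, and on a miss link the new node into the child slot the walk fell off before rebalancing. A minimal, self-contained sketch of that idiom is shown below; the demo_buf node type and its offset key are illustrative only, and the real _xfs_buf_find additionally handles the stale-buffer length mismatch and the per-AG locking seen above.

/* needs <linux/rbtree.h> and <linux/types.h> */
struct demo_buf {
	struct rb_node	node;
	u64		offset;		/* search key */
};

/* Return an existing node with the same key, or insert @new and return NULL. */
static struct demo_buf *
demo_find_or_insert(struct rb_root *root, struct demo_buf *new)
{
	struct rb_node	**rbp = &root->rb_node;
	struct rb_node	*parent = NULL;

	while (*rbp) {
		struct demo_buf	*db = rb_entry(*rbp, struct demo_buf, node);

		parent = *rbp;
		if (new->offset < db->offset)
			rbp = &(*rbp)->rb_left;
		else if (new->offset > db->offset)
			rbp = &(*rbp)->rb_right;
		else
			return db;	/* hit: caller takes a reference */
	}

	/* miss: link the new node at the leaf slot we stopped at, then rebalance */
	rb_link_node(&new->node, parent, rbp);
	rb_insert_color(&new->node, root);
	return NULL;
}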
@@ -625,8 +652,7 @@ void
 xfs_buf_readahead(
 	xfs_buftarg_t		*target,
 	xfs_off_t		ioff,
-	size_t			isize,
-	xfs_buf_flags_t		flags)
+	size_t			isize)
 {
 	struct backing_dev_info *bdi;
 
@@ -634,8 +660,42 @@ xfs_buf_readahead(
 	if (bdi_read_congested(bdi))
 		return;
 
-	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
-	xfs_buf_read(target, ioff, isize, flags);
+	xfs_buf_read(target, ioff, isize,
+		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
+}
+
+/*
+ * Read an uncached buffer from disk. Allocates and returns a locked
+ * buffer containing the disk contents or nothing.
+ */
+struct xfs_buf *
+xfs_buf_read_uncached(
+	struct xfs_mount	*mp,
+	struct xfs_buftarg	*target,
+	xfs_daddr_t		daddr,
+	size_t			length,
+	int			flags)
+{
+	xfs_buf_t		*bp;
+	int			error;
+
+	bp = xfs_buf_get_uncached(target, length, flags);
+	if (!bp)
+		return NULL;
+
+	/* set up the buffer for a read IO */
+	xfs_buf_lock(bp);
+	XFS_BUF_SET_ADDR(bp, daddr);
+	XFS_BUF_READ(bp);
+	XFS_BUF_BUSY(bp);
+
+	xfsbdstrat(mp, bp);
+	error = xfs_buf_iowait(bp);
+	if (error || bp->b_error) {
+		xfs_buf_relse(bp);
+		return NULL;
+	}
+	return bp;
 }
 
 xfs_buf_t *
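Note (not part of the patch): per the hunk above, xfs_buf_read_uncached() hands back a locked buffer on success and NULL on any allocation or I/O failure. A hypothetical caller fragment might look like the following; the data-device target, daddr and one-basic-block length are illustrative assumptions, not taken from this patch.

	struct xfs_buf	*bp;

	/* read one uncached, sector-sized buffer from the data device */
	bp = xfs_buf_read_uncached(mp, mp->m_ddev_targp, daddr, BBTOB(1), 0);
	if (!bp)
		return EIO;		/* XFS uses positive errnos internally here */

	/* ... inspect the contents via bp->b_addr ... */
	xfs_buf_relse(bp);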
@@ -707,9 +767,10 @@ xfs_buf_associate_memory(
 }
 
 xfs_buf_t *
-xfs_buf_get_noaddr(
+xfs_buf_get_uncached(
+	struct xfs_buftarg	*target,
 	size_t			len,
-	xfs_buftarg_t		*target)
+	int			flags)
 {
 	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
 	int			error, i;
@@ -725,7 +786,7 @@ xfs_buf_get_noaddr(
 		goto fail_free_buf;
 
 	for (i = 0; i < page_count; i++) {
-		bp->b_pages[i] = alloc_page(GFP_KERNEL);
+		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
 		if (!bp->b_pages[i])
 			goto fail_free_mem;
 	}
@@ -740,7 +801,7 @@ xfs_buf_get_noaddr(
 
 	xfs_buf_unlock(bp);
 
-	trace_xfs_buf_get_noaddr(bp, _RET_IP_);
+	trace_xfs_buf_get_uncached(bp, _RET_IP_);
 	return bp;
 
  fail_free_mem:
@@ -774,29 +835,30 @@ void
 xfs_buf_rele(
 	xfs_buf_t		*bp)
 {
-	xfs_bufhash_t		*hash = bp->b_hash;
+	struct xfs_perag	*pag = bp->b_pag;
 
 	trace_xfs_buf_rele(bp, _RET_IP_);
 
-	if (unlikely(!hash)) {
+	if (!pag) {
 		ASSERT(!bp->b_relse);
+		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
 		if (atomic_dec_and_test(&bp->b_hold))
 			xfs_buf_free(bp);
 		return;
 	}
 
+	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
 	ASSERT(atomic_read(&bp->b_hold) > 0);
-	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
+	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
 		if (bp->b_relse) {
 			atomic_inc(&bp->b_hold);
-			spin_unlock(&hash->bh_lock);
-			(*(bp->b_relse)) (bp);
-		} else if (bp->b_flags & XBF_FS_MANAGED) {
-			spin_unlock(&hash->bh_lock);
+			spin_unlock(&pag->pag_buf_lock);
+			bp->b_relse(bp);
 		} else {
 			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
-			list_del_init(&bp->b_hash_list);
-			spin_unlock(&hash->bh_lock);
+			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
+			spin_unlock(&pag->pag_buf_lock);
+			xfs_perag_put(pag);
 			xfs_buf_free(bp);
 		}
 	}
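Note (not part of the patch): the release path above keeps the atomic_dec_and_lock() pattern, which only acquires pag_buf_lock when the hold count is about to reach zero, so uncontended releases never touch the per-AG lock. A simplified sketch of that pattern is below; obj, tree_lock, tree_root and free_obj are placeholder names, not identifiers from this file.

	/* drop a reference; unhash and free only on the last put */
	if (atomic_dec_and_lock(&obj->refcount, &tree_lock)) {
		rb_erase(&obj->node, &tree_root);	/* unhash under the lock */
		spin_unlock(&tree_lock);
		free_obj(obj);				/* no references remain */
	}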
@@ -859,7 +921,7 @@ xfs_buf_lock(
 	trace_xfs_buf_lock(bp, _RET_IP_);
 
 	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
-		xfs_log_force(bp->b_mount, 0);
+		xfs_log_force(bp->b_target->bt_mount, 0);
 	if (atomic_read(&bp->b_io_remaining))
 		blk_run_address_space(bp->b_target->bt_mapping);
 	down(&bp->b_sema);
@@ -924,19 +986,7 @@ xfs_buf_iodone_work(
 	xfs_buf_t		*bp =
 		container_of(work, xfs_buf_t, b_iodone_work);
 
-	/*
-	 * We can get an EOPNOTSUPP to ordered writes. Here we clear the
-	 * ordered flag and reissue them. Because we can't tell the higher
-	 * layers directly that they should not issue ordered I/O anymore, they
-	 * need to check if the _XFS_BARRIER_FAILED flag was set during I/O completion.
-	 */
-	if ((bp->b_error == EOPNOTSUPP) &&
-	    (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
-		trace_xfs_buf_ordered_retry(bp, _RET_IP_);
-		bp->b_flags &= ~XBF_ORDERED;
-		bp->b_flags |= _XFS_BARRIER_FAILED;
-		xfs_buf_iorequest(bp);
-	} else if (bp->b_iodone)
+	if (bp->b_iodone)
 		(*(bp->b_iodone))(bp);
 	else if (bp->b_flags & XBF_ASYNC)
 		xfs_buf_relse(bp);
@@ -982,7 +1032,6 @@ xfs_bwrite(
 {
 	int			error;
 
-	bp->b_mount = mp;
 	bp->b_flags |= XBF_WRITE;
 	bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
 
@@ -1003,8 +1052,6 @@ xfs_bdwrite(
 {
 	trace_xfs_buf_bdwrite(bp, _RET_IP_);
 
-	bp->b_mount = mp;
-
 	bp->b_flags &= ~XBF_READ;
 	bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);
 
@@ -1013,7 +1060,7 @@ xfs_bdwrite(
 
 /*
  * Called when we want to stop a buffer from getting written or read.
- * We attach the EIO error, muck with its flags, and call biodone
+ * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
  * so that the proper iodone callbacks get called.
  */
 STATIC int
@@ -1030,21 +1077,21 @@ xfs_bioerror(
 	XFS_BUF_ERROR(bp, EIO);
 
 	/*
-	 * We're calling biodone, so delete XBF_DONE flag.
+	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
 	 */
 	XFS_BUF_UNREAD(bp);
 	XFS_BUF_UNDELAYWRITE(bp);
 	XFS_BUF_UNDONE(bp);
 	XFS_BUF_STALE(bp);
 
-	xfs_biodone(bp);
+	xfs_buf_ioend(bp, 0);
 
 	return EIO;
 }
 
 /*
  * Same as xfs_bioerror, except that we are releasing the buffer
- * here ourselves, and avoiding the biodone call.
+ * here ourselves, and avoiding the xfs_buf_ioend call.
  * This is meant for userdata errors; metadata bufs come with
  * iodone functions attached, so that we can track down errors.
  */
@@ -1093,7 +1140,7 @@ int
 xfs_bdstrat_cb(
 	struct xfs_buf	*bp)
 {
-	if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
+	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
 		trace_xfs_bdstrat_shut(bp, _RET_IP_);
 		/*
 		 * Metadata write that didn't get logged but
@@ -1195,7 +1242,7 @@ _xfs_buf_ioapply(
 
 	if (bp->b_flags & XBF_ORDERED) {
 		ASSERT(!(bp->b_flags & XBF_READ));
-		rw = WRITE_BARRIER;
+		rw = WRITE_FLUSH_FUA;
 	} else if (bp->b_flags & XBF_LOG_BUFFER) {
 		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
 		bp->b_flags &= ~_XBF_RUN_QUEUES;
@@ -1399,62 +1446,24 @@ xfs_buf_iomove(
  */
 void
 xfs_wait_buftarg(
-	xfs_buftarg_t	*btp)
-{
-	xfs_buf_t	*bp, *n;
-	xfs_bufhash_t	*hash;
-	uint		i;
-
-	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
-		hash = &btp->bt_hash[i];
-again:
-		spin_lock(&hash->bh_lock);
-		list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
-			ASSERT(btp == bp->b_target);
-			if (!(bp->b_flags & XBF_FS_MANAGED)) {
-				spin_unlock(&hash->bh_lock);
-				/*
-				 * Catch superblock reference count leaks
-				 * immediately
-				 */
-				BUG_ON(bp->b_bn == 0);
-				delay(100);
-				goto again;
-			}
-		}
-		spin_unlock(&hash->bh_lock);
-	}
-}
-
-/*
- * Allocate buffer hash table for a given target.
- * For devices containing metadata (i.e. not the log/realtime devices)
- * we need to allocate a much larger hash table.
- */
-STATIC void
-xfs_alloc_bufhash(
-	xfs_buftarg_t		*btp,
-	int			external)
+	struct xfs_buftarg	*btp)
 {
-	unsigned int		i;
+	struct xfs_perag	*pag;
+	uint			i;
 
-	btp->bt_hashshift = external ? 3 : 12;	/* 8 or 4096 buckets */
-	btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
-					 sizeof(xfs_bufhash_t));
-	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
-		spin_lock_init(&btp->bt_hash[i].bh_lock);
-		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
+	for (i = 0; i < btp->bt_mount->m_sb.sb_agcount; i++) {
+		pag = xfs_perag_get(btp->bt_mount, i);
+		spin_lock(&pag->pag_buf_lock);
+		while (rb_first(&pag->pag_buf_tree)) {
+			spin_unlock(&pag->pag_buf_lock);
+			delay(100);
+			spin_lock(&pag->pag_buf_lock);
+		}
+		spin_unlock(&pag->pag_buf_lock);
+		xfs_perag_put(pag);
 	}
 }
 
-STATIC void
-xfs_free_bufhash(
-	xfs_buftarg_t		*btp)
-{
-	kmem_free_large(btp->bt_hash);
-	btp->bt_hash = NULL;
-}
-
 /*
  * buftarg list for delwrite queue processing
  */
@@ -1487,7 +1496,6 @@ xfs_free_buftarg(
 	xfs_flush_buftarg(btp, 1);
 	if (mp->m_flags & XFS_MOUNT_BARRIER)
 		xfs_blkdev_issue_flush(btp);
-	xfs_free_bufhash(btp);
 	iput(btp->bt_mapping->host);
 
 	/* Unregister the buftarg first so that we don't get a
@@ -1572,6 +1580,7 @@ xfs_mapping_buftarg(
 			XFS_BUFTARG_NAME(btp));
 		return ENOMEM;
 	}
+	inode->i_ino = get_next_ino();
 	inode->i_mode = S_IFBLK;
 	inode->i_bdev = bdev;
 	inode->i_rdev = bdev->bd_dev;
@@ -1609,6 +1618,7 @@ out_error:
 
 xfs_buftarg_t *
 xfs_alloc_buftarg(
+	struct xfs_mount	*mp,
 	struct block_device	*bdev,
 	int			external,
 	const char		*fsname)
@@ -1617,6 +1627,7 @@ xfs_alloc_buftarg(
 
 	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
 
+	btp->bt_mount = mp;
 	btp->bt_dev = bdev->bd_dev;
 	btp->bt_bdev = bdev;
 	if (xfs_setsize_buftarg_early(btp, bdev))
@@ -1625,7 +1636,6 @@ xfs_alloc_buftarg(
 		goto error;
 	if (xfs_alloc_delwrite_queue(btp, fsname))
 		goto error;
-	xfs_alloc_bufhash(btp, external);
 	return btp;
 
 error:
@@ -1771,7 +1781,6 @@ xfs_buf_delwri_split(
 	INIT_LIST_HEAD(list);
 	spin_lock(dwlk);
 	list_for_each_entry_safe(bp, n, dwq, b_list) {
-		trace_xfs_buf_delwri_split(bp, _RET_IP_);
 		ASSERT(bp->b_flags & XBF_DELWRI);
 
 		if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1785,6 +1794,7 @@ xfs_buf_delwri_split(
 					 _XBF_RUN_QUEUES);
 			bp->b_flags |= XBF_WRITE;
 			list_move_tail(&bp->b_list, list);
+			trace_xfs_buf_delwri_split(bp, _RET_IP_);
 		} else
 			skipped++;
 	}
@@ -1916,7 +1926,7 @@ xfs_flush_buftarg(
 			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
 
 			list_del_init(&bp->b_list);
-			xfs_iowait(bp);
+			xfs_buf_iowait(bp);
 			xfs_buf_relse(bp);
 		}
 	}
@@ -1933,7 +1943,7 @@ xfs_buf_init(void)
 		goto out;
 
 	xfslogd_workqueue = alloc_workqueue("xfslogd",
-					WQ_RESCUER | WQ_HIGHPRI, 1);
+					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
 	if (!xfslogd_workqueue)
 		goto out_free_buf_zone;
 