author	Christoph Hellwig <hch@infradead.org>	2012-04-23 01:58:39 -0400
committer	Ben Myers <bpm@sgi.com>	2012-05-14 17:20:31 -0400
commit	43ff2122e6492bcc88b065c433453dce88223b30 (patch)
tree	0f762cfb753edd73402b8830e0927d9efba30c61	/fs/xfs/xfs_buf.c
parent	960c60af8b9481595e68875e79b2602e73169c29 (diff)
xfs: on-stack delayed write buffer lists
Queue delwri buffers on a local on-stack list instead of a per-buftarg one,
and write back the buffers per-process instead of by waking up xfsbufd.

This is now easily doable given that we have very few places left that write
delwri buffers:

 - log recovery:
   Only done at mount time, and already forcing out the buffers
   synchronously using xfs_flush_buftarg.

 - quotacheck:
   Same story.

 - dquot reclaim:
   Writes out dirty dquots on the LRU under memory pressure.  We might want
   to look into doing more of this via xfsaild, but it's already more optimal
   than the synchronous inode reclaim that writes each buffer synchronously.

 - xfsaild:
   This is the main beneficiary of the change.  By keeping a local list of
   buffers to write we reduce latency of writing out buffers, and more
   importantly we can remove all the delwri list promotions which were
   hitting the buffer cache hard under sustained metadata loads.

The implementation is very straightforward: xfs_buf_delwri_queue now gets a
new list_head pointer that it adds the delwri buffers to, and all callers
need to eventually submit the list using xfs_buf_delwri_submit or
xfs_buf_delwri_submit_nowait.  Buffers that already are on a delwri list are
skipped in xfs_buf_delwri_queue, assuming they already are on another delwri
list.

The biggest change to pass down the buffer list was done to the AIL pushing.
Now that we operate on buffers, the trylock, push and pushbuf log item
methods are merged into a single push routine, which tries to lock the item
and, if possible, adds the buffer that needs writeback to the buffer list.
This leads to much simpler code than the previous split, but requires the
individual IOP_PUSH instances to unlock and reacquire the AIL lock around
calls to blocking routines.

Given that the xfsailds now also handle writing out buffers, the conditions
for log forcing and the sleep times needed some small changes.  The most
important one is that we consider an AIL busy as long as we still have
buffers to push, and the other one is that we do increment the pushed LSN
for buffers that are under flushing at this moment, but still count them
towards the stuck items for restart purposes.  Without this we could hammer
on stuck items without ever forcing the log and not make progress under
heavy random delete workloads on fast flash storage devices.

[ Dave Chinner:
  - rebase on previous patches.
  - improved comments for XBF_DELWRI_Q handling
  - fix XBF_ASYNC handling in queue submission (test 106 failure)
  - rename delwri submit function buffer list parameters for clarity
  - xfs_efd_item_push() should return XFS_ITEM_PINNED ]

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
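For illustration only (not part of the patch): a minimal sketch of the calling convention this introduces. The wrapper function and the bp1/bp2 buffers are hypothetical; xfs_buf_delwri_queue(), xfs_buf_delwri_submit() and xfs_buf_delwri_submit_nowait() are the interfaces changed or added by the diff below.

	/*
	 * Illustrative sketch only: queue locked buffers on a caller-owned
	 * on-stack list, then submit the whole list at once.
	 */
	static int example_write_two_buffers(struct xfs_buf *bp1, struct xfs_buf *bp2)
	{
		LIST_HEAD(buffer_list);	/* thread-local list, no locking needed */

		xfs_buf_lock(bp1);
		xfs_buf_delwri_queue(bp1, &buffer_list);	/* list takes its own hold */
		xfs_buf_unlock(bp1);

		xfs_buf_lock(bp2);
		xfs_buf_delwri_queue(bp2, &buffer_list);
		xfs_buf_unlock(bp2);

		/*
		 * Write everything out and wait for completion.  Callers that
		 * track completion by other means (e.g. the AIL) would use
		 * xfs_buf_delwri_submit_nowait(&buffer_list) instead.
		 */
		return xfs_buf_delwri_submit(&buffer_list);
	}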
Diffstat (limited to 'fs/xfs/xfs_buf.c')
-rw-r--r--	fs/xfs/xfs_buf.c	341
1 file changed, 131 insertions(+), 210 deletions(-)
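The single merged push routine mentioned in the commit message lives outside this file, so it does not appear in the diff below. Purely as a hypothetical sketch of the shape described above (the example_item_to_buf() helper and the exact method signature are assumptions, not taken from this patch), such a routine would feed the caller's buffer list roughly like this:

	/*
	 * Hypothetical sketch: a merged log item push method that tries to
	 * lock the item's buffer and queues it on the caller-supplied delwri
	 * list.  XFS_ITEM_PINNED is the return value the commit message cites
	 * for xfs_efd_item_push(); example_item_to_buf() is an assumed helper.
	 */
	STATIC uint
	example_item_push(
		struct xfs_log_item	*lip,
		struct list_head	*buffer_list)
	{
		struct xfs_buf	*bp = example_item_to_buf(lip);

		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		if (!xfs_buf_trylock(bp))
			return XFS_ITEM_LOCKED;

		/* a false return means someone else already queued the buffer */
		xfs_buf_delwri_queue(bp, buffer_list);

		xfs_buf_unlock(bp);
		return XFS_ITEM_SUCCESS;
	}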
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 6819b5163e33..b82fc5c67fed 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -42,7 +42,6 @@
 #include "xfs_trace.h"
 
 static kmem_zone_t *xfs_buf_zone;
-STATIC int xfsbufd(void *);
 
 static struct workqueue_struct *xfslogd_workqueue;
 
@@ -144,8 +143,17 @@ void
 xfs_buf_stale(
 	struct xfs_buf *bp)
 {
+	ASSERT(xfs_buf_islocked(bp));
+
 	bp->b_flags |= XBF_STALE;
-	xfs_buf_delwri_dequeue(bp);
+
+	/*
+	 * Clear the delwri status so that a delwri queue walker will not
+	 * flush this buffer to disk now that it is stale. The delwri queue has
+	 * a reference to the buffer, so this is safe to do.
+	 */
+	bp->b_flags &= ~_XBF_DELWRI_Q;
+
 	atomic_set(&(bp)->b_lru_ref, 0);
 	if (!list_empty(&bp->b_lru)) {
 		struct xfs_buftarg *btp = bp->b_target;
@@ -592,10 +600,10 @@ _xfs_buf_read(
 {
 	int status;
 
-	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
+	ASSERT(!(flags & XBF_WRITE));
 	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
 
-	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD);
+	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
 	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
 
 	status = xfs_buf_iorequest(bp);
@@ -855,7 +863,7 @@ xfs_buf_rele(
 			spin_unlock(&pag->pag_buf_lock);
 		} else {
 			xfs_buf_lru_del(bp);
-			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
+			ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
 			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
 			spin_unlock(&pag->pag_buf_lock);
 			xfs_perag_put(pag);
@@ -915,13 +923,6 @@ xfs_buf_lock(
 	trace_xfs_buf_lock_done(bp, _RET_IP_);
 }
 
-/*
- * Releases the lock on the buffer object.
- * If the buffer is marked delwri but is not queued, do so before we
- * unlock the buffer as we need to set flags correctly. We also need to
- * take a reference for the delwri queue because the unlocker is going to
- * drop their's and they don't know we just queued it.
- */
 void
 xfs_buf_unlock(
 	struct xfs_buf *bp)
@@ -1019,10 +1020,11 @@ xfs_bwrite(
 {
 	int error;
 
+	ASSERT(xfs_buf_islocked(bp));
+
 	bp->b_flags |= XBF_WRITE;
-	bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
+	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
 
-	xfs_buf_delwri_dequeue(bp);
 	xfs_bdstrat_cb(bp);
 
 	error = xfs_buf_iowait(bp);
@@ -1254,7 +1256,7 @@ xfs_buf_iorequest(
 {
 	trace_xfs_buf_iorequest(bp, _RET_IP_);
 
-	ASSERT(!(bp->b_flags & XBF_DELWRI));
+	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
 
 	if (bp->b_flags & XBF_WRITE)
 		xfs_buf_wait_unpin(bp);
@@ -1435,11 +1437,9 @@ xfs_free_buftarg(
 {
 	unregister_shrinker(&btp->bt_shrinker);
 
-	xfs_flush_buftarg(btp, 1);
 	if (mp->m_flags & XFS_MOUNT_BARRIER)
 		xfs_blkdev_issue_flush(btp);
 
-	kthread_stop(btp->bt_task);
 	kmem_free(btp);
 }
 
@@ -1491,20 +1491,6 @@ xfs_setsize_buftarg(
 	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
 }
 
-STATIC int
-xfs_alloc_delwri_queue(
-	xfs_buftarg_t *btp,
-	const char *fsname)
-{
-	INIT_LIST_HEAD(&btp->bt_delwri_queue);
-	spin_lock_init(&btp->bt_delwri_lock);
-	btp->bt_flags = 0;
-	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
-	if (IS_ERR(btp->bt_task))
-		return PTR_ERR(btp->bt_task);
-	return 0;
-}
-
 xfs_buftarg_t *
 xfs_alloc_buftarg(
 	struct xfs_mount *mp,
@@ -1527,8 +1513,6 @@ xfs_alloc_buftarg(
 	spin_lock_init(&btp->bt_lru_lock);
 	if (xfs_setsize_buftarg_early(btp, bdev))
 		goto error;
-	if (xfs_alloc_delwri_queue(btp, fsname))
-		goto error;
 	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
 	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
 	register_shrinker(&btp->bt_shrinker);
@@ -1539,125 +1523,52 @@ error:
 	return NULL;
 }
 
-
 /*
- * Delayed write buffer handling
+ * Add a buffer to the delayed write list.
+ *
+ * This queues a buffer for writeout if it hasn't already been. Note that
+ * neither this routine nor the buffer list submission functions perform
+ * any internal synchronization. It is expected that the lists are thread-local
+ * to the callers.
+ *
+ * Returns true if we queued up the buffer, or false if it already had
+ * been on the buffer list.
  */
-void
+bool
 xfs_buf_delwri_queue(
-	xfs_buf_t *bp)
+	struct xfs_buf *bp,
+	struct list_head *list)
 {
-	struct xfs_buftarg *btp = bp->b_target;
-
-	trace_xfs_buf_delwri_queue(bp, _RET_IP_);
-
+	ASSERT(xfs_buf_islocked(bp));
 	ASSERT(!(bp->b_flags & XBF_READ));
 
-	spin_lock(&btp->bt_delwri_lock);
-	if (!list_empty(&bp->b_list)) {
-		/* if already in the queue, move it to the tail */
-		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
-		list_move_tail(&bp->b_list, &btp->bt_delwri_queue);
-	} else {
-		/* start xfsbufd as it is about to have something to do */
-		if (list_empty(&btp->bt_delwri_queue))
-			wake_up_process(bp->b_target->bt_task);
-
-		atomic_inc(&bp->b_hold);
-		bp->b_flags |= XBF_DELWRI | _XBF_DELWRI_Q | XBF_ASYNC;
-		list_add_tail(&bp->b_list, &btp->bt_delwri_queue);
-	}
-	bp->b_queuetime = jiffies;
-	spin_unlock(&btp->bt_delwri_lock);
-}
-
-void
-xfs_buf_delwri_dequeue(
-	xfs_buf_t *bp)
-{
-	int dequeued = 0;
-
-	spin_lock(&bp->b_target->bt_delwri_lock);
-	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
-		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
-		list_del_init(&bp->b_list);
-		dequeued = 1;
+	/*
+	 * If the buffer is already marked delwri it already is queued up
+	 * by someone else for imediate writeout. Just ignore it in that
+	 * case.
+	 */
+	if (bp->b_flags & _XBF_DELWRI_Q) {
+		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
+		return false;
 	}
-	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
-	spin_unlock(&bp->b_target->bt_delwri_lock);
-
-	if (dequeued)
-		xfs_buf_rele(bp);
-
-	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
-}
 
-/*
- * If a delwri buffer needs to be pushed before it has aged out, then promote
- * it to the head of the delwri queue so that it will be flushed on the next
- * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
- * than the age currently needed to flush the buffer. Hence the next time the
- * xfsbufd sees it is guaranteed to be considered old enough to flush.
- */
-void
-xfs_buf_delwri_promote(
-	struct xfs_buf *bp)
-{
-	struct xfs_buftarg *btp = bp->b_target;
-	long age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;
-
-	ASSERT(bp->b_flags & XBF_DELWRI);
-	ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+	trace_xfs_buf_delwri_queue(bp, _RET_IP_);
 
 	/*
-	 * Check the buffer age before locking the delayed write queue as we
-	 * don't need to promote buffers that are already past the flush age.
+	 * If a buffer gets written out synchronously or marked stale while it
+	 * is on a delwri list we lazily remove it. To do this, the other party
+	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
+	 * It remains referenced and on the list. In a rare corner case it
+	 * might get readded to a delwri list after the synchronous writeout, in
+	 * which case we need just need to re-add the flag here.
 	 */
-	if (bp->b_queuetime < jiffies - age)
-		return;
-	bp->b_queuetime = jiffies - age;
-	spin_lock(&btp->bt_delwri_lock);
-	list_move(&bp->b_list, &btp->bt_delwri_queue);
-	spin_unlock(&btp->bt_delwri_lock);
-}
-
-/*
- * Move as many buffers as specified to the supplied list
- * idicating if we skipped any buffers to prevent deadlocks.
- */
-STATIC int
-xfs_buf_delwri_split(
-	xfs_buftarg_t *target,
-	struct list_head *list,
-	unsigned long age)
-{
-	xfs_buf_t *bp, *n;
-	int skipped = 0;
-	int force;
-
-	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
-	INIT_LIST_HEAD(list);
-	spin_lock(&target->bt_delwri_lock);
-	list_for_each_entry_safe(bp, n, &target->bt_delwri_queue, b_list) {
-		ASSERT(bp->b_flags & XBF_DELWRI);
-
-		if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) {
-			if (!force &&
-			    time_before(jiffies, bp->b_queuetime + age)) {
-				xfs_buf_unlock(bp);
-				break;
-			}
-
-			bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q);
-			bp->b_flags |= XBF_WRITE;
-			list_move_tail(&bp->b_list, list);
-			trace_xfs_buf_delwri_split(bp, _RET_IP_);
-		} else
-			skipped++;
+	bp->b_flags |= _XBF_DELWRI_Q;
+	if (list_empty(&bp->b_list)) {
+		atomic_inc(&bp->b_hold);
+		list_add_tail(&bp->b_list, list);
 	}
 
-	spin_unlock(&target->bt_delwri_lock);
-	return skipped;
+	return true;
 }
 
 /*
@@ -1683,99 +1594,109 @@ xfs_buf_cmp(
 	return 0;
 }
 
-STATIC int
-xfsbufd(
-	void *data)
+static int
+__xfs_buf_delwri_submit(
+	struct list_head *buffer_list,
+	struct list_head *io_list,
+	bool wait)
 {
-	xfs_buftarg_t *target = (xfs_buftarg_t *)data;
-
-	current->flags |= PF_MEMALLOC;
+	struct blk_plug plug;
+	struct xfs_buf *bp, *n;
+	int pinned = 0;
 
-	set_freezable();
+	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
+		if (!wait) {
+			if (xfs_buf_ispinned(bp)) {
+				pinned++;
+				continue;
+			}
+			if (!xfs_buf_trylock(bp))
+				continue;
+		} else {
+			xfs_buf_lock(bp);
+		}
 
-	do {
-		long age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
-		long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
-		struct list_head tmp;
-		struct blk_plug plug;
+		/*
+		 * Someone else might have written the buffer synchronously or
+		 * marked it stale in the meantime. In that case only the
+		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
+		 * reference and remove it from the list here.
+		 */
+		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
+			list_del_init(&bp->b_list);
+			xfs_buf_relse(bp);
+			continue;
+		}
 
-		if (unlikely(freezing(current)))
-			try_to_freeze();
+		list_move_tail(&bp->b_list, io_list);
+		trace_xfs_buf_delwri_split(bp, _RET_IP_);
+	}
 
-		/* sleep for a long time if there is nothing to do. */
-		if (list_empty(&target->bt_delwri_queue))
-			tout = MAX_SCHEDULE_TIMEOUT;
-		schedule_timeout_interruptible(tout);
+	list_sort(NULL, io_list, xfs_buf_cmp);
 
-		xfs_buf_delwri_split(target, &tmp, age);
-		list_sort(NULL, &tmp, xfs_buf_cmp);
+	blk_start_plug(&plug);
+	list_for_each_entry_safe(bp, n, io_list, b_list) {
+		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
+		bp->b_flags |= XBF_WRITE;
 
-		blk_start_plug(&plug);
-		while (!list_empty(&tmp)) {
-			struct xfs_buf *bp;
-			bp = list_first_entry(&tmp, struct xfs_buf, b_list);
+		if (!wait) {
+			bp->b_flags |= XBF_ASYNC;
 			list_del_init(&bp->b_list);
-			xfs_bdstrat_cb(bp);
 		}
-		blk_finish_plug(&plug);
-	} while (!kthread_should_stop());
+		xfs_bdstrat_cb(bp);
+	}
+	blk_finish_plug(&plug);
 
-	return 0;
+	return pinned;
 }
 
 /*
- * Go through all incore buffers, and release buffers if they belong to
- * the given device. This is used in filesystem error handling to
- * preserve the consistency of its metadata.
+ * Write out a buffer list asynchronously.
+ *
+ * This will take the @buffer_list, write all non-locked and non-pinned buffers
+ * out and not wait for I/O completion on any of the buffers. This interface
+ * is only safely useable for callers that can track I/O completion by higher
+ * level means, e.g. AIL pushing as the @buffer_list is consumed in this
+ * function.
  */
 int
-xfs_flush_buftarg(
-	xfs_buftarg_t *target,
-	int wait)
+xfs_buf_delwri_submit_nowait(
+	struct list_head *buffer_list)
 {
-	xfs_buf_t *bp;
-	int pincount = 0;
-	LIST_HEAD(tmp_list);
-	LIST_HEAD(wait_list);
-	struct blk_plug plug;
+	LIST_HEAD (io_list);
+	return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
+}
 
-	flush_workqueue(xfslogd_workqueue);
+/*
+ * Write out a buffer list synchronously.
+ *
+ * This will take the @buffer_list, write all buffers out and wait for I/O
+ * completion on all of the buffers. @buffer_list is consumed by the function,
+ * so callers must have some other way of tracking buffers if they require such
+ * functionality.
+ */
+int
+xfs_buf_delwri_submit(
+	struct list_head *buffer_list)
+{
+	LIST_HEAD (io_list);
+	int error = 0, error2;
+	struct xfs_buf *bp;
 
-	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
-	pincount = xfs_buf_delwri_split(target, &tmp_list, 0);
+	__xfs_buf_delwri_submit(buffer_list, &io_list, true);
 
-	/*
-	 * Dropped the delayed write list lock, now walk the temporary list.
-	 * All I/O is issued async and then if we need to wait for completion
-	 * we do that after issuing all the IO.
-	 */
-	list_sort(NULL, &tmp_list, xfs_buf_cmp);
+	/* Wait for IO to complete. */
+	while (!list_empty(&io_list)) {
+		bp = list_first_entry(&io_list, struct xfs_buf, b_list);
 
-	blk_start_plug(&plug);
-	while (!list_empty(&tmp_list)) {
-		bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
-		ASSERT(target == bp->b_target);
 		list_del_init(&bp->b_list);
-		if (wait) {
-			bp->b_flags &= ~XBF_ASYNC;
-			list_add(&bp->b_list, &wait_list);
-		}
-		xfs_bdstrat_cb(bp);
-	}
-	blk_finish_plug(&plug);
-
-	if (wait) {
-		/* Wait for IO to complete. */
-		while (!list_empty(&wait_list)) {
-			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
-
-			list_del_init(&bp->b_list);
-			xfs_buf_iowait(bp);
-			xfs_buf_relse(bp);
-		}
+		error2 = xfs_buf_iowait(bp);
+		xfs_buf_relse(bp);
+		if (!error)
+			error = error2;
 	}
 
-	return pincount;
+	return error;
 }
 
 int __init