path: root/fs/xfs/linux-2.6/xfs_buf.c
author    Dave Chinner <dchinner@redhat.com>    2010-11-30 01:27:57 -0500
committer Dave Chinner <david@fromorbit.com>    2010-11-30 01:27:57 -0500
commit    ff57ab21995a8636cfc72efeebb09cc6034d756f
tree      675374b134689029d70ca1e0200866cf2f906c44    /fs/xfs/linux-2.6/xfs_buf.c
parent    1a427ab0c1b205d1bda8da0b77ea9d295ac23c57
xfs: convert xfsbud shrinker to a per-buftarg shrinker.
Before we introduce per-buftarg LRU lists, split the shrinker
implementation into per-buftarg shrinker callbacks. At the moment
we wake all the xfsbufds to run the delayed write queues to free
the dirty buffers and make their pages available for reclaim.
However, with an LRU, we want to be able to free clean, unused
buffers as well, so we need to separate the xfsbufd from the
shrinker callbacks.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Alex Elder <aelder@sgi.com>
Diffstat (limited to 'fs/xfs/linux-2.6/xfs_buf.c')
-rw-r--r--    fs/xfs/linux-2.6/xfs_buf.c    89
1 file changed, 24 insertions(+), 65 deletions(-)
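For context, the shape of the change is the embedded, per-object shrinker pattern: instead of one global shrinker walking a global list of buftargs, each buftarg embeds its own struct shrinker and the callback recovers its owner with container_of(). Below is a minimal sketch of that pattern, not part of the patch, written against the same 2.6.37-era shrinker API the patch uses; "my_cache" and its fields are hypothetical names for illustration only, not XFS code.

/*
 * Sketch of an embedded, per-instance shrinker (hypothetical example).
 */
#include <linux/kernel.h>	/* container_of() */
#include <linux/list.h>
#include <linux/mm.h>		/* struct shrinker, register_shrinker() */
#include <linux/spinlock.h>

struct my_cache {
	struct list_head	items;		/* objects that could be reclaimed */
	spinlock_t		lock;
	struct shrinker		shrinker;	/* one shrinker per cache instance */
};

static int
my_cache_shrink(
	struct shrinker		*shrink,
	int			nr_to_scan,
	gfp_t			mask)
{
	/* recover the owning cache from the embedded shrinker */
	struct my_cache		*c = container_of(shrink,
						struct my_cache, shrinker);

	if (nr_to_scan) {
		/* walk c->items under c->lock and free up to nr_to_scan objects */
	}

	/*
	 * Mirror the convention the patch uses: -1 when there is nothing
	 * worth reclaiming right now, a positive count otherwise.
	 */
	return list_empty(&c->items) ? -1 : 1;
}

static void
my_cache_init(
	struct my_cache		*c)
{
	INIT_LIST_HEAD(&c->items);
	spin_lock_init(&c->lock);
	c->shrinker.shrink = my_cache_shrink;
	c->shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&c->shrinker);
}

static void
my_cache_destroy(
	struct my_cache		*c)
{
	/* unregister before tearing the cache down, as xfs_free_buftarg() does */
	unregister_shrinker(&c->shrinker);
}

Registering in setup and unregistering first in teardown matches the ordering the patch establishes in xfs_alloc_buftarg() and xfs_free_buftarg().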
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 4c5deb6e9e31..0a00d7a2fc23 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -44,12 +44,7 @@
 
 static kmem_zone_t *xfs_buf_zone;
 STATIC int xfsbufd(void *);
-STATIC int xfsbufd_wakeup(struct shrinker *, int, gfp_t);
 STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
-static struct shrinker xfs_buf_shake = {
-	.shrink = xfsbufd_wakeup,
-	.seeks = DEFAULT_SEEKS,
-};
 
 static struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
@@ -337,7 +332,6 @@ _xfs_buf_lookup_pages(
 			__func__, gfp_mask);
 
 		XFS_STATS_INC(xb_page_retries);
-		xfsbufd_wakeup(NULL, 0, gfp_mask);
 		congestion_wait(BLK_RW_ASYNC, HZ/50);
 		goto retry;
 	}
@@ -1461,28 +1455,23 @@ xfs_wait_buftarg(
 	}
 }
 
-/*
- *	buftarg list for delwrite queue processing
- */
-static LIST_HEAD(xfs_buftarg_list);
-static DEFINE_SPINLOCK(xfs_buftarg_lock);
-
-STATIC void
-xfs_register_buftarg(
-	xfs_buftarg_t		*btp)
-{
-	spin_lock(&xfs_buftarg_lock);
-	list_add(&btp->bt_list, &xfs_buftarg_list);
-	spin_unlock(&xfs_buftarg_lock);
-}
-
-STATIC void
-xfs_unregister_buftarg(
-	xfs_buftarg_t		*btp)
+int
+xfs_buftarg_shrink(
+	struct shrinker		*shrink,
+	int			nr_to_scan,
+	gfp_t			mask)
 {
-	spin_lock(&xfs_buftarg_lock);
-	list_del(&btp->bt_list);
-	spin_unlock(&xfs_buftarg_lock);
+	struct xfs_buftarg	*btp = container_of(shrink,
+					struct xfs_buftarg, bt_shrinker);
+	if (nr_to_scan) {
+		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
+			return -1;
+		if (list_empty(&btp->bt_delwrite_queue))
+			return -1;
+		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
+		wake_up_process(btp->bt_task);
+	}
+	return list_empty(&btp->bt_delwrite_queue) ? -1 : 1;
 }
 
 void
@@ -1490,17 +1479,14 @@ xfs_free_buftarg(
 	struct xfs_mount	*mp,
 	struct xfs_buftarg	*btp)
 {
+	unregister_shrinker(&btp->bt_shrinker);
+
 	xfs_flush_buftarg(btp, 1);
 	if (mp->m_flags & XFS_MOUNT_BARRIER)
 		xfs_blkdev_issue_flush(btp);
 	iput(btp->bt_mapping->host);
 
-	/* Unregister the buftarg first so that we don't get a
-	 * wakeup finding a non-existent task
-	 */
-	xfs_unregister_buftarg(btp);
 	kthread_stop(btp->bt_task);
-
 	kmem_free(btp);
 }
 
@@ -1597,20 +1583,13 @@ xfs_alloc_delwrite_queue(
 	xfs_buftarg_t		*btp,
 	const char		*fsname)
 {
-	int			error = 0;
-
-	INIT_LIST_HEAD(&btp->bt_list);
 	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
 	spin_lock_init(&btp->bt_delwrite_lock);
 	btp->bt_flags = 0;
 	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
-	if (IS_ERR(btp->bt_task)) {
-		error = PTR_ERR(btp->bt_task);
-		goto out_error;
-	}
-	xfs_register_buftarg(btp);
-out_error:
-	return error;
+	if (IS_ERR(btp->bt_task))
+		return PTR_ERR(btp->bt_task);
+	return 0;
 }
 
 xfs_buftarg_t *
@@ -1633,6 +1612,9 @@ xfs_alloc_buftarg(
 		goto error;
 	if (xfs_alloc_delwrite_queue(btp, fsname))
 		goto error;
+	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
+	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
+	register_shrinker(&btp->bt_shrinker);
 	return btp;
 
 error:
@@ -1737,27 +1719,6 @@ xfs_buf_runall_queues(
 	flush_workqueue(queue);
 }
 
-STATIC int
-xfsbufd_wakeup(
-	struct shrinker		*shrink,
-	int			priority,
-	gfp_t			mask)
-{
-	xfs_buftarg_t		*btp;
-
-	spin_lock(&xfs_buftarg_lock);
-	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
-		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
-			continue;
-		if (list_empty(&btp->bt_delwrite_queue))
-			continue;
-		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
-		wake_up_process(btp->bt_task);
-	}
-	spin_unlock(&xfs_buftarg_lock);
-	return 0;
-}
-
 /*
  *	Move as many buffers as specified to the supplied list
  *	idicating if we skipped any buffers to prevent deadlocks.
@@ -1952,7 +1913,6 @@ xfs_buf_init(void)
 	if (!xfsconvertd_workqueue)
 		goto out_destroy_xfsdatad_workqueue;
 
-	register_shrinker(&xfs_buf_shake);
 	return 0;
 
  out_destroy_xfsdatad_workqueue:
@@ -1968,7 +1928,6 @@ xfs_buf_init(void)
 void
 xfs_buf_terminate(void)
 {
-	unregister_shrinker(&xfs_buf_shake);
 	destroy_workqueue(xfsconvertd_workqueue);
 	destroy_workqueue(xfsdatad_workqueue);
 	destroy_workqueue(xfslogd_workqueue);