author    Dave Chinner <dchinner@redhat.com>  2013-08-27 20:18:05 -0400
committer Al Viro <viro@zeniv.linux.org.uk>   2013-09-10 18:56:31 -0400
commit    e80dfa19976b884db1ac2bc5d7d6ca0a4027bd1c (patch)
tree      faccabaf8cb113de9419344870273b5115372ff5 /fs/xfs
parent    9b17c62382dd2e7507984b9890bf44e070cdd8bb (diff)
xfs: convert buftarg LRU to generic code
Convert the buftarg LRU to use the new generic LRU list and take advantage
of the functionality it supplies to make the buffer cache shrinker node
aware.

Signed-off-by: Glauber Costa <glommer@openvz.org>
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
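[Editor's note] For readers unfamiliar with the list_lru API this patch moves to, the sketch below shows the general shape of the conversion: per-object LRU linkage handed to a struct list_lru, an isolate callback that classifies entries under the per-node lock, and a shrinker split into node-aware count/scan callbacks. It is an illustration only, not part of the patch; the foo_* names are invented, the dispose-list draining comment stands in for the real freeing logic, and only the list_lru calls, the lru_status return values, and the count_objects/scan_objects split with SHRINKER_NUMA_AWARE are taken from the change itself.

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_lru.h>
#include <linux/shrinker.h>
#include <linux/spinlock.h>

struct foo_object {
        struct list_head  lru;          /* linkage owned by the list_lru */
        atomic_t          refcount;
};

/* One list_lru replaces the old open-coded list + lock + item counter. */
static struct list_lru foo_lru;

/* Called by list_lru_walk_node() with the per-node LRU lock held. */
static enum lru_status
foo_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
{
        struct foo_object *obj = container_of(item, struct foo_object, lru);
        struct list_head  *dispose = arg;

        if (atomic_read(&obj->refcount) > 1)
                return LRU_ROTATE;      /* still referenced: another trip around */

        list_move(item, dispose);       /* detach now, free after the lock is dropped */
        return LRU_REMOVED;
}

static long
foo_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long   nr_to_scan = sc->nr_to_scan;
        LIST_HEAD(dispose);
        long            freed;

        /* Only walk the NUMA node the VM asked about. */
        freed = list_lru_walk_node(&foo_lru, sc->nid, foo_isolate,
                                   &dispose, &nr_to_scan);

        /* ... drain the dispose list and free each object here ... */
        return freed;
}

static long
foo_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        return list_lru_count_node(&foo_lru, sc->nid);
}

static struct shrinker foo_shrinker = {
        .count_objects  = foo_shrink_count,
        .scan_objects   = foo_shrink_scan,
        .seeks          = DEFAULT_SEEKS,
        .flags          = SHRINKER_NUMA_AWARE,
};

static int __init foo_cache_init(void)
{
        list_lru_init(&foo_lru);        /* sets up the per-node lists and locks */
        register_shrinker(&foo_shrinker);
        return 0;
}

Collecting victims on a private dispose list means the actual release work happens only after the per-node LRU lock has been dropped; the patch follows the same split, calling xfs_buf_rele() only after list_lru_walk_node() returns.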
Diffstat (limited to 'fs/xfs')
-rw-r--r--  fs/xfs/xfs_buf.c  170
-rw-r--r--  fs/xfs/xfs_buf.h    5
2 files changed, 82 insertions, 93 deletions
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index c06823fe10d3..665ff792d665 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -86,20 +86,14 @@ xfs_buf_vmap_len(
  * The LRU takes a new reference to the buffer so that it will only be freed
  * once the shrinker takes the buffer off the LRU.
  */
-STATIC void
+static void
 xfs_buf_lru_add(
         struct xfs_buf  *bp)
 {
-        struct xfs_buftarg *btp = bp->b_target;
-
-        spin_lock(&btp->bt_lru_lock);
-        if (list_empty(&bp->b_lru)) {
-                atomic_inc(&bp->b_hold);
-                list_add_tail(&bp->b_lru, &btp->bt_lru);
-                btp->bt_lru_nr++;
-                bp->b_lru_flags &= ~_XBF_LRU_DISPOSE;
+        if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
+                bp->b_lru_flags &= ~_XBF_LRU_DISPOSE;
+                atomic_inc(&bp->b_hold);
         }
-        spin_unlock(&btp->bt_lru_lock);
 }
 
 /*
@@ -108,24 +102,13 @@ xfs_buf_lru_add(
  * The unlocked check is safe here because it only occurs when there are not
  * b_lru_ref counts left on the inode under the pag->pag_buf_lock. it is there
  * to optimise the shrinker removing the buffer from the LRU and calling
- * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
- * bt_lru_lock.
+ * xfs_buf_free().
  */
-STATIC void
+static void
 xfs_buf_lru_del(
         struct xfs_buf  *bp)
 {
-        struct xfs_buftarg *btp = bp->b_target;
-
-        if (list_empty(&bp->b_lru))
-                return;
-
-        spin_lock(&btp->bt_lru_lock);
-        if (!list_empty(&bp->b_lru)) {
-                list_del_init(&bp->b_lru);
-                btp->bt_lru_nr--;
-        }
-        spin_unlock(&btp->bt_lru_lock);
+        list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
 }
 
 /*
@@ -152,18 +135,10 @@ xfs_buf_stale(
         bp->b_flags &= ~_XBF_DELWRI_Q;
 
         atomic_set(&(bp)->b_lru_ref, 0);
-        if (!list_empty(&bp->b_lru)) {
-                struct xfs_buftarg *btp = bp->b_target;
-
-                spin_lock(&btp->bt_lru_lock);
-                if (!list_empty(&bp->b_lru) &&
-                    !(bp->b_lru_flags & _XBF_LRU_DISPOSE)) {
-                        list_del_init(&bp->b_lru);
-                        btp->bt_lru_nr--;
-                        atomic_dec(&bp->b_hold);
-                }
-                spin_unlock(&btp->bt_lru_lock);
-        }
+        if (!(bp->b_lru_flags & _XBF_LRU_DISPOSE) &&
+            (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
+                atomic_dec(&bp->b_hold);
+
         ASSERT(atomic_read(&bp->b_hold) >= 1);
 }
 
@@ -1502,83 +1477,97 @@ xfs_buf_iomove(
  * returned. These buffers will have an elevated hold count, so wait on those
  * while freeing all the buffers only held by the LRU.
  */
-void
-xfs_wait_buftarg(
-        struct xfs_buftarg      *btp)
+static enum lru_status
+xfs_buftarg_wait_rele(
+        struct list_head        *item,
+        spinlock_t              *lru_lock,
+        void                    *arg)
+
 {
-        struct xfs_buf          *bp;
+        struct xfs_buf          *bp = container_of(item, struct xfs_buf, b_lru);
 
-restart:
-        spin_lock(&btp->bt_lru_lock);
-        while (!list_empty(&btp->bt_lru)) {
-                bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
-                if (atomic_read(&bp->b_hold) > 1) {
-                        trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
-                        list_move_tail(&bp->b_lru, &btp->bt_lru);
-                        spin_unlock(&btp->bt_lru_lock);
-                        delay(100);
-                        goto restart;
-                }
+        if (atomic_read(&bp->b_hold) > 1) {
+                /* need to wait */
+                trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
+                spin_unlock(lru_lock);
+                delay(100);
+        } else {
                 /*
                  * clear the LRU reference count so the buffer doesn't get
                  * ignored in xfs_buf_rele().
                  */
                 atomic_set(&bp->b_lru_ref, 0);
-                spin_unlock(&btp->bt_lru_lock);
+                spin_unlock(lru_lock);
                 xfs_buf_rele(bp);
-                spin_lock(&btp->bt_lru_lock);
         }
-        spin_unlock(&btp->bt_lru_lock);
+
+        spin_lock(lru_lock);
+        return LRU_RETRY;
 }
 
-int
-xfs_buftarg_shrink(
+void
+xfs_wait_buftarg(
+        struct xfs_buftarg      *btp)
+{
+        while (list_lru_count(&btp->bt_lru))
+                list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
+                              NULL, LONG_MAX);
+}
+
+static enum lru_status
+xfs_buftarg_isolate(
+        struct list_head        *item,
+        spinlock_t              *lru_lock,
+        void                    *arg)
+{
+        struct xfs_buf          *bp = container_of(item, struct xfs_buf, b_lru);
+        struct list_head        *dispose = arg;
+
+        /*
+         * Decrement the b_lru_ref count unless the value is already
+         * zero. If the value is already zero, we need to reclaim the
+         * buffer, otherwise it gets another trip through the LRU.
+         */
+        if (!atomic_add_unless(&bp->b_lru_ref, -1, 0))
+                return LRU_ROTATE;
+
+        bp->b_lru_flags |= _XBF_LRU_DISPOSE;
+        list_move(item, dispose);
+        return LRU_REMOVED;
+}
+
+static long
+xfs_buftarg_shrink_scan(
         struct shrinker         *shrink,
         struct shrink_control   *sc)
 {
         struct xfs_buftarg      *btp = container_of(shrink,
                                         struct xfs_buftarg, bt_shrinker);
-        struct xfs_buf          *bp;
-        int nr_to_scan = sc->nr_to_scan;
         LIST_HEAD(dispose);
+        long                    freed;
+        unsigned long           nr_to_scan = sc->nr_to_scan;
 
-        if (!nr_to_scan)
-                return btp->bt_lru_nr;
-
-        spin_lock(&btp->bt_lru_lock);
-        while (!list_empty(&btp->bt_lru)) {
-                if (nr_to_scan-- <= 0)
-                        break;
-
-                bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
-
-                /*
-                 * Decrement the b_lru_ref count unless the value is already
-                 * zero. If the value is already zero, we need to reclaim the
-                 * buffer, otherwise it gets another trip through the LRU.
-                 */
-                if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
-                        list_move_tail(&bp->b_lru, &btp->bt_lru);
-                        continue;
-                }
-
-                /*
-                 * remove the buffer from the LRU now to avoid needing another
-                 * lock round trip inside xfs_buf_rele().
-                 */
-                list_move(&bp->b_lru, &dispose);
-                btp->bt_lru_nr--;
-                bp->b_lru_flags |= _XBF_LRU_DISPOSE;
-        }
-        spin_unlock(&btp->bt_lru_lock);
+        freed = list_lru_walk_node(&btp->bt_lru, sc->nid, xfs_buftarg_isolate,
+                                   &dispose, &nr_to_scan);
 
         while (!list_empty(&dispose)) {
+                struct xfs_buf *bp;
                 bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
                 list_del_init(&bp->b_lru);
                 xfs_buf_rele(bp);
         }
 
-        return btp->bt_lru_nr;
+        return freed;
+}
+
+static long
+xfs_buftarg_shrink_count(
+        struct shrinker         *shrink,
+        struct shrink_control   *sc)
+{
+        struct xfs_buftarg      *btp = container_of(shrink,
+                                        struct xfs_buftarg, bt_shrinker);
+        return list_lru_count_node(&btp->bt_lru, sc->nid);
 }
 
 void
@@ -1660,12 +1649,13 @@ xfs_alloc_buftarg(
         if (!btp->bt_bdi)
                 goto error;
 
-        INIT_LIST_HEAD(&btp->bt_lru);
-        spin_lock_init(&btp->bt_lru_lock);
+        list_lru_init(&btp->bt_lru);
         if (xfs_setsize_buftarg_early(btp, bdev))
                 goto error;
-        btp->bt_shrinker.shrink = xfs_buftarg_shrink;
+        btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
+        btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
         btp->bt_shrinker.seeks = DEFAULT_SEEKS;
+        btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
         register_shrinker(&btp->bt_shrinker);
         return btp;
 
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 433a12ed7b17..5ec7d35a77ea 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -25,6 +25,7 @@
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
 #include <linux/uio.h>
+#include <linux/list_lru.h>
 
 /*
  * Base types
@@ -92,9 +93,7 @@ typedef struct xfs_buftarg {
 
         /* LRU control structures */
         struct shrinker         bt_shrinker;
-        struct list_head        bt_lru;
-        spinlock_t              bt_lru_lock;
-        unsigned int            bt_lru_nr;
+        struct list_lru         bt_lru;
 } xfs_buftarg_t;
 
 struct xfs_buf;