aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVladimir Davydov <vdavydov@parallels.com>2015-02-12 17:59:35 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-12 21:54:10 -0500
commit3f97b163207c67a3b35931494ad3db1de66356f0 (patch)
tree012012aafb687ebfb7e794e2bcd0c1728212fafc
parent2a4db7eb9391a544ff58f4fa11d35246e87c87af (diff)
list_lru: add helpers to isolate items
Currently, the isolate callback passed to the list_lru_walk family of functions is supposed to just delete an item from the list upon returning LRU_REMOVED or LRU_REMOVED_RETRY, while nr_items counter is fixed by __list_lru_walk_one after the callback returns. Since the callback is allowed to drop the lock after removing an item (it has to return LRU_REMOVED_RETRY then), the nr_items can be less than the actual number of elements on the list even if we check them under the lock. This makes it difficult to move items from one list_lru_one to another, which is required for per-memcg list_lru reparenting - we can't just splice the lists, we have to move entries one by one.

This patch therefore introduces helpers that must be used by callback functions to isolate items instead of raw list_del/list_move. These are list_lru_isolate and list_lru_isolate_move. They not only remove the entry from the list, but also fix the nr_items counter, making sure nr_items always reflects the actual number of elements on the list if checked under the appropriate lock.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--fs/dcache.c21
-rw-r--r--fs/gfs2/quota.c5
-rw-r--r--fs/inode.c8
-rw-r--r--fs/xfs/xfs_buf.c6
-rw-r--r--fs/xfs/xfs_qm.c5
-rw-r--r--include/linux/list_lru.h9
-rw-r--r--mm/list_lru.c19
-rw-r--r--mm/workingset.c3
8 files changed, 50 insertions, 26 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 56c5da89f58a..d04be762b216 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -400,19 +400,20 @@ static void d_shrink_add(struct dentry *dentry, struct list_head *list)
400 * LRU lists entirely, while shrink_move moves it to the indicated 400 * LRU lists entirely, while shrink_move moves it to the indicated
401 * private list. 401 * private list.
402 */ 402 */
403static void d_lru_isolate(struct dentry *dentry) 403static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
404{ 404{
405 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 405 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
406 dentry->d_flags &= ~DCACHE_LRU_LIST; 406 dentry->d_flags &= ~DCACHE_LRU_LIST;
407 this_cpu_dec(nr_dentry_unused); 407 this_cpu_dec(nr_dentry_unused);
408 list_del_init(&dentry->d_lru); 408 list_lru_isolate(lru, &dentry->d_lru);
409} 409}
410 410
411static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list) 411static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
412 struct list_head *list)
412{ 413{
413 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); 414 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
414 dentry->d_flags |= DCACHE_SHRINK_LIST; 415 dentry->d_flags |= DCACHE_SHRINK_LIST;
415 list_move_tail(&dentry->d_lru, list); 416 list_lru_isolate_move(lru, &dentry->d_lru, list);
416} 417}
417 418
418/* 419/*
@@ -869,8 +870,8 @@ static void shrink_dentry_list(struct list_head *list)
869 } 870 }
870} 871}
871 872
872static enum lru_status 873static enum lru_status dentry_lru_isolate(struct list_head *item,
873dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) 874 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
874{ 875{
875 struct list_head *freeable = arg; 876 struct list_head *freeable = arg;
876 struct dentry *dentry = container_of(item, struct dentry, d_lru); 877 struct dentry *dentry = container_of(item, struct dentry, d_lru);
@@ -890,7 +891,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
890 * another pass through the LRU. 891 * another pass through the LRU.
891 */ 892 */
892 if (dentry->d_lockref.count) { 893 if (dentry->d_lockref.count) {
893 d_lru_isolate(dentry); 894 d_lru_isolate(lru, dentry);
894 spin_unlock(&dentry->d_lock); 895 spin_unlock(&dentry->d_lock);
895 return LRU_REMOVED; 896 return LRU_REMOVED;
896 } 897 }
@@ -921,7 +922,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
921 return LRU_ROTATE; 922 return LRU_ROTATE;
922 } 923 }
923 924
924 d_lru_shrink_move(dentry, freeable); 925 d_lru_shrink_move(lru, dentry, freeable);
925 spin_unlock(&dentry->d_lock); 926 spin_unlock(&dentry->d_lock);
926 927
927 return LRU_REMOVED; 928 return LRU_REMOVED;
@@ -951,7 +952,7 @@ long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
951} 952}
952 953
953static enum lru_status dentry_lru_isolate_shrink(struct list_head *item, 954static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
954 spinlock_t *lru_lock, void *arg) 955 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
955{ 956{
956 struct list_head *freeable = arg; 957 struct list_head *freeable = arg;
957 struct dentry *dentry = container_of(item, struct dentry, d_lru); 958 struct dentry *dentry = container_of(item, struct dentry, d_lru);
@@ -964,7 +965,7 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
964 if (!spin_trylock(&dentry->d_lock)) 965 if (!spin_trylock(&dentry->d_lock))
965 return LRU_SKIP; 966 return LRU_SKIP;
966 967
967 d_lru_shrink_move(dentry, freeable); 968 d_lru_shrink_move(lru, dentry, freeable);
968 spin_unlock(&dentry->d_lock); 969 spin_unlock(&dentry->d_lock);
969 970
970 return LRU_REMOVED; 971 return LRU_REMOVED;
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index c15d6b216d0b..3aa17d4d1cfc 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -145,7 +145,8 @@ static void gfs2_qd_dispose(struct list_head *list)
145} 145}
146 146
147 147
148static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg) 148static enum lru_status gfs2_qd_isolate(struct list_head *item,
149 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
149{ 150{
150 struct list_head *dispose = arg; 151 struct list_head *dispose = arg;
151 struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru); 152 struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);
@@ -155,7 +156,7 @@ static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock,
155 156
156 if (qd->qd_lockref.count == 0) { 157 if (qd->qd_lockref.count == 0) {
157 lockref_mark_dead(&qd->qd_lockref); 158 lockref_mark_dead(&qd->qd_lockref);
158 list_move(&qd->qd_lru, dispose); 159 list_lru_isolate_move(lru, &qd->qd_lru, dispose);
159 } 160 }
160 161
161 spin_unlock(&qd->qd_lockref.lock); 162 spin_unlock(&qd->qd_lockref.lock);
diff --git a/fs/inode.c b/fs/inode.c
index 524a32c2b0c6..86c612b92c6f 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -685,8 +685,8 @@ int invalidate_inodes(struct super_block *sb, bool kill_dirty)
685 * LRU does not have strict ordering. Hence we don't want to reclaim inodes 685 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
686 * with this flag set because they are the inodes that are out of order. 686 * with this flag set because they are the inodes that are out of order.
687 */ 687 */
688static enum lru_status 688static enum lru_status inode_lru_isolate(struct list_head *item,
689inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) 689 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
690{ 690{
691 struct list_head *freeable = arg; 691 struct list_head *freeable = arg;
692 struct inode *inode = container_of(item, struct inode, i_lru); 692 struct inode *inode = container_of(item, struct inode, i_lru);
@@ -704,7 +704,7 @@ inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
704 */ 704 */
705 if (atomic_read(&inode->i_count) || 705 if (atomic_read(&inode->i_count) ||
706 (inode->i_state & ~I_REFERENCED)) { 706 (inode->i_state & ~I_REFERENCED)) {
707 list_del_init(&inode->i_lru); 707 list_lru_isolate(lru, &inode->i_lru);
708 spin_unlock(&inode->i_lock); 708 spin_unlock(&inode->i_lock);
709 this_cpu_dec(nr_unused); 709 this_cpu_dec(nr_unused);
710 return LRU_REMOVED; 710 return LRU_REMOVED;
@@ -738,7 +738,7 @@ inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
738 738
739 WARN_ON(inode->i_state & I_NEW); 739 WARN_ON(inode->i_state & I_NEW);
740 inode->i_state |= I_FREEING; 740 inode->i_state |= I_FREEING;
741 list_move(&inode->i_lru, freeable); 741 list_lru_isolate_move(lru, &inode->i_lru, freeable);
742 spin_unlock(&inode->i_lock); 742 spin_unlock(&inode->i_lock);
743 743
744 this_cpu_dec(nr_unused); 744 this_cpu_dec(nr_unused);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 15c9d224c721..1790b00bea7a 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1488,6 +1488,7 @@ xfs_buf_iomove(
1488static enum lru_status 1488static enum lru_status
1489xfs_buftarg_wait_rele( 1489xfs_buftarg_wait_rele(
1490 struct list_head *item, 1490 struct list_head *item,
1491 struct list_lru_one *lru,
1491 spinlock_t *lru_lock, 1492 spinlock_t *lru_lock,
1492 void *arg) 1493 void *arg)
1493 1494
@@ -1509,7 +1510,7 @@ xfs_buftarg_wait_rele(
1509 */ 1510 */
1510 atomic_set(&bp->b_lru_ref, 0); 1511 atomic_set(&bp->b_lru_ref, 0);
1511 bp->b_state |= XFS_BSTATE_DISPOSE; 1512 bp->b_state |= XFS_BSTATE_DISPOSE;
1512 list_move(item, dispose); 1513 list_lru_isolate_move(lru, item, dispose);
1513 spin_unlock(&bp->b_lock); 1514 spin_unlock(&bp->b_lock);
1514 return LRU_REMOVED; 1515 return LRU_REMOVED;
1515} 1516}
@@ -1546,6 +1547,7 @@ xfs_wait_buftarg(
1546static enum lru_status 1547static enum lru_status
1547xfs_buftarg_isolate( 1548xfs_buftarg_isolate(
1548 struct list_head *item, 1549 struct list_head *item,
1550 struct list_lru_one *lru,
1549 spinlock_t *lru_lock, 1551 spinlock_t *lru_lock,
1550 void *arg) 1552 void *arg)
1551{ 1553{
@@ -1569,7 +1571,7 @@ xfs_buftarg_isolate(
1569 } 1571 }
1570 1572
1571 bp->b_state |= XFS_BSTATE_DISPOSE; 1573 bp->b_state |= XFS_BSTATE_DISPOSE;
1572 list_move(item, dispose); 1574 list_lru_isolate_move(lru, item, dispose);
1573 spin_unlock(&bp->b_lock); 1575 spin_unlock(&bp->b_lock);
1574 return LRU_REMOVED; 1576 return LRU_REMOVED;
1575} 1577}
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 4f4b1274e144..53cc2aaf8d2b 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -430,6 +430,7 @@ struct xfs_qm_isolate {
430static enum lru_status 430static enum lru_status
431xfs_qm_dquot_isolate( 431xfs_qm_dquot_isolate(
432 struct list_head *item, 432 struct list_head *item,
433 struct list_lru_one *lru,
433 spinlock_t *lru_lock, 434 spinlock_t *lru_lock,
434 void *arg) 435 void *arg)
435 __releases(lru_lock) __acquires(lru_lock) 436 __releases(lru_lock) __acquires(lru_lock)
@@ -450,7 +451,7 @@ xfs_qm_dquot_isolate(
450 XFS_STATS_INC(xs_qm_dqwants); 451 XFS_STATS_INC(xs_qm_dqwants);
451 452
452 trace_xfs_dqreclaim_want(dqp); 453 trace_xfs_dqreclaim_want(dqp);
453 list_del_init(&dqp->q_lru); 454 list_lru_isolate(lru, &dqp->q_lru);
454 XFS_STATS_DEC(xs_qm_dquot_unused); 455 XFS_STATS_DEC(xs_qm_dquot_unused);
455 return LRU_REMOVED; 456 return LRU_REMOVED;
456 } 457 }
@@ -494,7 +495,7 @@ xfs_qm_dquot_isolate(
494 xfs_dqunlock(dqp); 495 xfs_dqunlock(dqp);
495 496
496 ASSERT(dqp->q_nrefs == 0); 497 ASSERT(dqp->q_nrefs == 0);
497 list_move_tail(&dqp->q_lru, &isol->dispose); 498 list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
498 XFS_STATS_DEC(xs_qm_dquot_unused); 499 XFS_STATS_DEC(xs_qm_dquot_unused);
499 trace_xfs_dqreclaim_done(dqp); 500 trace_xfs_dqreclaim_done(dqp);
500 XFS_STATS_INC(xs_qm_dqreclaims); 501 XFS_STATS_INC(xs_qm_dqreclaims);
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 305b598abac2..7edf9c9ab9eb 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -125,8 +125,13 @@ static inline unsigned long list_lru_count(struct list_lru *lru)
125 return count; 125 return count;
126} 126}
127 127
128typedef enum lru_status 128void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
129(*list_lru_walk_cb)(struct list_head *item, spinlock_t *lock, void *cb_arg); 129void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
130 struct list_head *head);
131
132typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
133 struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
134
130/** 135/**
131 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items. 136 * list_lru_walk_one: walk a list_lru, isolating and disposing freeable items.
132 * @lru: the lru pointer. 137 * @lru: the lru pointer.
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 79aee70c3b9d..8d9d168c6c38 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -132,6 +132,21 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
132} 132}
133EXPORT_SYMBOL_GPL(list_lru_del); 133EXPORT_SYMBOL_GPL(list_lru_del);
134 134
135void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
136{
137 list_del_init(item);
138 list->nr_items--;
139}
140EXPORT_SYMBOL_GPL(list_lru_isolate);
141
142void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
143 struct list_head *head)
144{
145 list_move(item, head);
146 list->nr_items--;
147}
148EXPORT_SYMBOL_GPL(list_lru_isolate_move);
149
135static unsigned long __list_lru_count_one(struct list_lru *lru, 150static unsigned long __list_lru_count_one(struct list_lru *lru,
136 int nid, int memcg_idx) 151 int nid, int memcg_idx)
137{ 152{
@@ -194,13 +209,11 @@ restart:
194 break; 209 break;
195 --*nr_to_walk; 210 --*nr_to_walk;
196 211
197 ret = isolate(item, &nlru->lock, cb_arg); 212 ret = isolate(item, l, &nlru->lock, cb_arg);
198 switch (ret) { 213 switch (ret) {
199 case LRU_REMOVED_RETRY: 214 case LRU_REMOVED_RETRY:
200 assert_spin_locked(&nlru->lock); 215 assert_spin_locked(&nlru->lock);
201 case LRU_REMOVED: 216 case LRU_REMOVED:
202 l->nr_items--;
203 WARN_ON_ONCE(l->nr_items < 0);
204 isolated++; 217 isolated++;
205 /* 218 /*
206 * If the lru lock has been dropped, our list 219 * If the lru lock has been dropped, our list
diff --git a/mm/workingset.c b/mm/workingset.c
index d4fa7fb10a52..aa017133744b 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -302,6 +302,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
302} 302}
303 303
304static enum lru_status shadow_lru_isolate(struct list_head *item, 304static enum lru_status shadow_lru_isolate(struct list_head *item,
305 struct list_lru_one *lru,
305 spinlock_t *lru_lock, 306 spinlock_t *lru_lock,
306 void *arg) 307 void *arg)
307{ 308{
@@ -332,7 +333,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
332 goto out; 333 goto out;
333 } 334 }
334 335
335 list_del_init(item); 336 list_lru_isolate(lru, item);
336 spin_unlock(lru_lock); 337 spin_unlock(lru_lock);
337 338
338 /* 339 /*