author		Vladimir Davydov <vdavydov@parallels.com>	2015-02-12 17:59:35 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-12 21:54:10 -0500
commit		3f97b163207c67a3b35931494ad3db1de66356f0 (patch)
tree		012012aafb687ebfb7e794e2bcd0c1728212fafc /fs/dcache.c
parent		2a4db7eb9391a544ff58f4fa11d35246e87c87af (diff)
list_lru: add helpers to isolate items
Currently, the isolate callback passed to the list_lru_walk family of functions is supposed to just delete an item from the list upon returning LRU_REMOVED or LRU_REMOVED_RETRY, while the nr_items counter is fixed up by __list_lru_walk_one() after the callback returns. Since the callback is allowed to drop the lock after removing an item (it then has to return LRU_REMOVED_RETRY), nr_items can be less than the actual number of elements on the list even if we check it under the lock. This makes it difficult to move items from one list_lru_one to another, which is required for per-memcg list_lru reparenting - we can't just splice the lists, we have to move entries one by one.

This patch therefore introduces helpers that must be used by callback functions to isolate items instead of raw list_del/list_move. These are list_lru_isolate() and list_lru_isolate_move(). They not only remove the entry from the list, but also fix the nr_items counter, making sure nr_items always reflects the actual number of elements on the list if checked under the appropriate lock.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Dave Chinner <david@fromorbit.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
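For reference, a minimal sketch of what the two helpers amount to, based purely on the description above: take the entry off (or move it out of) the list and decrement that list's nr_items counter while lru_lock is held. The actual bodies live in mm/list_lru.c and may differ in detail.

/*
 * Sketch only, derived from the commit message: isolate an item and
 * keep nr_items in sync with the list contents.
 */
void list_lru_isolate(struct list_lru_one *list, struct list_head *item)
{
	list_del_init(item);	/* take the entry off the LRU list */
	list->nr_items--;	/* keep the counter accurate under lru_lock */
}

void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head)
{
	list_move(item, head);	/* move the entry onto the caller's private list */
	list->nr_items--;	/* it no longer counts toward this list_lru_one */
}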
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--	fs/dcache.c	21
1 file changed, 11 insertions, 10 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 56c5da89f58a..d04be762b216 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -400,19 +400,20 @@ static void d_shrink_add(struct dentry *dentry, struct list_head *list)
  * LRU lists entirely, while shrink_move moves it to the indicated
  * private list.
  */
-static void d_lru_isolate(struct dentry *dentry)
+static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
 {
 	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 	dentry->d_flags &= ~DCACHE_LRU_LIST;
 	this_cpu_dec(nr_dentry_unused);
-	list_del_init(&dentry->d_lru);
+	list_lru_isolate(lru, &dentry->d_lru);
 }
 
-static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
+static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
+			      struct list_head *list)
 {
 	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
 	dentry->d_flags |= DCACHE_SHRINK_LIST;
-	list_move_tail(&dentry->d_lru, list);
+	list_lru_isolate_move(lru, &dentry->d_lru, list);
 }
 
 /*
@@ -869,8 +870,8 @@ static void shrink_dentry_list(struct list_head *list)
 	}
 }
 
-static enum lru_status
-dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
+static enum lru_status dentry_lru_isolate(struct list_head *item,
+		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
 {
 	struct list_head *freeable = arg;
 	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
@@ -890,7 +891,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
 	 * another pass through the LRU.
 	 */
 	if (dentry->d_lockref.count) {
-		d_lru_isolate(dentry);
+		d_lru_isolate(lru, dentry);
 		spin_unlock(&dentry->d_lock);
 		return LRU_REMOVED;
 	}
@@ -921,7 +922,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
 		return LRU_ROTATE;
 	}
 
-	d_lru_shrink_move(dentry, freeable);
+	d_lru_shrink_move(lru, dentry, freeable);
 	spin_unlock(&dentry->d_lock);
 
 	return LRU_REMOVED;
@@ -951,7 +952,7 @@ long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
 }
 
 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
-		spinlock_t *lru_lock, void *arg)
+		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
 {
 	struct list_head *freeable = arg;
 	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
@@ -964,7 +965,7 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
 	if (!spin_trylock(&dentry->d_lock))
 		return LRU_SKIP;
 
-	d_lru_shrink_move(dentry, freeable);
+	d_lru_shrink_move(lru, dentry, freeable);
 	spin_unlock(&dentry->d_lock);
 
 	return LRU_REMOVED;
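For context (not part of this diff), the isolate callbacks above are driven from the shrinker path roughly as sketched below, assuming the list_lru_shrink_walk() interface introduced earlier in this series; the in-tree prune_dcache_sb() may differ in detail. Victims are moved onto a private dispose list under lru_lock and freed afterwards:

/*
 * Sketch of the caller side, for illustration only: walk the superblock's
 * dentry LRU, letting dentry_lru_isolate() move victims onto a private
 * list, then shrink that list outside the lru_lock.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}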