Diffstat (limited to 'fs/dcache.c')
-rw-r--r--	fs/dcache.c | 170
1 file changed, 78 insertions(+), 92 deletions(-)
diff --git a/fs/dcache.c b/fs/dcache.c
index 77d466b13fef..38a4a03499a2 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -37,6 +37,7 @@
 #include <linux/rculist_bl.h>
 #include <linux/prefetch.h>
 #include <linux/ratelimit.h>
+#include <linux/list_lru.h>
 #include "internal.h"
 #include "mount.h"
 
@@ -356,28 +357,17 @@ static void dentry_unlink_inode(struct dentry * dentry)
 }
 
 /*
- * dentry_lru_(add|del|move_list) must be called with d_lock held.
+ * dentry_lru_(add|del) must be called with d_lock held.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
 	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
-		spin_lock(&dentry->d_sb->s_dentry_lru_lock);
+		if (list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
+			this_cpu_inc(nr_dentry_unused);
 		dentry->d_flags |= DCACHE_LRU_LIST;
-		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
-		dentry->d_sb->s_nr_dentry_unused++;
-		this_cpu_inc(nr_dentry_unused);
-		spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
 	}
 }
 
-static void __dentry_lru_del(struct dentry *dentry)
-{
-	list_del_init(&dentry->d_lru);
-	dentry->d_flags &= ~DCACHE_LRU_LIST;
-	dentry->d_sb->s_nr_dentry_unused--;
-	this_cpu_dec(nr_dentry_unused);
-}
-
 /*
  * Remove a dentry with references from the LRU.
  *
@@ -393,27 +383,9 @@ static void dentry_lru_del(struct dentry *dentry)
 		return;
 	}
 
-	if (!list_empty(&dentry->d_lru)) {
-		spin_lock(&dentry->d_sb->s_dentry_lru_lock);
-		__dentry_lru_del(dentry);
-		spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
-	}
-}
-
-static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
-{
-	BUG_ON(dentry->d_flags & DCACHE_SHRINK_LIST);
-
-	spin_lock(&dentry->d_sb->s_dentry_lru_lock);
-	if (list_empty(&dentry->d_lru)) {
-		dentry->d_flags |= DCACHE_LRU_LIST;
-		list_add_tail(&dentry->d_lru, list);
-	} else {
-		list_move_tail(&dentry->d_lru, list);
-		dentry->d_sb->s_nr_dentry_unused--;
-		this_cpu_dec(nr_dentry_unused);
-	}
-	spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
+	if (list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
+		this_cpu_dec(nr_dentry_unused);
+	dentry->d_flags &= ~DCACHE_LRU_LIST;
 }
 
 /**
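
The two hunks above replace the hand-rolled, s_dentry_lru_lock-protected list
manipulation with the list_lru API. The key property is that list_lru_add()
and list_lru_del() report whether the item actually changed state, which is
what lets the per-cpu nr_dentry_unused accounting stand outside any lock
without double-counting. A minimal sketch of that pattern, with a hypothetical
object type and counter (my_obj and nr_my_unused are illustrative, not part of
this patch):

#include <linux/list_lru.h>
#include <linux/percpu.h>

/* Hypothetical object embedding an LRU hook; illustrative only. */
struct my_obj {
	struct list_head	lru;
	struct list_lru		*onlist;
};

static DEFINE_PER_CPU(long, nr_my_unused);

static void my_lru_add(struct my_obj *obj)
{
	/* true only if the item was not already on the list */
	if (list_lru_add(obj->onlist, &obj->lru))
		this_cpu_inc(nr_my_unused);
}

static void my_lru_del(struct my_obj *obj)
{
	/* true only if the item really was on the list */
	if (list_lru_del(obj->onlist, &obj->lru))
		this_cpu_dec(nr_my_unused);
}
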
@@ -901,12 +873,72 @@ static void shrink_dentry_list(struct list_head *list)
 		rcu_read_unlock();
 }
 
+static enum lru_status
+dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
+{
+	struct list_head *freeable = arg;
+	struct dentry *dentry = container_of(item, struct dentry, d_lru);
+
+	/*
+	 * We are inverting the lru lock/dentry->d_lock order here,
+	 * so use a trylock. If we fail to get the lock, just skip
+	 * it.
+	 */
+	if (!spin_trylock(&dentry->d_lock))
+		return LRU_SKIP;
+
+	/*
+	 * Referenced dentries are still in use. If they have active
+	 * counts, just remove them from the LRU. Otherwise give them
+	 * another pass through the LRU.
+	 */
+	if (dentry->d_lockref.count) {
+		list_del_init(&dentry->d_lru);
+		spin_unlock(&dentry->d_lock);
+		return LRU_REMOVED;
+	}
+
+	if (dentry->d_flags & DCACHE_REFERENCED) {
+		dentry->d_flags &= ~DCACHE_REFERENCED;
+		spin_unlock(&dentry->d_lock);
+
+		/*
+		 * The list move itself will be made by the common LRU code. At
+		 * this point, we've dropped the dentry->d_lock but keep the
+		 * lru lock. This is safe to do, since every list movement is
+		 * protected by the lru lock even if both locks are held.
+		 *
+		 * This is guaranteed by the fact that all LRU management
+		 * functions are intermediated by the LRU API calls like
+		 * list_lru_add and list_lru_del. List movement in this file
+		 * only ever occurs through these functions or through
+		 * callbacks like this one, which are called from the LRU API.
+		 *
+		 * The only exceptions to this are functions like
+		 * shrink_dentry_list, and code that first checks for the
+		 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
+		 * operating only with stack-provided lists after they are
+		 * properly isolated from the main list. It is thus always a
+		 * local access.
+		 */
+		return LRU_ROTATE;
+	}
+
+	dentry->d_flags |= DCACHE_SHRINK_LIST;
+	list_move_tail(&dentry->d_lru, freeable);
+	this_cpu_dec(nr_dentry_unused);
+	spin_unlock(&dentry->d_lock);
+
+	return LRU_REMOVED;
+}
+
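
dentry_lru_isolate() above is the new extension point: the walker calls it for
each entry while holding the internal lru lock, and the enum lru_status return
value steers what happens next (skip, rotate to the tail, or hand off for
disposal). A skeleton callback following the same protocol might look like
this sketch; my_obj, its lock, and my_obj_busy() are hypothetical stand-ins:

#include <linux/kernel.h>
#include <linux/list_lru.h>
#include <linux/spinlock.h>

/* Hypothetical object with its own lock; my_obj_busy() is a stand-in. */
struct my_obj {
	spinlock_t		lock;
	struct list_head	lru;
};

static bool my_obj_busy(struct my_obj *obj);

static enum lru_status
my_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct my_obj *obj = container_of(item, struct my_obj, lru);

	/* lru_lock is held; taking obj->lock here inverts the usual order */
	if (!spin_trylock(&obj->lock))
		return LRU_SKIP;	/* couldn't lock safely, try next pass */

	if (my_obj_busy(obj)) {
		spin_unlock(&obj->lock);
		return LRU_ROTATE;	/* recently used: rotate to list tail */
	}

	/* still under lru_lock, so the move off the LRU is race-free */
	list_move_tail(item, freeable);
	spin_unlock(&obj->lock);
	return LRU_REMOVED;		/* walker adjusts nr_items for us */
}
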
 /**
  * prune_dcache_sb - shrink the dcache
  * @sb: superblock
- * @count: number of entries to try to free
+ * @nr_to_scan: number of entries to try to free
  *
- * Attempt to shrink the superblock dcache LRU by @count entries. This is
- * done when we need more memory an called from the superblock shrinker
+ * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
+ * done when we need more memory and is called from the superblock shrinker
  * function.
  *
@@ -915,45 +947,12 @@ static void shrink_dentry_list(struct list_head *list)
  */
 long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan)
 {
-	struct dentry *dentry;
-	LIST_HEAD(referenced);
-	LIST_HEAD(tmp);
-	long freed = 0;
+	LIST_HEAD(dispose);
+	long freed;
 
-relock:
-	spin_lock(&sb->s_dentry_lru_lock);
-	while (!list_empty(&sb->s_dentry_lru)) {
-		dentry = list_entry(sb->s_dentry_lru.prev,
-				struct dentry, d_lru);
-		BUG_ON(dentry->d_sb != sb);
-
-		if (!spin_trylock(&dentry->d_lock)) {
-			spin_unlock(&sb->s_dentry_lru_lock);
-			cpu_relax();
-			goto relock;
-		}
-
-		if (dentry->d_flags & DCACHE_REFERENCED) {
-			dentry->d_flags &= ~DCACHE_REFERENCED;
-			list_move(&dentry->d_lru, &referenced);
-			spin_unlock(&dentry->d_lock);
-		} else {
-			list_move(&dentry->d_lru, &tmp);
-			dentry->d_flags |= DCACHE_SHRINK_LIST;
-			this_cpu_dec(nr_dentry_unused);
-			sb->s_nr_dentry_unused--;
-			spin_unlock(&dentry->d_lock);
-			freed++;
-			if (!--nr_to_scan)
-				break;
-		}
-		cond_resched_lock(&sb->s_dentry_lru_lock);
-	}
-	if (!list_empty(&referenced))
-		list_splice(&referenced, &sb->s_dentry_lru);
-	spin_unlock(&sb->s_dentry_lru_lock);
-
-	shrink_dentry_list(&tmp);
+	freed = list_lru_walk(&sb->s_dentry_lru, dentry_lru_isolate,
+			      &dispose, nr_to_scan);
+	shrink_dentry_list(&dispose);
 	return freed;
 }
 
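
With the walk logic moved behind list_lru_walk(), prune_dcache_sb() reduces to
one walk plus disposal of whatever the callback isolated. A hedged sketch of
how a shrinker's scan path might drive the same walk, assuming the
count/scan-style shrinker API used alongside this series; my_lru,
my_lru_isolate() (from the sketch above) and dispose_list() are illustrative:

#include <linux/list_lru.h>
#include <linux/shrinker.h>

/* Illustrative: an LRU and a disposal helper for the isolated objects. */
static struct list_lru my_lru;
static void dispose_list(struct list_head *list);

static unsigned long my_scan_objects(struct shrinker *shrink,
				     struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	/* isolate up to nr_to_scan entries onto the private dispose list */
	freed = list_lru_walk(&my_lru, my_lru_isolate, &dispose,
			      sc->nr_to_scan);
	dispose_list(&dispose);
	return freed;
}
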
@@ -987,24 +986,10 @@ shrink_dcache_list(
  */
 void shrink_dcache_sb(struct super_block *sb)
 {
-	LIST_HEAD(tmp);
+	long disposed;
 
-	spin_lock(&sb->s_dentry_lru_lock);
-	while (!list_empty(&sb->s_dentry_lru)) {
-		/*
-		 * account for removal here so we don't need to handle it later
-		 * even though the dentry is no longer on the lru list.
-		 */
-		list_splice_init(&sb->s_dentry_lru, &tmp);
-		this_cpu_sub(nr_dentry_unused, sb->s_nr_dentry_unused);
-		sb->s_nr_dentry_unused = 0;
-		spin_unlock(&sb->s_dentry_lru_lock);
-
-		shrink_dcache_list(&tmp);
-
-		spin_lock(&sb->s_dentry_lru_lock);
-	}
-	spin_unlock(&sb->s_dentry_lru_lock);
+	disposed = list_lru_dispose_all(&sb->s_dentry_lru, shrink_dcache_list);
+	this_cpu_sub(nr_dentry_unused, disposed);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
 
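
Because the LRU no longer keeps a per-sb s_nr_dentry_unused counter,
shrink_dcache_sb() relies on list_lru_dispose_all() (the helper this hunk
calls) returning the total number of entries it handed to the dispose
callback, so one bulk counter update replaces a decrement per dentry. A
minimal sketch of that pattern under the same assumption; nr_my_unused and
dispose_list() are hypothetical:

#include <linux/list_lru.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(long, nr_my_unused);
static void dispose_list(struct list_head *list);	/* frees each entry */

static void my_shrink_all(struct list_lru *lru)
{
	long disposed;

	/* splice everything off the LRU and hand it to the callback */
	disposed = list_lru_dispose_all(lru, dispose_list);

	/* one bulk counter fixup instead of one decrement per item */
	this_cpu_sub(nr_my_unused, disposed);
}
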
@@ -1366,7 +1351,8 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
 	if (dentry->d_lockref.count) {
 		dentry_lru_del(dentry);
 	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-		dentry_lru_move_list(dentry, &data->dispose);
+		dentry_lru_del(dentry);
+		list_add_tail(&dentry->d_lru, &data->dispose);
 		dentry->d_flags |= DCACHE_SHRINK_LIST;
 		data->found++;
 		ret = D_WALK_NORETRY;