author     Al Viro <viro@zeniv.linux.org.uk>  2018-02-23 21:54:18 -0500
committer  Al Viro <viro@zeniv.linux.org.uk>  2018-03-29 15:07:02 -0400
commit     3b3f09f48ba78c0634e929849860a6447d057eed
tree       6f1ee9debc0343c409b2d6158e9f50c42127e8fe
parent     c19457f0aed7fae73bb40e68ffcc72f36e3966a5
get rid of trylock loop in locking dentries on shrink list
In case of trylock failure, don't re-add the dentry to the list - drop the locks and carefully take them again in the right order. For shrink_dentry_list(), somebody having grabbed a reference to a dentry means that we can kick it off-list, so if we find a dentry being modified under us we don't need to play silly buggers with retries anyway - off the list it is.

The locking logic is taken out into a helper of its own; lock_parent() is no longer used for dentries that can be killed under us.

[fix from Eric Biggers folded]

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
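The pattern the message describes is: trylock the second lock; on failure, drop the lock already held, take both locks in the canonical order, then revalidate that nothing changed while the lock was dropped, giving up rather than retrying in a loop. Below is a minimal user-space sketch of that pattern, not the fs/dcache.c code itself: it assumes plain pthread mutexes and a hypothetical generation counter for revalidation, and the names (struct node, lock_both(), outer, gen) are illustrative only; the real helper revalidates via d_lockref.count, d_inode and d_parent instead.

/*
 * Illustrative sketch only (plain pthreads, not kernel code).
 * Caller holds node->lock.  On success both node->lock and *outer are
 * held; on failure only node->lock is held and the caller is expected
 * to give up on this node instead of retrying in a loop.
 */
#include <pthread.h>
#include <stdbool.h>

struct node {
	pthread_mutex_t lock;	/* the "inner" lock the caller already holds */
	pthread_mutex_t *outer;	/* must be taken before ->lock */
	unsigned long gen;	/* hypothetical: bumped on every modification */
};

static bool lock_both(struct node *node)
{
	pthread_mutex_t *outer = node->outer;
	unsigned long gen;

	/* Fast path: uncontended, no lock juggling needed. */
	if (pthread_mutex_trylock(outer) == 0)
		return true;

	/*
	 * Slow path: remember the current generation, drop node->lock,
	 * take the locks in the right order, then check that the node
	 * was not modified while it was unlocked.
	 */
	gen = node->gen;
	pthread_mutex_unlock(&node->lock);
	pthread_mutex_lock(outer);
	pthread_mutex_lock(&node->lock);
	if (node->gen != gen) {
		/* Somebody changed the node under us; let the caller bail. */
		pthread_mutex_unlock(outer);
		return false;
	}
	return true;
}

The kernel helper additionally has to cope with dentry->d_parent itself changing while d_lock is dropped, which is why shrink_lock_dentry() in the diff below re-reads d_parent under RCU and compares it again after taking the parent's lock.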
-rw-r--r--	fs/dcache.c	104
1 file changed, 67 insertions, 37 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 1684b6b262de..af8501489af5 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -974,56 +974,86 @@ restart:
 }
 EXPORT_SYMBOL(d_prune_aliases);
 
-static void shrink_dentry_list(struct list_head *list)
+/*
+ * Lock a dentry from shrink list.
+ * Note that dentry is *not* protected from concurrent dentry_kill(),
+ * d_delete(), etc. It is protected from freeing (by the fact of
+ * being on a shrink list), but everything else is fair game.
+ * Return false if dentry has been disrupted or grabbed, leaving
+ * the caller to kick it off-list. Otherwise, return true and have
+ * that dentry's inode and parent both locked.
+ */
+static bool shrink_lock_dentry(struct dentry *dentry)
 {
-	struct dentry *dentry, *parent;
+	struct inode *inode;
+	struct dentry *parent;
 
-	while (!list_empty(list)) {
-		struct inode *inode;
-		dentry = list_entry(list->prev, struct dentry, d_lru);
+	if (dentry->d_lockref.count)
+		return false;
+
+	inode = dentry->d_inode;
+	if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
+		rcu_read_lock();	/* to protect inode */
+		spin_unlock(&dentry->d_lock);
+		spin_lock(&inode->i_lock);
 		spin_lock(&dentry->d_lock);
-		parent = lock_parent(dentry);
+		if (unlikely(dentry->d_lockref.count))
+			goto out;
+		/* changed inode means that somebody had grabbed it */
+		if (unlikely(inode != dentry->d_inode))
+			goto out;
+		rcu_read_unlock();
+	}
 
-		/*
-		 * The dispose list is isolated and dentries are not accounted
-		 * to the LRU here, so we can simply remove it from the list
-		 * here regardless of whether it is referenced or not.
-		 */
-		d_shrink_del(dentry);
+	parent = dentry->d_parent;
+	if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
+		return true;
 
-		/*
-		 * We found an inuse dentry which was not removed from
-		 * the LRU because of laziness during lookup. Do not free it.
-		 */
-		if (dentry->d_lockref.count > 0) {
-			spin_unlock(&dentry->d_lock);
-			if (parent)
-				spin_unlock(&parent->d_lock);
-			continue;
-		}
+	rcu_read_lock();	/* to protect parent */
+	spin_unlock(&dentry->d_lock);
+	parent = READ_ONCE(dentry->d_parent);
+	spin_lock(&parent->d_lock);
+	if (unlikely(parent != dentry->d_parent)) {
+		spin_unlock(&parent->d_lock);
+		spin_lock(&dentry->d_lock);
+		goto out;
+	}
+	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
+	if (likely(!dentry->d_lockref.count)) {
+		rcu_read_unlock();
+		return true;
+	}
+	spin_unlock(&parent->d_lock);
+out:
+	if (inode)
+		spin_unlock(&inode->i_lock);
+	rcu_read_unlock();
+	return false;
+}
 
+static void shrink_dentry_list(struct list_head *list)
+{
+	while (!list_empty(list)) {
+		struct dentry *dentry, *parent;
+		struct inode *inode;
 
-		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
-			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
+		dentry = list_entry(list->prev, struct dentry, d_lru);
+		spin_lock(&dentry->d_lock);
+		if (!shrink_lock_dentry(dentry)) {
+			bool can_free = false;
+			d_shrink_del(dentry);
+			if (dentry->d_lockref.count < 0)
+				can_free = dentry->d_flags & DCACHE_MAY_FREE;
 			spin_unlock(&dentry->d_lock);
-			if (parent)
-				spin_unlock(&parent->d_lock);
 			if (can_free)
 				dentry_free(dentry);
 			continue;
 		}
-
-		inode = dentry->d_inode;
-		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
-			d_shrink_add(dentry, list);
-			spin_unlock(&dentry->d_lock);
-			if (parent)
-				spin_unlock(&parent->d_lock);
-			continue;
-		}
-
+		d_shrink_del(dentry);
+		parent = dentry->d_parent;
 		__dentry_kill(dentry);
-
+		if (parent == dentry)
+			continue;
 		/*
 		 * We need to prune ancestors too. This is necessary to prevent
 		 * quadratic behavior of shrink_dcache_parent(), but is also