author	Glauber Costa <glommer@gmail.com>	2013-08-27 20:18:03 -0400
committer	Al Viro <viro@zeniv.linux.org.uk>	2013-09-10 18:56:31 -0400
commit	4e717f5c1083995c334ced639cc77a75e9972567 (patch)
tree	f236061b46b4401913652b167798210132d611ad /mm
parent	6a4f496fd2fc74fa036732ae52c184952d6e3e37 (diff)
list_lru: remove special case function list_lru_dispose_all.
The list_lru implementation has one function, list_lru_dispose_all, with only one user (the dentry code). At first, such a function appears to make sense because we are really not interested in the result of isolating each dentry separately - all of them are going away anyway. However, its implementation is buggy in the following way:

When we call list_lru_dispose_all in fs/dcache.c, we scan all dentries marking them with DCACHE_SHRINK_LIST. However, this is done without the nlru->lock taken. The immediate result of that is that someone else may add or remove the dentry from the LRU at the same time. When list_lru_del happens in that scenario, we will see an element that is not yet marked with DCACHE_SHRINK_LIST (even though it will be in the future) and obviously remove it from an lru where the element no longer is. Since list_lru_dispose_all will in effect count down nlru's nr_items and list_lru_del will do the same, this will lead to an imbalance.

The solution for this would not be so simple: we can obviously just keep the lru_lock taken, but then we have no guarantee that we will be able to acquire the dentry lock (dentry->d_lock). To properly solve this, we need a communication mechanism between the lru and dentry code, so they can coordinate with each other.

Such a mechanism already exists in the form of the list_lru_walk_cb callback. So it is possible to construct a dcache-side prune function that does the right thing only by calling list_lru_walk in a loop until no more dentries are available; a rough sketch of that pattern follows below.

With only one user, plus the fact that a sane solution for the problem would involve bouncing between dcache and list_lru anyway, I see little justification to keep the special-case list_lru_dispose_all in tree.

Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: Michal Hocko <mhocko@suse.cz>
Acked-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
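As a rough illustration of the loop-over-list_lru_walk pattern described above (not the exact code that lands in fs/dcache.c), a dcache-side prune function could look like the sketch below. The names prune_all_dentries() and dentry_lru_isolate_shrink() are assumptions for illustration; the isolate callback is expected to trylock dentry->d_lock, set DCACHE_SHRINK_LIST, and move the dentry onto the private dispose list, returning LRU_REMOVED (or LRU_SKIP when the lock cannot be taken). Because every pass isolates entries under nlru->lock inside list_lru_walk_node, the marking no longer races with list_lru_del on nr_items.

#include <linux/kernel.h>
#include <linux/list_lru.h>
#include <linux/dcache.h>

/*
 * Sketch only, intended to live in fs/dcache.c: walk the per-sb dentry
 * LRU repeatedly until a pass isolates nothing more.
 */
static void prune_all_dentries(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		/*
		 * dentry_lru_isolate_shrink is a hypothetical callback of
		 * type list_lru_walk_cb that marks and isolates dentries.
		 */
		freed = list_lru_walk(&sb->s_dentry_lru,
				      dentry_lru_isolate_shrink,
				      &dispose, UINT_MAX);

		/* Kill everything isolated on this pass. */
		shrink_dentry_list(&dispose);
	} while (freed > 0);
}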
Diffstat (limited to 'mm')
-rw-r--r--	mm/list_lru.c	42
1 file changed, 0 insertions(+), 42 deletions(-)
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 86cb55464f71..f91c24188573 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -112,48 +112,6 @@ restart:
 }
 EXPORT_SYMBOL_GPL(list_lru_walk_node);
 
-static unsigned long list_lru_dispose_all_node(struct list_lru *lru, int nid,
-					       list_lru_dispose_cb dispose)
-{
-	struct list_lru_node *nlru = &lru->node[nid];
-	LIST_HEAD(dispose_list);
-	unsigned long disposed = 0;
-
-	spin_lock(&nlru->lock);
-	while (!list_empty(&nlru->list)) {
-		list_splice_init(&nlru->list, &dispose_list);
-		disposed += nlru->nr_items;
-		nlru->nr_items = 0;
-		node_clear(nid, lru->active_nodes);
-		spin_unlock(&nlru->lock);
-
-		dispose(&dispose_list);
-
-		spin_lock(&nlru->lock);
-	}
-	spin_unlock(&nlru->lock);
-	return disposed;
-}
-
-unsigned long list_lru_dispose_all(struct list_lru *lru,
-				   list_lru_dispose_cb dispose)
-{
-	unsigned long disposed;
-	unsigned long total = 0;
-	int nid;
-
-	do {
-		disposed = 0;
-		for_each_node_mask(nid, lru->active_nodes) {
-			disposed += list_lru_dispose_all_node(lru, nid,
-							      dispose);
-		}
-		total += disposed;
-	} while (disposed != 0);
-
-	return total;
-}
-
 int list_lru_init(struct list_lru *lru)
 {
 	int i;