author		Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2018-08-17 18:49:55 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-17 19:20:32 -0400
commit		6b51e88199ca4f75ff647eff28efd30bfcb08dc4 (patch)
tree		6c1411444d28a3a77c36bfddb92576b43469245f
parent		6e018968f8d384d84484eba8e4c90489a25d7095 (diff)
mm/list_lru: introduce list_lru_shrink_walk_irq()
Provide list_lru_shrink_walk_irq() and let it behave like
list_lru_walk_one() except that it locks the spinlock with
spin_lock_irq(). This is used by scan_shadow_nodes() because its lock
nests within the i_pages lock, which is acquired with interrupts
disabled. This change allows the use of proper locking primitives
instead of the hand-crafted local_irq_disable() plus spin_lock().

There is no EXPORT_SYMBOL provided because the current user is
in-kernel only.

Add list_lru_shrink_walk_irq(), which acquires the spinlock with the
proper locking primitives.

Link: http://lkml.kernel.org/r/20180716111921.5365-5-bigeasy@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/list_lru.h	25
-rw-r--r--	mm/list_lru.c	15
-rw-r--r--	mm/workingset.c	8
3 files changed, 42 insertions(+), 6 deletions(-)
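For illustration, a minimal usage sketch of the new helper (not part of the patch; the shrinker callback, lru and isolate function below are hypothetical): a scan_objects callback calls list_lru_shrink_walk_irq() directly instead of wrapping list_lru_shrink_walk() in local_irq_disable()/local_irq_enable().

/*
 * Hypothetical caller, only to show the calling convention of
 * list_lru_shrink_walk_irq(); example_lru/example_isolate/example_scan
 * do not exist in the kernel.
 */
#include <linux/list_lru.h>
#include <linux/shrinker.h>

static struct list_lru example_lru;		/* hypothetical list_lru */

static enum lru_status example_isolate(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lru_lock, void *cb_arg)
{
	/* decide per item; LRU_SKIP leaves the item on the list */
	return LRU_SKIP;
}

static unsigned long example_scan(struct shrinker *shrinker,
				  struct shrink_control *sc)
{
	/*
	 * The helper takes the per-node lru lock with spin_lock_irq(),
	 * so no local_irq_disable()/local_irq_enable() around the walk.
	 */
	return list_lru_shrink_walk_irq(&example_lru, sc, example_isolate,
					NULL);
}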
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index d9c16f2f2f00..aa5efd9351eb 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -166,6 +166,23 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
 				    int nid, struct mem_cgroup *memcg,
 				    list_lru_walk_cb isolate, void *cb_arg,
 				    unsigned long *nr_to_walk);
+/**
+ * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
+ * @lru: the lru pointer.
+ * @nid: the node id to scan from.
+ * @memcg: the cgroup to scan from.
+ * @isolate: callback function that is resposible for deciding what to do with
+ *  the item currently being scanned
+ * @cb_arg: opaque type that will be passed to @isolate
+ * @nr_to_walk: how many items to scan.
+ *
+ * Same as @list_lru_walk_one except that the spinlock is acquired with
+ * spin_lock_irq().
+ */
+unsigned long list_lru_walk_one_irq(struct list_lru *lru,
+				    int nid, struct mem_cgroup *memcg,
+				    list_lru_walk_cb isolate, void *cb_arg,
+				    unsigned long *nr_to_walk);
 unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
 				 list_lru_walk_cb isolate, void *cb_arg,
 				 unsigned long *nr_to_walk);
@@ -179,6 +196,14 @@ list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
 }
 
 static inline unsigned long
+list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
+			 list_lru_walk_cb isolate, void *cb_arg)
+{
+	return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
+				     &sc->nr_to_scan);
+}
+
+static inline unsigned long
 list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
 	      void *cb_arg, unsigned long nr_to_walk)
 {
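As a further sketch of the lower-level declaration added above (the lru, scan budget and callback are hypothetical, not from this patch), walking node 0 of a list_lru with the IRQ-disabling variant looks like:

#include <linux/list_lru.h>

static struct list_lru example_lru;	/* hypothetical list_lru */

static enum lru_status example_isolate(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lru_lock, void *cb_arg)
{
	return LRU_SKIP;		/* keep every item on the list */
}

static unsigned long example_walk_node0(void)
{
	unsigned long nr_to_walk = 32;	/* hypothetical scan budget */

	/* same as list_lru_walk_one(), but the lock is taken with IRQs off */
	return list_lru_walk_one_irq(&example_lru, 0, NULL, example_isolate,
				     NULL, &nr_to_walk);
}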
diff --git a/mm/list_lru.c b/mm/list_lru.c
index f5c6a2d1ea66..5b30625fd365 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -282,6 +282,21 @@ list_lru_walk_one(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
 }
 EXPORT_SYMBOL_GPL(list_lru_walk_one);
 
+unsigned long
+list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
+		      list_lru_walk_cb isolate, void *cb_arg,
+		      unsigned long *nr_to_walk)
+{
+	struct list_lru_node *nlru = &lru->node[nid];
+	unsigned long ret;
+
+	spin_lock_irq(&nlru->lock);
+	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
+				  nr_to_walk);
+	spin_unlock_irq(&nlru->lock);
+	return ret;
+}
+
 unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
 				 list_lru_walk_cb isolate, void *cb_arg,
 				 unsigned long *nr_to_walk)
diff --git a/mm/workingset.c b/mm/workingset.c
index bc72ad029b3e..4516dd790129 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -483,13 +483,9 @@ out:
 static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
 				       struct shrink_control *sc)
 {
-	unsigned long ret;
-
 	/* list_lru lock nests inside the IRQ-safe i_pages lock */
-	local_irq_disable();
-	ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
-	local_irq_enable();
-	return ret;
+	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
+					NULL);
 }
 
 static struct shrinker workingset_shadow_shrinker = {