author		Johannes Weiner <hannes@cmpxchg.org>	2014-04-03 17:47:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-04-03 19:21:01 -0400
commit		449dd6984d0e47643c04c807f609dd56d48d5bcc (patch)
tree		69f4a0a90290b048e63effc617c8ec907e8d6696 /mm/workingset.c
parent		139e561660fe11e0fc35e142a800df3dd7d03e9d (diff)
mm: keep page cache radix tree nodes in check
Previously, page cache radix tree nodes were freed after reclaim emptied
out their page pointers. But now reclaim stores shadow entries in their
place, which are only reclaimed when the inodes themselves are
reclaimed. This is problematic for bigger files that are still in use
after a significant amount of their cache has been reclaimed, without
any of those pages actually refaulting. The shadow entries will just
sit there and waste memory. In the worst case, the shadow entries will
accumulate until the machine runs out of memory.
To get this under control, the VM will track radix tree nodes
exclusively containing shadow entries on a per-NUMA node list. Per-NUMA
rather than global because we expect the radix tree nodes themselves to
be allocated node-locally and we want to reduce cross-node references of
otherwise independent cache workloads. A simple shrinker will then
reclaim these nodes on memory pressure.
A few things need to be stored in the radix tree node to implement the
shadow node LRU and allow tree deletions coming from the list (a sketch
of the resulting node layout follows the list):
1. There is no index available that would describe the reverse path
from the node up to the tree root, which is needed to perform a
deletion. To solve this, encode in each node its offset inside the
parent. This can be stored in the unused upper bits of the same
member that stores the node's height at no extra space cost.
2. The number of shadow entries needs to be counted in addition to the
regular entries, to quickly detect when the node is ready to go to
the shadow node LRU list. The current entry count is an unsigned
int but the maximum number of entries is 64, so a shadow counter
can easily be stored in the unused upper bits.
3. Tree modification needs tree lock and tree root, which are located
in the address space, so store an address_space backpointer in the
node. The parent pointer of the node is in a union with the 2-word
rcu_head, so the backpointer comes at no extra cost as well.
4. The node needs to be linked to an LRU list, which requires a list
head inside the node. This does increase the size of the node, but
it does not change the number of objects that fit into a slab page.
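Since the diffstat below is limited to mm/workingset.c, the radix tree
side of these changes is not shown. The following is an illustrative
sketch only of the node layout the four points above describe; apart
from the RADIX_TREE_COUNT_* names used by the workingset code below,
the field names and constant values here are assumptions, not the
patch's exact definitions:

	/* Illustrative sketch, not the patch's exact definitions. */
	#define RADIX_TREE_MAP_SHIFT	6	/* 64 slots/node; config-dependent */
	#define RADIX_TREE_MAP_SIZE	(1UL << RADIX_TREE_MAP_SHIFT)

	/* Low bits of ->count hold pages, high bits shadow entries (2.) */
	#define RADIX_TREE_COUNT_SHIFT	(RADIX_TREE_MAP_SHIFT + 1)
	#define RADIX_TREE_COUNT_MASK	((1UL << RADIX_TREE_COUNT_SHIFT) - 1)

	struct radix_tree_node {
		unsigned int	path;	/* offset in parent packed into the
					   unused upper bits of the height (1.) */
		unsigned int	count;	/* pages | shadows << COUNT_SHIFT (2.) */
		union {
			struct {
				struct radix_tree_node *parent;
				void *private_data;	/* address_space (3.) */
			};
			/* 2-word rcu_head shares this space, so the
			   backpointer costs nothing extra (3.) */
			struct rcu_head	rcu_head;
		};
		struct list_head private_list;	/* shadow node LRU link (4.) */
		void __rcu	*slots[RADIX_TREE_MAP_SIZE];
	};

With 64 slots, both the page count and the shadow count fit in 7 bits
each, which is why a single unsigned int can carry both at no space cost.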
[akpm@linux-foundation.org: export the right function]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Bob Liu <bob.liu@oracle.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Luigi Semenzato <semenzato@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Metin Doslu <metin@citusdata.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Ozgun Erdogan <ozgun@citusdata.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Roman Gushchin <klamm@yandex-team.ru>
Cc: Ryan Mallon <rmallon@gmail.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/workingset.c')
-rw-r--r--	mm/workingset.c	161
1 file changed, 161 insertions, 0 deletions
diff --git a/mm/workingset.c b/mm/workingset.c
index 8a6c7cff4923..f7216fa7da27 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -251,3 +251,164 @@ void workingset_activation(struct page *page)
 {
 	atomic_long_inc(&page_zone(page)->inactive_age);
 }
+
+/*
+ * Shadow entries reflect the share of the working set that does not
+ * fit into memory, so their number depends on the access pattern of
+ * the workload. In most cases, they will refault or get reclaimed
+ * along with the inode, but a (malicious) workload that streams
+ * through files with a total size several times that of available
+ * memory, while preventing the inodes from being reclaimed, can
+ * create excessive amounts of shadow nodes. To keep a lid on this,
+ * track shadow nodes and reclaim them when they grow way past the
+ * point where they would still be useful.
+ */
+
+struct list_lru workingset_shadow_nodes;
+
+static unsigned long count_shadow_nodes(struct shrinker *shrinker,
+					struct shrink_control *sc)
+{
+	unsigned long shadow_nodes;
+	unsigned long max_nodes;
+	unsigned long pages;
+
+	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
+	local_irq_disable();
+	shadow_nodes = list_lru_count_node(&workingset_shadow_nodes, sc->nid);
+	local_irq_enable();
+
+	pages = node_present_pages(sc->nid);
+	/*
+	 * Active cache pages are limited to 50% of memory, and shadow
+	 * entries that represent a refault distance bigger than that
+	 * do not have any effect. Limit the number of shadow nodes
+	 * such that shadow entries do not exceed the number of active
+	 * cache pages, assuming a worst-case node population density
+	 * of 1/8th on average.
+	 *
+	 * On 64-bit with 7 radix_tree_nodes per page and 64 slots
+	 * each, this will reclaim shadow entries when they consume
+	 * ~2% of available memory:
+	 *
+	 * PAGE_SIZE / radix_tree_nodes / node_entries / PAGE_SIZE
+	 */
+	max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3);
+
+	if (shadow_nodes <= max_nodes)
+		return 0;
+
+	return shadow_nodes - max_nodes;
+}
+
+static enum lru_status shadow_lru_isolate(struct list_head *item,
+					  spinlock_t *lru_lock,
+					  void *arg)
+{
+	struct address_space *mapping;
+	struct radix_tree_node *node;
+	unsigned int i;
+	int ret;
+
+	/*
+	 * Page cache insertions and deletions synchronously maintain
+	 * the shadow node LRU under the mapping->tree_lock and the
+	 * lru_lock. Because the page cache tree is emptied before
+	 * the inode can be destroyed, holding the lru_lock pins any
+	 * address_space that has radix tree nodes on the LRU.
+	 *
+	 * We can then safely transition to the mapping->tree_lock to
+	 * pin only the address_space of the particular node we want
+	 * to reclaim, take the node off-LRU, and drop the lru_lock.
+	 */
+
+	node = container_of(item, struct radix_tree_node, private_list);
+	mapping = node->private_data;
+
+	/* Coming from the list, invert the lock order */
+	if (!spin_trylock(&mapping->tree_lock)) {
+		spin_unlock(lru_lock);
+		ret = LRU_RETRY;
+		goto out;
+	}
+
+	list_del_init(item);
+	spin_unlock(lru_lock);
+
+	/*
+	 * The nodes should only contain one or more shadow entries,
+	 * no pages, so we expect to be able to remove them all and
+	 * delete and free the empty node afterwards.
+	 */
+
+	BUG_ON(!node->count);
+	BUG_ON(node->count & RADIX_TREE_COUNT_MASK);
+
+	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
+		if (node->slots[i]) {
+			BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
+			node->slots[i] = NULL;
+			BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT));
+			node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
+			BUG_ON(!mapping->nrshadows);
+			mapping->nrshadows--;
+		}
+	}
+	BUG_ON(node->count);
+	inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
+	if (!__radix_tree_delete_node(&mapping->page_tree, node))
+		BUG();
+
+	spin_unlock(&mapping->tree_lock);
+	ret = LRU_REMOVED_RETRY;
+out:
+	local_irq_enable();
+	cond_resched();
+	local_irq_disable();
+	spin_lock(lru_lock);
+	return ret;
+}
+
+static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
+				       struct shrink_control *sc)
+{
+	unsigned long ret;
+
+	/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
+	local_irq_disable();
+	ret = list_lru_walk_node(&workingset_shadow_nodes, sc->nid,
+				 shadow_lru_isolate, NULL, &sc->nr_to_scan);
+	local_irq_enable();
+	return ret;
+}
+
+static struct shrinker workingset_shadow_shrinker = {
+	.count_objects = count_shadow_nodes,
+	.scan_objects = scan_shadow_nodes,
+	.seeks = DEFAULT_SEEKS,
+	.flags = SHRINKER_NUMA_AWARE,
+};
+
+/*
+ * Our list_lru->lock is IRQ-safe as it nests inside the IRQ-safe
+ * mapping->tree_lock.
+ */
+static struct lock_class_key shadow_nodes_key;
+
+static int __init workingset_init(void)
+{
+	int ret;
+
+	ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
+	if (ret)
+		goto err;
+	ret = register_shrinker(&workingset_shadow_shrinker);
+	if (ret)
+		goto err_list_lru;
+	return 0;
+err_list_lru:
+	list_lru_destroy(&workingset_shadow_nodes);
+err:
+	return ret;
+}
+module_init(workingset_init);
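To make the reclaim threshold in count_shadow_nodes() above concrete,
the shift can be worked through for one common configuration. This is a
user-space sketch, not kernel code; the 16GB node size and the
RADIX_TREE_MAP_SHIFT value of 6 are assumptions for illustration:

	#include <stdio.h>

	#define RADIX_TREE_MAP_SHIFT	6	/* 64 slots per node */

	int main(void)
	{
		/* Example NUMA node: 16GB of 4K pages */
		unsigned long pages = 4UL * 1024 * 1024;

		/* Same shift as the patch: /2 (active cap), /8 (density) */
		unsigned long max_nodes = pages >> (1 + RADIX_TREE_MAP_SHIFT - 3);

		/* At the assumed 1/8 density, each node holds ~8 entries */
		unsigned long shadow_entries = max_nodes * 8;

		printf("pages on node:       %lu\n", pages);
		printf("max shadow nodes:    %lu (pages/16)\n", max_nodes);
		printf("shadow entries held: %lu (pages/2)\n", shadow_entries);
		return 0;
	}

At the assumed 1/8 population density, the allowed nodes hold exactly
pages/2 shadow entries, matching the 50%-of-memory cap on active cache
pages cited in the comment; shadow entries beyond that point encode
refault distances too large to ever produce an activation, so reclaiming
their nodes loses nothing.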