diff options
author | David Rientjes <rientjes@google.com> | 2011-03-22 19:30:46 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-03-22 20:44:01 -0400 |
commit | ddd588b5dd55f14320379961e47683db4e4c1d90 (patch) | |
tree | 09de73c51c8c5e701e644236890a5d205ec3cdc9 /mm/page_alloc.c | |
parent | 94dcf29a11b3d20a28790598d701f98484a969da (diff) |
oom: suppress nodes that are not allowed from meminfo on oom kill
The oom killer is extremely verbose for machines with a large number of
cpus and/or nodes. This verbosity can often be harmful if it causes other
important messages to be scrolled from the kernel log and incurs a
significant time delay, specifically for kernels with CONFIG_NODES_SHIFT >
8.
This patch causes only memory information to be displayed for nodes that
are allowed by current's cpuset when dumping the VM state. Information
for all other nodes is irrelevant to the oom condition; we don't care if
there's an abundance of memory elsewhere if we can't access it.
This only affects the behavior of dumping memory information when an oom
is triggered. Other dumps, such as for sysrq+m, still display the
unfiltered form when using the existing show_mem() interface.
Additionally, the per-cpu pageset statistics are extremely verbose in oom
killer output, so it is now suppressed. This removes
nodes_weight(current->mems_allowed) * (1 + nr_cpus)
lines from the oom killer output.
Callers may use __show_mem(SHOW_MEM_FILTER_NODES) to filter disallowed
nodes.
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 34 |
1 file changed, 33 insertions, 1 deletion
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7945247b1e53..36be3ba4bbed 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2411,19 +2411,42 @@ void si_meminfo_node(struct sysinfo *val, int nid) | |||
2411 | } | 2411 | } |
2412 | #endif | 2412 | #endif |
2413 | 2413 | ||
2414 | /* | ||
2415 | * Determine whether the zone's node should be displayed or not, depending on | ||
2416 | * whether SHOW_MEM_FILTER_NODES was passed to __show_free_areas(). | ||
2417 | */ | ||
2418 | static bool skip_free_areas_zone(unsigned int flags, const struct zone *zone) | ||
2419 | { | ||
2420 | bool ret = false; | ||
2421 | |||
2422 | if (!(flags & SHOW_MEM_FILTER_NODES)) | ||
2423 | goto out; | ||
2424 | |||
2425 | get_mems_allowed(); | ||
2426 | ret = !node_isset(zone->zone_pgdat->node_id, | ||
2427 | cpuset_current_mems_allowed); | ||
2428 | put_mems_allowed(); | ||
2429 | out: | ||
2430 | return ret; | ||
2431 | } | ||
2432 | |||
2414 | #define K(x) ((x) << (PAGE_SHIFT-10)) | 2433 | #define K(x) ((x) << (PAGE_SHIFT-10)) |
2415 | 2434 | ||
2416 | /* | 2435 | /* |
2417 | * Show free area list (used inside shift_scroll-lock stuff) | 2436 | * Show free area list (used inside shift_scroll-lock stuff) |
2418 | * We also calculate the percentage fragmentation. We do this by counting the | 2437 | * We also calculate the percentage fragmentation. We do this by counting the |
2419 | * memory on each free list with the exception of the first item on the list. | 2438 | * memory on each free list with the exception of the first item on the list. |
2439 | * Suppresses nodes that are not allowed by current's cpuset if | ||
2440 | * SHOW_MEM_FILTER_NODES is passed. | ||
2420 | */ | 2441 | */ |
2421 | void show_free_areas(void) | 2442 | void __show_free_areas(unsigned int filter) |
2422 | { | 2443 | { |
2423 | int cpu; | 2444 | int cpu; |
2424 | struct zone *zone; | 2445 | struct zone *zone; |
2425 | 2446 | ||
2426 | for_each_populated_zone(zone) { | 2447 | for_each_populated_zone(zone) { |
2448 | if (skip_free_areas_zone(filter, zone)) | ||
2449 | continue; | ||
2427 | show_node(zone); | 2450 | show_node(zone); |
2428 | printk("%s per-cpu:\n", zone->name); | 2451 | printk("%s per-cpu:\n", zone->name); |
2429 | 2452 | ||
@@ -2465,6 +2488,8 @@ void show_free_areas(void) | |||
2465 | for_each_populated_zone(zone) { | 2488 | for_each_populated_zone(zone) { |
2466 | int i; | 2489 | int i; |
2467 | 2490 | ||
2491 | if (skip_free_areas_zone(filter, zone)) | ||
2492 | continue; | ||
2468 | show_node(zone); | 2493 | show_node(zone); |
2469 | printk("%s" | 2494 | printk("%s" |
2470 | " free:%lukB" | 2495 | " free:%lukB" |
@@ -2532,6 +2557,8 @@ void show_free_areas(void) | |||
2532 | for_each_populated_zone(zone) { | 2557 | for_each_populated_zone(zone) { |
2533 | unsigned long nr[MAX_ORDER], flags, order, total = 0; | 2558 | unsigned long nr[MAX_ORDER], flags, order, total = 0; |
2534 | 2559 | ||
2560 | if (skip_free_areas_zone(filter, zone)) | ||
2561 | continue; | ||
2535 | show_node(zone); | 2562 | show_node(zone); |
2536 | printk("%s: ", zone->name); | 2563 | printk("%s: ", zone->name); |
2537 | 2564 | ||
@@ -2551,6 +2578,11 @@ void show_free_areas(void) | |||
2551 | show_swap_cache_info(); | 2578 | show_swap_cache_info(); |
2552 | } | 2579 | } |
2553 | 2580 | ||
2581 | void show_free_areas(void) | ||
2582 | { | ||
2583 | __show_free_areas(0); | ||
2584 | } | ||
2585 | |||
2554 | static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) | 2586 | static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) |
2555 | { | 2587 | { |
2556 | zoneref->zone = zone; | 2588 | zoneref->zone = zone; |