| author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2010-03-05 16:41:55 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-03-06 14:26:25 -0500 |
| commit | 93e4a89a8c987189b168a530a331ef6d0fcf07a7 (patch) | |
| tree | deb08017c0e4874539549d3ea9bf2d7b447a43be /mm/page_alloc.c | |
| parent | fc91668eaf9e7ba61e867fc2218b7e9fb67faa4f (diff) | |
mm: restore zone->all_unreclaimable to an independent word
commit e815af95 ("change all_unreclaimable zone member to flags") changed
the all_unreclaimable member into a bit flag. But that had an undesirable
side effect: free_one_page() is one of the hottest paths in the Linux
kernel, and adding atomic ops to it can reduce kernel performance a bit.

Thus, this patch partially reverts that commit; at the very least,
all_unreclaimable shouldn't share a memory word with the other zone flags.
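
For context, a minimal userspace sketch of the two styles (illustrative stand-ins, not the kernel's real definitions): clearing one bit in a shared flags word requires an atomic read-modify-write, while a field that owns its word can be cleared with a plain store, since zone->lock already serializes the writers.

```c
#include <stdatomic.h>

enum { ZONE_ALL_UNRECLAIMABLE_BIT = 0 };	/* hypothetical bit index */

struct zone_flags_layout {			/* pre-patch style: shared word */
	_Atomic unsigned long flags;		/* all zone flags packed together */
};

struct zone_word_layout {			/* post-patch style: own word */
	int all_unreclaimable;
};

static void clear_unreclaimable_flag(struct zone_flags_layout *z)
{
	/* Atomic RMW (a lock-prefixed instruction on x86), needed because
	 * other bits in the same word may be updated concurrently. */
	atomic_fetch_and(&z->flags, ~(1UL << ZONE_ALL_UNRECLAIMABLE_BIT));
}

static void clear_unreclaimable_word(struct zone_word_layout *z)
{
	/* Plain store: a single mov; no bus locking on the hot path. */
	z->all_unreclaimable = 0;
}
```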
[akpm@linux-foundation.org: fix patch interaction]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Huang Shijie <shijie8@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 6 |
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 80bcee0c5034..0734bedabd9c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -530,7 +530,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 	int batch_free = 0;
 
 	spin_lock(&zone->lock);
-	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
+	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
 	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
@@ -568,7 +568,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 				int migratetype)
 {
 	spin_lock(&zone->lock);
-	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
+	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
 
 	__mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
@@ -2262,7 +2262,7 @@ void show_free_areas(void)
 			K(zone_page_state(zone, NR_BOUNCE)),
 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
 			zone->pages_scanned,
-			(zone_is_all_unreclaimable(zone) ? "yes" : "no")
+			(zone->all_unreclaimable ? "yes" : "no")
 			);
 		printk("lowmem_reserve[]:");
 		for (i = 0; i < MAX_NR_ZONES; i++)
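
The diffstat above is limited to mm/page_alloc.c, so the companion edit to struct zone itself is not shown. As a rough sketch (abbreviated, not the verbatim include/linux/mmzone.h diff), the field comes back as an independent member:

```c
struct zone {
	/* ... */
	int			all_unreclaimable; /* its own word: plain stores suffice */
	unsigned long		pages_scanned;	   /* since last reclaim */
	unsigned long		flags;		   /* remaining zone flags */
	/* ... */
};
```

With all_unreclaimable out of the shared flags word, both free paths above clear it with an ordinary store under zone->lock, and show_free_areas() reads the field directly instead of calling zone_is_all_unreclaimable().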