author    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>  2010-03-05 16:41:55 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2010-03-06 14:26:25 -0500
commit    93e4a89a8c987189b168a530a331ef6d0fcf07a7 (patch)
tree      deb08017c0e4874539549d3ea9bf2d7b447a43be
parent    fc91668eaf9e7ba61e867fc2218b7e9fb67faa4f (diff)
mm: restore zone->all_unreclaimable to independence word
commit e815af95 ("change all_unreclaimable zone member to flags") changed the all_unreclaimable member into a bit flag. But it had an undesirable side effect: free_one_page() is one of the hottest paths in the Linux kernel, and adding atomic ops to it can reduce kernel performance a bit. Thus, this patch partially reverts that commit; at the very least, all_unreclaimable shouldn't share a memory word with the other zone flags.

[akpm@linux-foundation.org: fix patch interaction]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Huang Shijie <shijie8@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
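The cost the message refers to is the atomic read-modify-write that set_bit()/clear_bit() perform on the shared zone->flags word, versus the plain store an independent int field allows. What follows is a minimal user-space sketch of that trade-off using GCC's __atomic builtins; the toy_zone struct and function names are illustrative stand-ins, not kernel code.

/*
 * Sketch of the trade-off described above. A state bit packed into a
 * shared flags word must be cleared with an atomic read-modify-write
 * (a lock-prefixed instruction on x86), while an independent word can
 * be cleared with a plain store.
 */
#include <stdio.h>

struct toy_zone {                      /* illustrative, not the kernel's struct zone */
        unsigned long flags;           /* shared bit flags: atomic ops required */
        int all_unreclaimable;         /* independent word: plain load/store */
};

#define TOY_ALL_UNRECLAIMABLE 0

/* Before the patch: every clear is an atomic RMW on the shared word. */
static void clear_unreclaimable_atomic(struct toy_zone *z)
{
        __atomic_fetch_and(&z->flags, ~(1UL << TOY_ALL_UNRECLAIMABLE),
                           __ATOMIC_RELAXED);
}

/* After the patch: a plain store, cheap enough for the page-free hot path. */
static void clear_unreclaimable_plain(struct toy_zone *z)
{
        z->all_unreclaimable = 0;
}

int main(void)
{
        struct toy_zone z = {
                .flags = 1UL << TOY_ALL_UNRECLAIMABLE,
                .all_unreclaimable = 1,
        };

        clear_unreclaimable_atomic(&z);
        clear_unreclaimable_plain(&z);
        printf("flags=%lu all_unreclaimable=%d\n", z.flags, z.all_unreclaimable);
        return 0;
}

On the read side, test_bit() is already a plain load, so the saving is mainly on the store side in paths like free_one_page() and free_pcppages_bulk() below.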
-rw-r--r--  include/linux/mmzone.h |  7
-rw-r--r--  mm/page_alloc.c        |  6
-rw-r--r--  mm/vmscan.c            | 22
-rw-r--r--  mm/vmstat.c            |  2
4 files changed, 14 insertions(+), 23 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a01a103341bd..bc209d8b7b5c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -306,6 +306,7 @@ struct zone {
          * free areas of different sizes
          */
         spinlock_t              lock;
+        int                     all_unreclaimable; /* All pages pinned */
 #ifdef CONFIG_MEMORY_HOTPLUG
         /* see spanned/present_pages for more description */
         seqlock_t               span_seqlock;
@@ -417,7 +418,6 @@ struct zone {
 } ____cacheline_internodealigned_in_smp;
 
 typedef enum {
-        ZONE_ALL_UNRECLAIMABLE,         /* all pages pinned */
         ZONE_RECLAIM_LOCKED,            /* prevents concurrent reclaim */
         ZONE_OOM_LOCKED,                /* zone is in OOM killer zonelist */
 } zone_flags_t;
@@ -437,11 +437,6 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
         clear_bit(flag, &zone->flags);
 }
 
-static inline int zone_is_all_unreclaimable(const struct zone *zone)
-{
-        return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
-}
-
 static inline int zone_is_reclaim_locked(const struct zone *zone)
 {
         return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 80bcee0c5034..0734bedabd9c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -530,7 +530,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
         int batch_free = 0;
 
         spin_lock(&zone->lock);
-        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
+        zone->all_unreclaimable = 0;
         zone->pages_scanned = 0;
 
         __mod_zone_page_state(zone, NR_FREE_PAGES, count);
@@ -568,7 +568,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
         int migratetype)
 {
         spin_lock(&zone->lock);
-        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
+        zone->all_unreclaimable = 0;
         zone->pages_scanned = 0;
 
         __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
@@ -2262,7 +2262,7 @@ void show_free_areas(void)
                 K(zone_page_state(zone, NR_BOUNCE)),
                 K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
                 zone->pages_scanned,
-                (zone_is_all_unreclaimable(zone) ? "yes" : "no")
+                (zone->all_unreclaimable ? "yes" : "no")
                 );
         printk("lowmem_reserve[]:");
         for (i = 0; i < MAX_NR_ZONES; i++)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bc0f8db8340f..5cbf64dd79c1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1699,8 +1699,7 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
                                 continue;
                         note_zone_scanning_priority(zone, priority);
 
-                        if (zone_is_all_unreclaimable(zone) &&
-                            priority != DEF_PRIORITY)
+                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                 continue;       /* Let kswapd poll it */
                         sc->all_unreclaimable = 0;
                 } else {
@@ -1927,7 +1926,7 @@ static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
                 if (!populated_zone(zone))
                         continue;
 
-                if (zone_is_all_unreclaimable(zone))
+                if (zone->all_unreclaimable)
                         continue;
 
                 if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
@@ -2017,8 +2016,7 @@ loop_again:
                         if (!populated_zone(zone))
                                 continue;
 
-                        if (zone_is_all_unreclaimable(zone) &&
-                            priority != DEF_PRIORITY)
+                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                 continue;
 
                         /*
@@ -2061,8 +2059,7 @@ loop_again:
                         if (!populated_zone(zone))
                                 continue;
 
-                        if (zone_is_all_unreclaimable(zone) &&
-                            priority != DEF_PRIORITY)
+                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                 continue;
 
                         temp_priority[i] = priority;
@@ -2089,12 +2086,11 @@ loop_again:
                                                 lru_pages);
                         sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                         total_scanned += sc.nr_scanned;
-                        if (zone_is_all_unreclaimable(zone))
+                        if (zone->all_unreclaimable)
                                 continue;
-                        if (nr_slab == 0 && zone->pages_scanned >=
-                                (zone_reclaimable_pages(zone) * 6))
-                                zone_set_flag(zone,
-                                              ZONE_ALL_UNRECLAIMABLE);
+                        if (nr_slab == 0 &&
+                            zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
+                                zone->all_unreclaimable = 1;
                         /*
                          * If we've done a decent amount of scanning and
                          * the reclaim ratio is low, start doing writepage
@@ -2624,7 +2620,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
             zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
                 return ZONE_RECLAIM_FULL;
 
-        if (zone_is_all_unreclaimable(zone))
+        if (zone->all_unreclaimable)
                 return ZONE_RECLAIM_FULL;
 
         /*
diff --git a/mm/vmstat.c b/mm/vmstat.c
index fc5aa183bc45..7f760cbc73f3 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -763,7 +763,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
763 "\n prev_priority: %i" 763 "\n prev_priority: %i"
764 "\n start_pfn: %lu" 764 "\n start_pfn: %lu"
765 "\n inactive_ratio: %u", 765 "\n inactive_ratio: %u",
766 zone_is_all_unreclaimable(zone), 766 zone->all_unreclaimable,
767 zone->prev_priority, 767 zone->prev_priority,
768 zone->zone_start_pfn, 768 zone->zone_start_pfn,
769 zone->inactive_ratio); 769 zone->inactive_ratio);