path: root/include
author    Mel Gorman <mel@csn.ul.ie>  2007-07-17 07:03:12 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-17 13:22:59 -0400
commit    2a1e274acf0b1c192face19a4be7c12d4503eaaf (patch)
tree      f7e98e1fe19d38bb10bf178fb8f8ed1789b659b2 /include
parent    769848c03895b63e5662eb7e4ec8c4866f7d0183 (diff)
Create the ZONE_MOVABLE zone
The following 8 patches against 2.6.20-mm2 create a zone called ZONE_MOVABLE that is only usable by allocations that specify both __GFP_HIGHMEM and __GFP_MOVABLE.  This has the effect of keeping all non-movable pages within a single memory partition while allowing movable allocations to be satisfied from either partition.  The patches may be applied with the list-based anti-fragmentation patches that group pages together based on mobility.

The size of the zone is determined by a kernelcore= parameter specified at boot-time.  This specifies how much memory is usable by non-movable allocations; the remainder is used for ZONE_MOVABLE.  Any range of pages within ZONE_MOVABLE can be released by migrating the pages or by reclaiming.

When selecting a zone to take pages from for ZONE_MOVABLE, there are two things to consider.  First, only memory from the highest populated zone is used for ZONE_MOVABLE.  On the x86, this is probably going to be ZONE_HIGHMEM, but it would be ZONE_DMA on ppc64 or possibly ZONE_DMA32 on x86_64.  Second, the amount of memory usable by the kernel will be spread evenly throughout NUMA nodes where possible.  If the nodes are not of equal size, the amount of memory usable by the kernel on some nodes may be greater than on others.

By default, the zone is not as useful for hugetlb allocations because they are pinned and non-migratable (currently at least).  A sysctl is provided that allows huge pages to be allocated from that zone.  This means that the huge page pool can be resized to the size of ZONE_MOVABLE during the lifetime of the system, assuming that pages are not mlocked.  Despite huge pages being non-movable, we do not introduce additional external fragmentation of note, as huge pages are always the largest contiguous block we care about.

Credit goes to Andy Whitcroft for catching a large variety of problems during review of the patches.

This patch creates an additional zone, ZONE_MOVABLE.  This zone is only usable by allocations which specify both __GFP_HIGHMEM and __GFP_MOVABLE.  Hot-added memory continues to be placed in its existing destination, as there is no mechanism to redirect it to a specific zone.

[y-goto@jp.fujitsu.com: Fix section mismatch of memory hotplug related code]
[akpm@linux-foundation.org: various fixes]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
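For illustration only (not part of this patch): a caller opts into the new zone by setting both __GFP_HIGHMEM and __GFP_MOVABLE on the allocation.  The helper name below is hypothetical, and GFP_HIGHUSER plus the explicit __GFP_MOVABLE bit is just one way to build the required mask.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical example: allocate a page the kernel is allowed to migrate
 * later.  Both __GFP_HIGHMEM and __GFP_MOVABLE are set, so gfp_zone()
 * maps the request to ZONE_MOVABLE; dropping __GFP_MOVABLE would send it
 * to ZONE_HIGHMEM instead (on a CONFIG_HIGHMEM kernel).
 */
static struct page *alloc_movable_page(void)
{
	return alloc_page(GFP_HIGHUSER | __GFP_MOVABLE);
}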
Diffstat (limited to 'include')
-rw-r--r--  include/linux/gfp.h      3
-rw-r--r--  include/linux/mm.h       1
-rw-r--r--  include/linux/mmzone.h  20
-rw-r--r--  include/linux/vmstat.h   5
4 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index e5882fe49f83..bc68dd9a6d41 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -106,6 +106,9 @@ static inline enum zone_type gfp_zone(gfp_t flags)
 	if (flags & __GFP_DMA32)
 		return ZONE_DMA32;
 #endif
+	if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
+			(__GFP_HIGHMEM | __GFP_MOVABLE))
+		return ZONE_MOVABLE;
 #ifdef CONFIG_HIGHMEM
 	if (flags & __GFP_HIGHMEM)
 		return ZONE_HIGHMEM;
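To make the new ordering concrete, a rough mapping of flag combinations to zones after this hunk, assuming CONFIG_HIGHMEM=y and no DMA32 zone (illustration only, not part of the patch):

/*
 * gfp_zone(GFP_KERNEL)                    -> ZONE_NORMAL
 * gfp_zone(GFP_KERNEL | __GFP_MOVABLE)    -> ZONE_NORMAL  (movable alone is not enough)
 * gfp_zone(GFP_HIGHUSER)                  -> ZONE_HIGHMEM
 * gfp_zone(GFP_HIGHUSER | __GFP_MOVABLE)  -> ZONE_MOVABLE
 */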
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 97d0cddfd223..857e44817178 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1005,6 +1005,7 @@ extern unsigned long find_max_pfn_with_active_regions(void);
 extern void free_bootmem_with_active_regions(int nid,
 						unsigned long max_low_pfn);
 extern void sparse_memory_present_with_active_regions(int nid);
+extern int cmdline_parse_kernelcore(char *p);
 #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
 extern int early_pfn_to_nid(unsigned long pfn);
 #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
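cmdline_parse_kernelcore() is made visible here so boot code can hand it the value of the kernelcore= option described in the changelog (e.g. kernelcore=512M).  How it is actually invoked lives outside this header-only diff; the registration below is only a plausible sketch, assuming an early_param() hook-up that this patch does not show.

#include <linux/init.h>

/* Sketch only: pass "kernelcore=nn[KMG]" from the kernel command line to
 * the parser added by this series.  The real call site is in mm/arch
 * code, not in this header diff.
 */
early_param("kernelcore", cmdline_parse_kernelcore);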
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 04b1636a970b..d71ff763c9df 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -146,6 +146,7 @@ enum zone_type {
 	 */
 	ZONE_HIGHMEM,
 #endif
+	ZONE_MOVABLE,
 	MAX_NR_ZONES
 };
 
@@ -167,6 +168,7 @@ enum zone_type {
 	+ defined(CONFIG_ZONE_DMA32)	\
 	+ 1				\
 	+ defined(CONFIG_HIGHMEM)	\
+	+ 1				\
 )
 #if __ZONE_COUNT < 2
 #define ZONES_SHIFT 0
@@ -499,10 +501,22 @@ static inline int populated_zone(struct zone *zone)
 	return (!!zone->present_pages);
 }
 
+extern int movable_zone;
+
+static inline int zone_movable_is_highmem(void)
+{
+#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
+	return movable_zone == ZONE_HIGHMEM;
+#else
+	return 0;
+#endif
+}
+
 static inline int is_highmem_idx(enum zone_type idx)
 {
 #ifdef CONFIG_HIGHMEM
-	return (idx == ZONE_HIGHMEM);
+	return (idx == ZONE_HIGHMEM ||
+		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
 #else
 	return 0;
 #endif
@@ -522,7 +536,9 @@ static inline int is_normal_idx(enum zone_type idx)
 static inline int is_highmem(struct zone *zone)
 {
 #ifdef CONFIG_HIGHMEM
-	return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
+	int zone_idx = zone - zone->zone_pgdat->node_zones;
+	return zone_idx == ZONE_HIGHMEM ||
+		(zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
 #else
 	return 0;
 #endif
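zone_movable_is_highmem() relies on movable_zone, which the zone-sizing code is expected to set to the index of the highest populated zone that ZONE_MOVABLE takes its pages from, as described in the changelog.  An illustration (not part of the patch) of what that means for the highmem tests:

/*
 * On a 32-bit x86 machine with highmem, the highest populated zone is
 * ZONE_HIGHMEM, so boot code sets movable_zone = ZONE_HIGHMEM and:
 *
 *   zone_movable_is_highmem()    == 1
 *   is_highmem_idx(ZONE_MOVABLE) == 1   (movable pages are highmem pages)
 *
 * On ppc64, where the memory sits in ZONE_DMA, movable_zone ends up as
 * ZONE_DMA and ZONE_MOVABLE is not counted as highmem.
 */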
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index d9325cf8a134..75370ec0923e 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -25,7 +25,7 @@
 #define HIGHMEM_ZONE(xx)
 #endif
 
-#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx)
+#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 		FOR_ALL_ZONES(PGALLOC),
@@ -170,7 +170,8 @@ static inline unsigned long node_page_state(int node,
 #ifdef CONFIG_HIGHMEM
 	zone_page_state(&zones[ZONE_HIGHMEM], item) +
 #endif
-	zone_page_state(&zones[ZONE_NORMAL], item);
+	zone_page_state(&zones[ZONE_NORMAL], item) +
+	zone_page_state(&zones[ZONE_MOVABLE], item);
 }
 
 extern void zone_statistics(struct zonelist *, struct zone *);
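With the FOR_ALL_ZONES() change, every per-zone VM event also gains a _MOVABLE variant, and node_page_state() now folds ZONE_MOVABLE pages into the per-node totals.  As an illustration (the per-config DMA_ZONE()/HIGHMEM_ZONE() bodies are assumed from the surrounding header, which this hunk does not show):

/*
 * With CONFIG_ZONE_DMA=y, CONFIG_ZONE_DMA32=n and CONFIG_HIGHMEM=y,
 * FOR_ALL_ZONES(PGALLOC) should now expand along the lines of:
 *
 *   PGALLOC_DMA, PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE
 *
 * giving /proc/vmstat a pgalloc_movable counter next to the existing
 * per-zone pgalloc counters.
 */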