author	Christoph Lameter <clameter@sgi.com>	2006-09-26 02:31:13 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 11:48:46 -0400
commit	2f1b6248682f8b39ca3c7e549dfc216d26c4109b (patch)
tree	2340347d10fd0e564fb8527efe3ffbcb216e1906
parent	98d2b0ebda72fc39cdefd3720d50b9b3ce409085 (diff)
[PATCH] reduce MAX_NR_ZONES: use enum to define zones, reformat and comment

Use enum for zones and reformat the zone-dependent information. Add
comments explaining the use of zones and add an enum zone_type for zone
numbers. Line up information that will be #ifdef'd by the following
patches.

[akpm@osdl.org: comment cleanups]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/linux/mm.h	7
-rw-r--r--	include/linux/mmzone.h	66
-rw-r--r--	mm/page_alloc.c	24
3 files changed, 69 insertions(+), 28 deletions(-)
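
The heart of the patch is the C idiom where an enum's final enumerator doubles as the element count: MAX_NR_ZONES now tracks the zone list by construction instead of through a hand-synchronized #define. A minimal standalone sketch of the idiom, assuming nothing from the kernel beyond the zone names (the printing scaffolding is invented for illustration):

#include <stdio.h>

/* The last enumerator doubles as the count: adding a zone before
 * MAX_NR_ZONES automatically grows every array sized by it. */
enum zone_type {
	ZONE_DMA,
	ZONE_DMA32,
	ZONE_NORMAL,
	ZONE_HIGHMEM,
	MAX_NR_ZONES
};

static const char *zone_names[MAX_NR_ZONES] = {
	"DMA", "DMA32", "Normal", "HighMem"
};

int main(void)
{
	enum zone_type z;

	for (z = 0; z < MAX_NR_ZONES; z++)
		printf("zone %d: %s\n", (int)z, zone_names[z]);
	return 0;
}

This is why the old "Sync this with ZONES_SHIFT" reminder on MAX_NR_ZONES could be dropped below: only ZONES_SHIFT still needs manual attention.
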
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 45678b036955..2db4229a0066 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -470,7 +470,7 @@ void split_page(struct page *page, unsigned int order);
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 #define ZONETABLE_MASK		((1UL << ZONETABLE_SHIFT) - 1)
 
-static inline unsigned long page_zonenum(struct page *page)
+static inline enum zone_type page_zonenum(struct page *page)
 {
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
@@ -499,11 +499,12 @@ static inline unsigned long page_to_section(struct page *page)
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
 
-static inline void set_page_zone(struct page *page, unsigned long zone)
+static inline void set_page_zone(struct page *page, enum zone_type zone)
 {
 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 }
+
 static inline void set_page_node(struct page *page, unsigned long node)
 {
 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
@@ -515,7 +516,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline void set_page_links(struct page *page, unsigned long zone,
+static inline void set_page_links(struct page *page, enum zone_type zone,
 	unsigned long node, unsigned long pfn)
 {
 	set_page_zone(page, zone);
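
The mm.h hunks above only retype the zone parameter; the mechanics still pack the zone number into a bitfield of page->flags. A self-contained sketch of that encode/decode round trip, with names mirroring the patch but the bit placement (ZONES_PGSHIFT) assumed for illustration, since the real value is configuration-dependent:

#include <assert.h>

enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

#define ZONES_SHIFT	2				/* ceil(log2(MAX_NR_ZONES)) */
#define ZONES_PGSHIFT	0				/* assumed placement in flags */
#define ZONES_MASK	((1UL << ZONES_SHIFT) - 1)

struct page { unsigned long flags; };			/* reduced to the one field used */

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);	/* clear the old zone bits */
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline enum zone_type page_zonenum(struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

int main(void)
{
	struct page p = { 0 };

	set_page_zone(&p, ZONE_HIGHMEM);
	assert(page_zonenum(&p) == ZONE_HIGHMEM);	/* round trip is lossless */
	return 0;
}
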
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f45163c528e8..03a5a6eb0ffa 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -88,14 +88,53 @@ struct per_cpu_pageset {
 #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
 #endif
 
-#define ZONE_DMA		0
-#define ZONE_DMA32		1
-#define ZONE_NORMAL		2
-#define ZONE_HIGHMEM		3
+enum zone_type {
+	/*
+	 * ZONE_DMA is used when there are devices that are not able
+	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
+	 * carve out the portion of memory that is needed for these devices.
+	 * The range is arch specific.
+	 *
+	 * Some examples
+	 *
+	 * Architecture		Limit
+	 * ---------------------------
+	 * parisc, ia64, sparc	<4G
+	 * s390			<2G
+	 * arm26		<48M
+	 * arm			Various
+	 * alpha		Unlimited or 0-16MB.
+	 *
+	 * i386, x86_64 and multiple other arches
+	 *			<16M.
+	 */
+	ZONE_DMA,
+	/*
+	 * x86_64 needs two ZONE_DMAs because it supports devices that are
+	 * only able to do DMA to the lower 16M but also 32 bit devices that
+	 * can only do DMA areas below 4G.
+	 */
+	ZONE_DMA32,
+	/*
+	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
+	 * performed on pages in ZONE_NORMAL if the DMA devices support
+	 * transfers to all addressable memory.
+	 */
+	ZONE_NORMAL,
+	/*
+	 * A memory area that is only addressable by the kernel through
+	 * mapping portions into its own address space. This is for example
+	 * used by i386 to allow the kernel to address the memory beyond
+	 * 900MB. The kernel will set up special mappings (page
+	 * table entries on i386) for each page that the kernel needs to
+	 * access.
+	 */
+	ZONE_HIGHMEM,
 
-#define MAX_NR_ZONES		4	/* Sync this with ZONES_SHIFT */
-#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
+	MAX_NR_ZONES
+};
 
+#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
 
 /*
  * When a memory allocation must conform to specific limitations (such
@@ -126,16 +165,6 @@ struct per_cpu_pageset {
 /* #define GFP_ZONETYPES (GFP_ZONEMASK + 1) */		/* Non-loner */
 #define GFP_ZONETYPES  ((GFP_ZONEMASK + 1) / 2 + 1)	/* Loner */
 
-/*
- * On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a 32bit PC we have 4 zones:
- *
- * ZONE_DMA	  < 16 MB	ISA DMA capable memory
- * ZONE_DMA32	     0 MB	Empty
- * ZONE_NORMAL	16-896 MB	direct mapped by the kernel
- * ZONE_HIGHMEM	 > 896 MB	only page cache and user processes
- */
-
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 	unsigned long		free_pages;
@@ -266,7 +295,6 @@ struct zone {
 	char			*name;
 } ____cacheline_internodealigned_in_smp;
 
-
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -373,12 +401,12 @@ static inline int populated_zone(struct zone *zone)
 	return (!!zone->present_pages);
 }
 
-static inline int is_highmem_idx(int idx)
+static inline int is_highmem_idx(enum zone_type idx)
 {
 	return (idx == ZONE_HIGHMEM);
 }
 
-static inline int is_normal_idx(int idx)
+static inline int is_normal_idx(enum zone_type idx)
 {
 	return (idx == ZONE_NORMAL);
 }
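
Note that ZONES_SHIFT stays a hand-maintained #define whose comment records the invariant ZONES_SHIFT >= ceil(log2(MAX_NR_ZONES)); with four real zones, 2 bits suffice. The patch does not enforce the invariant, but the enum makes a build-time check possible. A sketch using the negative-array-size trick common in 2006-era C (static_assert arrived only with C11); the typedef name is invented:

enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

#define ZONES_SHIFT	2	/* ceil(log2(MAX_NR_ZONES)) */

/* Compilation fails with a negative array size if a zone is added
 * without widening ZONES_SHIFT. */
typedef char zones_shift_is_wide_enough[
		MAX_NR_ZONES <= (1 << ZONES_SHIFT) ? 1 : -1];
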
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 13c102b95c57..2410a3cb1c53 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -68,7 +68,11 @@ static void __free_pages_ok(struct page *page, unsigned int order);
  * TBD: should special case ZONE_DMA32 machines here - in those we normally
  * don't need any ZONE_NORMAL reservation
  */
-int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = { 256, 256, 32 };
+int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
+	 256,
+	 256,
+	 32
+};
 
 EXPORT_SYMBOL(totalram_pages);
 
@@ -79,7 +83,13 @@ EXPORT_SYMBOL(totalram_pages);
 struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
 EXPORT_SYMBOL(zone_table);
 
-static char *zone_names[MAX_NR_ZONES] = { "DMA", "DMA32", "Normal", "HighMem" };
+static char *zone_names[MAX_NR_ZONES] = {
+	 "DMA",
+	 "DMA32",
+	 "Normal",
+	 "HighMem"
+};
+
 int min_free_kbytes = 1024;
 
 unsigned long __meminitdata nr_kernel_pages;
@@ -1487,7 +1497,9 @@ static void __meminit build_zonelists(pg_data_t *pgdat)
 
 static void __meminit build_zonelists(pg_data_t *pgdat)
 {
-	int i, j, k, node, local_node;
+	int i, node, local_node;
+	enum zone_type k;
+	enum zone_type j;
 
 	local_node = pgdat->node_id;
 	for (i = 0; i < GFP_ZONETYPES; i++) {
@@ -1675,8 +1687,8 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
 }
 
 #define ZONETABLE_INDEX(x, zone_nr)	((x << ZONES_SHIFT) | zone_nr)
-void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
-		unsigned long size)
+void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
+		unsigned long pfn, unsigned long size)
 {
 	unsigned long snum = pfn_to_section_nr(pfn);
 	unsigned long end = pfn_to_section_nr(pfn + size);
@@ -1960,7 +1972,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 static void __meminit free_area_init_core(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
-	unsigned long j;
+	enum zone_type j;
 	int nid = pgdat->node_id;
 	unsigned long zone_start_pfn = pgdat->node_start_pfn;
 	int ret;
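
Finally, the reformatted sysctl_lowmem_reserve_ratio initializer makes its sizing easier to see: there are MAX_NR_ZONES-1 entries because each ratio protects one lower zone from allocations that could have been served by the zones above it, and the topmost zone needs no such protection. A sketch of that correspondence; the per-entry comments are an interpretation for illustration, not text from the patch:

enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES - 1] = {
	256,	/* ZONE_DMA: keep ~1/256th free against higher-zone fallback */
	256,	/* ZONE_DMA32: same ratio */
	32	/* ZONE_NORMAL: keep ~1/32nd free against ZONE_HIGHMEM fallback */
};
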