author    Christoph Lameter <clameter@sgi.com>    2006-09-26 02:31:13 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-09-26 11:48:46 -0400
commit    2f1b6248682f8b39ca3c7e549dfc216d26c4109b (patch)
tree      2340347d10fd0e564fb8527efe3ffbcb216e1906 /include/linux
parent    98d2b0ebda72fc39cdefd3720d50b9b3ce409085 (diff)
[PATCH] reduce MAX_NR_ZONES: use enum to define zones, reformat and comment
Use an enum for zones and reformat the zone-dependent information. Add comments explaining the use of zones and add an enum zone_type for zone numbers. Line up information that will be #ifdef'd by the following patches.

[akpm@osdl.org: comment cleanups]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
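For context, a freestanding sketch (not part of the commit) of the pattern the patch adopts: because the enum both names the zones and derives the zone count, MAX_NR_ZONES can never fall out of sync with the zone list the way the old hand-maintained "#define MAX_NR_ZONES 4 /* Sync this with ZONES_SHIFT */" could. The zone names below mirror the patch; the zone_names table and main() are illustrative only.

	#include <stdio.h>

	/* The trailing MAX_NR_ZONES member is always one past the last
	 * real zone, so it tracks the list automatically. */
	enum zone_type {
		ZONE_DMA,
		ZONE_DMA32,
		ZONE_NORMAL,
		ZONE_HIGHMEM,
		MAX_NR_ZONES
	};

	static const char *zone_names[MAX_NR_ZONES] = {
		"DMA", "DMA32", "Normal", "HighMem"
	};

	int main(void)
	{
		enum zone_type z;

		/* Iterate over all zones; adding a zone to the enum grows
		 * the loop and the array bound with no other edits. */
		for (z = 0; z < MAX_NR_ZONES; z++)
			printf("zone %d: %s\n", z, zone_names[z]);
		return 0;
	}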
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/mm.h       7
-rw-r--r--  include/linux/mmzone.h  66
2 files changed, 51 insertions, 22 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 45678b036955..2db4229a0066 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -470,7 +470,7 @@ void split_page(struct page *page, unsigned int order);
 #define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 #define ZONETABLE_MASK		((1UL << ZONETABLE_SHIFT) - 1)
 
-static inline unsigned long page_zonenum(struct page *page)
+static inline enum zone_type page_zonenum(struct page *page)
 {
 	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
@@ -499,11 +499,12 @@ static inline unsigned long page_to_section(struct page *page)
 	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
 
-static inline void set_page_zone(struct page *page, unsigned long zone)
+static inline void set_page_zone(struct page *page, enum zone_type zone)
 {
 	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
 	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 }
+
 static inline void set_page_node(struct page *page, unsigned long node)
 {
 	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
@@ -515,7 +516,7 @@ static inline void set_page_section(struct page *page, unsigned long section)
 	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline void set_page_links(struct page *page, unsigned long zone,
+static inline void set_page_links(struct page *page, enum zone_type zone,
 	unsigned long node, unsigned long pfn)
 {
 	set_page_zone(page, zone);
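The mm.h helpers above pack the zone index into a few bits of page->flags alongside the node and section numbers. A self-contained sketch of that mask-and-shift scheme follows; the shift value and the standalone struct page are illustrative assumptions (in the kernel, ZONES_PGSHIFT and ZONES_MASK are derived from ZONES_SHIFT and the other fields sharing page->flags, not fixed by hand):

	#include <assert.h>
	#include <stdio.h>

	enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM,
			 MAX_NR_ZONES };

	/* Illustrative layout: pretend the zone bits sit at the top of a
	 * 64-bit flags word. */
	#define ZONES_SHIFT	2	/* ceil(log2(MAX_NR_ZONES)) */
	#define ZONES_PGSHIFT	62
	#define ZONES_MASK	((1UL << ZONES_SHIFT) - 1)

	struct page { unsigned long flags; };

	static inline enum zone_type page_zonenum(struct page *page)
	{
		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
	}

	static inline void set_page_zone(struct page *page, enum zone_type zone)
	{
		/* Clear the old zone bits, then store the new ones. */
		page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
		page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
	}

	int main(void)
	{
		struct page p = { .flags = 0x1234 };	/* unrelated flag bits */

		set_page_zone(&p, ZONE_HIGHMEM);
		assert(page_zonenum(&p) == ZONE_HIGHMEM);
		assert((p.flags & 0xffff) == 0x1234);	/* other bits untouched */
		printf("zone stored and retrieved: %d\n", page_zonenum(&p));
		return 0;
	}

The enum parameter is exactly why this conversion is worthwhile: callers passing an arbitrary unsigned long where a zone index belongs now stand out, while the generated code is unchanged.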
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f45163c528e8..03a5a6eb0ffa 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -88,14 +88,53 @@ struct per_cpu_pageset {
 #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
 #endif
 
-#define ZONE_DMA		0
-#define ZONE_DMA32		1
-#define ZONE_NORMAL		2
-#define ZONE_HIGHMEM		3
+enum zone_type {
+	/*
+	 * ZONE_DMA is used when there are devices that are not able
+	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
+	 * carve out the portion of memory that is needed for these devices.
+	 * The range is arch specific.
+	 *
+	 * Some examples
+	 *
+	 * Architecture		Limit
+	 * ---------------------------
+	 * parisc, ia64, sparc	<4G
+	 * s390			<2G
+	 * arm26		<48M
+	 * arm			Various
+	 * alpha		Unlimited or 0-16MB.
+	 *
+	 * i386, x86_64 and multiple other arches
+	 *			<16M.
+	 */
+	ZONE_DMA,
+	/*
+	 * x86_64 needs two ZONE_DMAs because it supports devices that are
+	 * only able to do DMA to the lower 16M but also 32 bit devices that
+	 * can only do DMA areas below 4G.
+	 */
+	ZONE_DMA32,
+	/*
+	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
+	 * performed on pages in ZONE_NORMAL if the DMA devices support
+	 * transfers to all addressable memory.
+	 */
+	ZONE_NORMAL,
+	/*
+	 * A memory area that is only addressable by the kernel through
+	 * mapping portions into its own address space. This is for example
+	 * used by i386 to allow the kernel to address the memory beyond
+	 * 900MB. The kernel will set up special mappings (page
+	 * table entries on i386) for each page that the kernel needs to
+	 * access.
+	 */
+	ZONE_HIGHMEM,
 
-#define MAX_NR_ZONES		4	/* Sync this with ZONES_SHIFT */
-#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
+	MAX_NR_ZONES
+};
 
+#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
 
 /*
  * When a memory allocation must conform to specific limitations (such
@@ -126,16 +165,6 @@ struct per_cpu_pageset {
 /* #define GFP_ZONETYPES	(GFP_ZONEMASK + 1) */	/* Non-loner */
 #define GFP_ZONETYPES	((GFP_ZONEMASK + 1) / 2 + 1)	/* Loner */
 
-/*
- * On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a 32bit PC we have 4 zones:
- *
- * ZONE_DMA	  < 16 MB	ISA DMA capable memory
- * ZONE_DMA32	   0 MB		Empty
- * ZONE_NORMAL	16-896 MB	direct mapped by the kernel
- * ZONE_HIGHMEM	 > 896 MB	only page cache and user processes
- */
-
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 	unsigned long free_pages;
@@ -266,7 +295,6 @@ struct zone {
 	char *name;
 } ____cacheline_internodealigned_in_smp;
 
-
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -373,12 +401,12 @@ static inline int populated_zone(struct zone *zone)
 	return (!!zone->present_pages);
 }
 
-static inline int is_highmem_idx(int idx)
+static inline int is_highmem_idx(enum zone_type idx)
 {
 	return (idx == ZONE_HIGHMEM);
 }
 
-static inline int is_normal_idx(int idx)
+static inline int is_normal_idx(enum zone_type idx)
 {
 	return (idx == ZONE_NORMAL);
 }
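One loose end the patch leaves in place: ZONES_SHIFT is still a hand-maintained constant that must satisfy ZONES_SHIFT >= ceil(log2(MAX_NR_ZONES)), since the zone index has to fit in ZONES_SHIFT bits of page->flags. A hypothetical compile-time guard, not part of this patch, could catch a mismatch; the kernel of this era would express it with BUILD_BUG_ON(), and a plain C equivalent is a negative array size:

	enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM,
			 MAX_NR_ZONES };

	#define ZONES_SHIFT 2	/* ceil(log2(MAX_NR_ZONES)) */

	/* Fails to compile (negative array size) if ZONES_SHIFT bits
	 * cannot encode every value below MAX_NR_ZONES. */
	typedef char zones_shift_too_small
		[(MAX_NR_ZONES <= (1 << ZONES_SHIFT)) ? 1 : -1];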