path: root/include/linux/mmzone.h
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h	120
1 file changed, 75 insertions(+), 45 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index f45163c528e8..3693f1a52788 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -51,7 +51,8 @@ enum zone_stat_item {
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
-	NR_SLAB,	/* Pages used by slab allocator */
+	NR_SLAB_RECLAIMABLE,
+	NR_SLAB_UNRECLAIMABLE,
 	NR_PAGETABLE,	/* used for pagetables */
 	NR_FILE_DIRTY,
 	NR_WRITEBACK,
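
With NR_SLAB split in two, callers must pick the population they care about: reclaim heuristics look only at NR_SLAB_RECLAIMABLE, while a total (such as the "Slab:" line in /proc/meminfo) sums both. A minimal sketch, assuming the ZVC helper global_page_state() from include/linux/vmstat.h; the function name is illustrative:

#include <linux/vmstat.h>

/* Illustrative only: the combined figure the old NR_SLAB provided. */
static inline unsigned long total_slab_pages(void)
{
	return global_page_state(NR_SLAB_RECLAIMABLE) +
	       global_page_state(NR_SLAB_UNRECLAIMABLE);
}
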
@@ -88,53 +89,68 @@ struct per_cpu_pageset {
 #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
 #endif
 
-#define ZONE_DMA		0
-#define ZONE_DMA32		1
-#define ZONE_NORMAL		2
-#define ZONE_HIGHMEM		3
-
-#define MAX_NR_ZONES		4	/* Sync this with ZONES_SHIFT */
-#define ZONES_SHIFT		2	/* ceil(log2(MAX_NR_ZONES)) */
-
+enum zone_type {
+	/*
+	 * ZONE_DMA is used when there are devices that are not able
+	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
+	 * carve out the portion of memory that is needed for these devices.
+	 * The range is arch specific.
+	 *
+	 * Some examples
+	 *
+	 * Architecture		Limit
+	 * ---------------------------
+	 * parisc, ia64, sparc	<4G
+	 * s390			<2G
+	 * arm26		<48M
+	 * arm			Various
+	 * alpha		Unlimited or 0-16MB.
+	 *
+	 * i386, x86_64 and multiple other arches
+	 *			<16M.
+	 */
+	ZONE_DMA,
+#ifdef CONFIG_ZONE_DMA32
+	/*
+	 * x86_64 needs two ZONE_DMAs because it supports devices that are
+	 * only able to do DMA to the lower 16M but also 32 bit devices that
+	 * can only do DMA areas below 4G.
+	 */
+	ZONE_DMA32,
+#endif
+	/*
+	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
+	 * performed on pages in ZONE_NORMAL if the DMA devices support
+	 * transfers to all addressable memory.
+	 */
+	ZONE_NORMAL,
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * A memory area that is only addressable by the kernel through
+	 * mapping portions into its own address space. This is for example
+	 * used by i386 to allow the kernel to address the memory beyond
+	 * 900MB. The kernel will set up special mappings (page
+	 * table entries on i386) for each page that the kernel needs to
+	 * access.
+	 */
+	ZONE_HIGHMEM,
+#endif
+	MAX_NR_ZONES
+};
 
 /*
  * When a memory allocation must conform to specific limitations (such
  * as being suitable for DMA) the caller will pass in hints to the
  * allocator in the gfp_mask, in the zone modifier bits.  These bits
  * are used to select a priority ordered list of memory zones which
- * match the requested limits. GFP_ZONEMASK defines which bits within
- * the gfp_mask should be considered as zone modifiers.  Each valid
- * combination of the zone modifier bits has a corresponding list
- * of zones (in node_zonelists).  Thus for two zone modifiers there
- * will be a maximum of 4 (2 ** 2) zonelists, for 3 modifiers there will
- * be 8 (2 ** 3) zonelists.  GFP_ZONETYPES defines the number of possible
- * combinations of zone modifiers in "zone modifier space".
- *
- * As an optimisation any zone modifier bits which are only valid when
- * no other zone modifier bits are set (loners) should be placed in
- * the highest order bits of this field.  This allows us to reduce the
- * extent of the zonelists thus saving space.  For example in the case
- * of three zone modifier bits, we could require up to eight zonelists.
- * If the left most zone modifier is a "loner" then the highest valid
- * zonelist would be four allowing us to allocate only five zonelists.
- * Use the first form for GFP_ZONETYPES when the left most bit is not
- * a "loner", otherwise use the second.
- *
- * NOTE! Make sure this matches the zones in <linux/gfp.h>
+ * match the requested limits. See gfp_zone() in include/linux/gfp.h
  */
-#define GFP_ZONEMASK	0x07
-/* #define GFP_ZONETYPES	(GFP_ZONEMASK + 1) */		/* Non-loner */
-#define GFP_ZONETYPES	((GFP_ZONEMASK + 1) / 2 + 1)		/* Loner */
 
-/*
- * On machines where it is needed (eg PCs) we divide physical memory
- * into multiple physical zones. On a 32bit PC we have 4 zones:
- *
- * ZONE_DMA	  < 16 MB	ISA DMA capable memory
- * ZONE_DMA32	     0 MB	Empty
- * ZONE_NORMAL	16-896 MB	direct mapped by the kernel
- * ZONE_HIGHMEM	 > 896 MB	only page cache and user processes
- */
+#if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
+#define ZONES_SHIFT 1
+#else
+#define ZONES_SHIFT 2
+#endif
 
 struct zone {
 	/* Fields commonly accessed by the page allocator */
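
Since zone indices are packed into page->flags, ZONES_SHIFT must still satisfy MAX_NR_ZONES <= (1 << ZONES_SHIFT): with both optional zones configured out MAX_NR_ZONES is 2 (ZONE_DMA, ZONE_NORMAL) and one bit suffices; with either or both enabled it is 3 or 4 and two bits are needed. A compile-time sanity sketch, not part of the patch; the function name is illustrative:

#include <linux/kernel.h>
#include <linux/mmzone.h>

/* Fails the build if the zone index no longer fits its bitfield. */
static inline void zones_shift_fits(void)
{
	BUILD_BUG_ON(MAX_NR_ZONES > (1 << ZONES_SHIFT));
}
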
@@ -154,7 +170,8 @@ struct zone {
 	/*
 	 * zone reclaim becomes active if more unmapped pages exist.
 	 */
-	unsigned long		min_unmapped_ratio;
+	unsigned long		min_unmapped_pages;
+	unsigned long		min_slab_pages;
 	struct per_cpu_pageset	*pageset[NR_CPUS];
 #else
 	struct per_cpu_pageset	pageset[NR_CPUS];
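
The renamed min_unmapped_pages and the new min_slab_pages are per-zone floors (derived from the vm.min_unmapped_ratio and vm.min_slab_ratio sysctls) below which zone reclaim is not worth triggering. A hypothetical helper sketching that gate, assuming CONFIG_NUMA (where these fields live) and the per-zone ZVC helper zone_page_state():

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* Reclaim only pays off if unmapped pagecache or reclaimable slab
 * on this zone exceeds its configured floor. */
static inline int zone_reclaim_worthwhile(struct zone *zone)
{
	unsigned long unmapped;

	unmapped = zone_page_state(zone, NR_FILE_PAGES) -
		   zone_page_state(zone, NR_FILE_MAPPED);

	return unmapped > zone->min_unmapped_pages ||
	       zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
						zone->min_slab_pages;
}
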
@@ -266,7 +283,6 @@ struct zone {
 	char			*name;
 } ____cacheline_internodealigned_in_smp;
 
-
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
@@ -304,7 +320,7 @@ struct zonelist {
 struct bootmem_data;
 typedef struct pglist_data {
 	struct zone node_zones[MAX_NR_ZONES];
-	struct zonelist node_zonelists[GFP_ZONETYPES];
+	struct zonelist node_zonelists[MAX_NR_ZONES];
 	int nr_zones;
 #ifdef CONFIG_FLAT_NODE_MEM_MAP
 	struct page	*node_mem_map;
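
Sizing node_zonelists by MAX_NR_ZONES means the zonelist for an allocation is indexed directly by the highest usable zone that gfp_zone() derives from the gfp_mask, instead of by a packed GFP_ZONETYPES combination of modifier bits. A sketch with an illustrative helper name:

#include <linux/gfp.h>
#include <linux/mmzone.h>

/* One zonelist per zone type: a direct index, no bit packing. */
static inline struct zonelist *zonelist_for_alloc(pg_data_t *pgdat,
						  gfp_t gfp_flags)
{
	return &pgdat->node_zonelists[gfp_zone(gfp_flags)];
}
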
@@ -373,12 +389,16 @@ static inline int populated_zone(struct zone *zone)
 	return (!!zone->present_pages);
 }
 
-static inline int is_highmem_idx(int idx)
+static inline int is_highmem_idx(enum zone_type idx)
 {
+#ifdef CONFIG_HIGHMEM
 	return (idx == ZONE_HIGHMEM);
+#else
+	return 0;
+#endif
 }
 
-static inline int is_normal_idx(int idx)
+static inline int is_normal_idx(enum zone_type idx)
 {
 	return (idx == ZONE_NORMAL);
 }
@@ -391,7 +411,11 @@ static inline int is_normal_idx(int idx)
  */
 static inline int is_highmem(struct zone *zone)
 {
+#ifdef CONFIG_HIGHMEM
 	return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
+#else
+	return 0;
+#endif
 }
 
 static inline int is_normal(struct zone *zone)
@@ -401,7 +425,11 @@ static inline int is_normal(struct zone *zone)
 
 static inline int is_dma32(struct zone *zone)
 {
+#ifdef CONFIG_ZONE_DMA32
 	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
+#else
+	return 0;
+#endif
 }
 
 static inline int is_dma(struct zone *zone)
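
The constant-0 stubs exist so callers keep a single unconditional code path while the compiler folds the test away on kernels where the zone is configured out. A hypothetical caller:

#include <linux/mmzone.h>

static unsigned long zone_lowmem_pages(struct zone *zone)
{
	if (is_highmem(zone))	/* becomes if (0) without CONFIG_HIGHMEM */
		return 0;
	return zone->present_pages;
}
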
@@ -421,6 +449,8 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file
 			void __user *, size_t *, loff_t *);
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
 			struct file *, void __user *, size_t *, loff_t *);
+int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
+			struct file *, void __user *, size_t *, loff_t *);
 
 #include <linux/topology.h>
 /* Returns the number of the current Node. */
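
The new handler backs the vm.min_slab_ratio sysctl the same way its min_unmapped_ratio sibling does. A sketch of the kind of ctl_table entry in kernel/sysctl.c that would expose it as /proc/sys/vm/min_slab_ratio (field values illustrative; the real entry also sets ctl_name and bounds):

#include <linux/sysctl.h>

static int sysctl_min_slab_ratio = 5;	/* percent of zone pages */

static struct ctl_table vm_table_excerpt[] = {
	{
		.procname	= "min_slab_ratio",
		.data		= &sysctl_min_slab_ratio,
		.maxlen		= sizeof(sysctl_min_slab_ratio),
		.mode		= 0644,
		.proc_handler	= &sysctl_min_slab_ratio_sysctl_handler,
	},
	{ .ctl_name = 0 }
};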