Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h | 42
1 files changed, 32 insertions, 10 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b262f47961fb..ee9e3143df4f 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -47,15 +47,20 @@ struct zone_padding {
 #endif
 
 enum zone_stat_item {
+	/* First 128 byte cacheline (assuming 64 bit words) */
+	NR_FREE_PAGES,
+	NR_INACTIVE,
+	NR_ACTIVE,
 	NR_ANON_PAGES,	/* Mapped anonymous pages */
 	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
 			   only modified from process context */
 	NR_FILE_PAGES,
-	NR_SLAB_RECLAIMABLE,
-	NR_SLAB_UNRECLAIMABLE,
-	NR_PAGETABLE,	/* used for pagetables */
 	NR_FILE_DIRTY,
 	NR_WRITEBACK,
+	/* Second 128 byte cacheline */
+	NR_SLAB_RECLAIMABLE,
+	NR_SLAB_UNRECLAIMABLE,
+	NR_PAGETABLE,	/* used for pagetables */
 	NR_UNSTABLE_NFS,	/* NFS unstable pages */
 	NR_BOUNCE,
 	NR_VMSCAN_WRITE,
@@ -91,6 +96,7 @@ struct per_cpu_pageset {
 #endif
 
 enum zone_type {
+#ifdef CONFIG_ZONE_DMA
 	/*
 	 * ZONE_DMA is used when there are devices that are not able
 	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
@@ -111,6 +117,7 @@ enum zone_type {
 	 * <16M.
 	 */
 	ZONE_DMA,
+#endif
 #ifdef CONFIG_ZONE_DMA32
 	/*
 	 * x86_64 needs two ZONE_DMAs because it supports devices that are
@@ -147,15 +154,30 @@ enum zone_type {
  * match the requested limits. See gfp_zone() in include/linux/gfp.h
  */
 
-#if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
+/*
+ * Count the active zones.  Note that the use of defined(X) outside
+ * #if and family is not necessarily defined so ensure we cannot use
+ * it later.  Use __ZONE_COUNT to work out how many shift bits we need.
+ */
+#define __ZONE_COUNT (			\
+	  defined(CONFIG_ZONE_DMA)	\
+	+ defined(CONFIG_ZONE_DMA32)	\
+	+ 1				\
+	+ defined(CONFIG_HIGHMEM)	\
+)
+#if __ZONE_COUNT < 2
+#define ZONES_SHIFT 0
+#elif __ZONE_COUNT <= 2
 #define ZONES_SHIFT 1
-#else
+#elif __ZONE_COUNT <= 4
 #define ZONES_SHIFT 2
+#else
+#error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
+#undef __ZONE_COUNT
 
 struct zone {
 	/* Fields commonly accessed by the page allocator */
-	unsigned long		free_pages;
 	unsigned long		pages_min, pages_low, pages_high;
 	/*
 	 * We don't know if the memory that we're going to allocate will be freeable
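
Note (not part of the patch): the __ZONE_COUNT arithmetic only has meaning inside an #if expression, where the preprocessor evaluates defined(X) to 0 or 1. The C standard does not guarantee defined() produced by macro expansion (it is formally undefined), but gcc evaluates it as intended, which is what this construct relies on. A minimal userspace sketch of how the shift works out, assuming a hypothetical configuration with CONFIG_ZONE_DMA and CONFIG_HIGHMEM enabled and CONFIG_ZONE_DMA32 unset, so __ZONE_COUNT evaluates to 1 + 0 + 1 + 1 = 3 and ZONES_SHIFT becomes 2:

/* zone_shift_demo.c -- illustration only, mirrors the macro above.
 * Build: cc -DCONFIG_ZONE_DMA -DCONFIG_HIGHMEM zone_shift_demo.c
 */
#include <stdio.h>

#define __ZONE_COUNT (			\
	  defined(CONFIG_ZONE_DMA)	\
	+ defined(CONFIG_ZONE_DMA32)	\
	+ 1				\
	+ defined(CONFIG_HIGHMEM)	\
)
#if __ZONE_COUNT < 2
#define ZONES_SHIFT 0
#elif __ZONE_COUNT <= 2
#define ZONES_SHIFT 1
#elif __ZONE_COUNT <= 4
#define ZONES_SHIFT 2
#else
#error too many zones for this demo
#endif
#undef __ZONE_COUNT

int main(void)
{
	printf("ZONES_SHIFT = %d\n", ZONES_SHIFT);	/* prints 2 with the flags above */
	return 0;
}
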
@@ -197,8 +219,6 @@ struct zone {
 	struct list_head	inactive_list;
 	unsigned long		nr_scan_active;
 	unsigned long		nr_scan_inactive;
-	unsigned long		nr_active;
-	unsigned long		nr_inactive;
 	unsigned long		pages_scanned;	   /* since last reclaim */
 	int			all_unreclaimable; /* All pages pinned */
 
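
Note (not part of the patch): with free_pages, nr_active and nr_inactive removed from struct zone, readers of these values are expected to use the per-zone vmstat counters added above (NR_FREE_PAGES, NR_ACTIVE, NR_INACTIVE) instead. A minimal kernel-context sketch of the replacement access pattern, assuming the zone_page_state() accessor from <linux/vmstat.h>; the helper name is made up for illustration:

#include <linux/mmzone.h>
#include <linux/vmstat.h>

/* hypothetical helper, illustration only */
static unsigned long example_zone_lru_pages(struct zone *zone)
{
	return zone_page_state(zone, NR_ACTIVE) +
	       zone_page_state(zone, NR_INACTIVE);
}
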
@@ -442,8 +462,6 @@ typedef struct pglist_data {
 
 #include <linux/memory_hotplug.h>
 
-void __get_zone_counts(unsigned long *active, unsigned long *inactive,
-			unsigned long *free, struct pglist_data *pgdat);
 void get_zone_counts(unsigned long *active, unsigned long *inactive,
 			unsigned long *free);
 void build_all_zonelists(void);
@@ -523,7 +541,11 @@ static inline int is_dma32(struct zone *zone)
 
 static inline int is_dma(struct zone *zone)
 {
+#ifdef CONFIG_ZONE_DMA
 	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
+#else
+	return 0;
+#endif
 }
 
 /* These two functions are used to setup the per zone pages min values */
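
Note (not part of the patch): guarding the body of is_dma() rather than its callers means code that tests for the DMA zone keeps compiling when CONFIG_ZONE_DMA is disabled; the function collapses to a constant 0 and the compiler can discard the dead branch. A hypothetical caller, for illustration only:

/* illustration only -- not in the kernel */
static int example_avoid_dma_zone(struct zone *zone)
{
	if (is_dma(zone))	/* constant 0 when !CONFIG_ZONE_DMA */
		return 1;	/* skip this zone */
	return 0;
}
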