Diffstat (limited to 'include/linux/mmzone.h'):
 include/linux/mmzone.h | 57 ++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 42 insertions(+), 15 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 230180c3eb61..656b588a9f96 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -14,6 +14,7 @@
 #include <linux/seqlock.h>
 #include <linux/nodemask.h>
 #include <asm/atomic.h>
+#include <asm/page.h>
 
 /* Free memory management - zoned buddy allocator. */
 #ifndef CONFIG_FORCE_MAX_ZONEORDER
@@ -45,6 +46,27 @@ struct zone_padding {
 #define ZONE_PADDING(name)
 #endif
 
+enum zone_stat_item {
+	NR_ANON_PAGES,		/* Mapped anonymous pages */
+	NR_FILE_MAPPED,		/* pagecache pages mapped into pagetables.
+				   only modified from process context */
+	NR_FILE_PAGES,
+	NR_SLAB,		/* Pages used by slab allocator */
+	NR_PAGETABLE,		/* used for pagetables */
+	NR_FILE_DIRTY,
+	NR_WRITEBACK,
+	NR_UNSTABLE_NFS,	/* NFS unstable pages */
+	NR_BOUNCE,
+#ifdef CONFIG_NUMA
+	NUMA_HIT,		/* allocated in intended node */
+	NUMA_MISS,		/* allocated in non intended node */
+	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
+	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
+	NUMA_LOCAL,		/* allocation from local node */
+	NUMA_OTHER,		/* allocation from other node */
+#endif
+	NR_VM_ZONE_STAT_ITEMS };
+
 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
@@ -54,13 +76,8 @@ struct per_cpu_pages {
 
 struct per_cpu_pageset {
 	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
-#ifdef CONFIG_NUMA
-	unsigned long numa_hit;		/* allocated in intended node */
-	unsigned long numa_miss;	/* allocated in non intended node */
-	unsigned long numa_foreign;	/* was intended here, hit elsewhere */
-	unsigned long interleave_hit;	/* interleaver prefered this zone */
-	unsigned long local_node;	/* allocation from local node */
-	unsigned long other_node;	/* allocation from other node */
+#ifdef CONFIG_SMP
+	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
 #endif
 } ____cacheline_aligned_in_smp;
 
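The small per-CPU vm_stat_diff[] added here pairs with the per-zone atomic_long_t vm_stat[] array introduced further down in this diff: counter updates accumulate in the CPU-local s8 delta and are only folded into the shared zone counter once the delta grows past some threshold, keeping atomics off the hot path. The update logic itself lives elsewhere in the patch set; the following is only a minimal sketch of the idea, with the helper name and threshold assumed rather than taken from this diff.

/* Sketch only: name and threshold are illustrative, not part of this diff. */
#define STAT_THRESHOLD 32

static void mod_zone_stat_sketch(struct zone *zone, struct per_cpu_pageset *pcp,
				 enum zone_stat_item item, int delta)
{
	s8 *diff = &pcp->vm_stat_diff[item];	/* this CPU's local delta */

	*diff += delta;				/* cheap, no atomics */
	if (*diff > STAT_THRESHOLD || *diff < -STAT_THRESHOLD) {
		/* Fold the accumulated delta into the shared zone counter. */
		atomic_long_add(*diff, &zone->vm_stat[item]);
		*diff = 0;
	}
}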
@@ -133,6 +150,10 @@ struct zone {
 	unsigned long		lowmem_reserve[MAX_NR_ZONES];
 
 #ifdef CONFIG_NUMA
+	/*
+	 * zone reclaim becomes active if more unmapped pages exist.
+	 */
+	unsigned long		min_unmapped_ratio;
 	struct per_cpu_pageset	*pageset[NR_CPUS];
 #else
 	struct per_cpu_pageset	pageset[NR_CPUS];
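The comment only says when zone reclaim "becomes active"; the field itself is what zone reclaim compares against the amount of unmapped pagecache in the zone, which the new counters above make cheap to compute. A hedged sketch of that kind of check, assuming the value has already been scaled from the sysctl percentage into a page count (the function name is illustrative, not from this diff):

/*
 * Sketch: only reclaim from a zone when enough of its pagecache is unmapped.
 * Assumes min_unmapped_ratio already holds a number of pages.
 */
static int zone_reclaim_worthwhile(struct zone *zone)
{
	long pagecache = atomic_long_read(&zone->vm_stat[NR_FILE_PAGES]);
	long mapped    = atomic_long_read(&zone->vm_stat[NR_FILE_MAPPED]);

	return pagecache - mapped > (long)zone->min_unmapped_ratio;
}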
@@ -164,12 +185,8 @@ struct zone {
 	/* A count of how many reclaimers are scanning this zone */
 	atomic_t		reclaim_in_progress;
 
-	/*
-	 * timestamp (in jiffies) of the last zone reclaim that did not
-	 * result in freeing of pages. This is used to avoid repeated scans
-	 * if all memory in the zone is in use.
-	 */
-	unsigned long		last_unsuccessful_zone_reclaim;
+	/* Zone statistics */
+	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
 	/*
 	 * prev_priority holds the scanning priority for this zone. It is
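With last_unsuccessful_zone_reclaim gone, per-zone state is read out of the new vm_stat array instead. The accessors belong to other files in the patch set; a minimal reader might look like the sketch below, where the clamp guards against transiently negative sums while per-CPU deltas have not yet been folded in (the helper name is assumed, not defined in this header).

/* Sketch: read one per-zone counter. */
static inline unsigned long zone_stat_read_sketch(struct zone *zone,
						  enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

	if (x < 0)	/* unfolded per-CPU deltas can leave it negative */
		x = 0;
	return x;
}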
@@ -196,7 +213,7 @@ struct zone {
 
 	/*
 	 * wait_table		-- the array holding the hash table
-	 * wait_table_size	-- the size of the hash table array
+	 * wait_table_hash_nr_entries	-- the size of the hash table array
 	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
 	 *
 	 * The purpose of all these is to keep track of the people
@@ -219,7 +236,7 @@ struct zone {
 	 * free_area_init_core() performs the initialization of them.
 	 */
 	wait_queue_head_t	* wait_table;
-	unsigned long		wait_table_size;
+	unsigned long		wait_table_hash_nr_entries;
 	unsigned long		wait_table_bits;
 
 	/*
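The rename makes the invariant in the comment explicit: wait_table_hash_nr_entries is always 1 << wait_table_bits, so any hash reduced to wait_table_bits bits indexes the table safely. A sketch of how a page could be mapped to its wait queue under that invariant; the real lookup lives in mm/, not in this header, and hash_ptr() from <linux/hash.h> is assumed here as the hash function.

#include <linux/hash.h>

/* Sketch: pick the wait queue for a page using wait_table_bits. */
static inline wait_queue_head_t *page_wait_queue_sketch(struct zone *zone,
							struct page *page)
{
	/*
	 * hash_ptr() returns a value below (1 << wait_table_bits), i.e. below
	 * wait_table_hash_nr_entries, so this never overruns the table.
	 */
	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}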
@@ -332,6 +349,9 @@ void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
 
+extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
+				     unsigned long size);
+
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
 #else
@@ -398,6 +418,8 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
 					void __user *, size_t *, loff_t *);
 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
 					void __user *, size_t *, loff_t *);
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
+			struct file *, void __user *, size_t *, loff_t *);
 
 #include <linux/topology.h>
 /* Returns the number of the current Node. */
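The new prototype is the proc handler for a vm sysctl; the table entry that points at it belongs in kernel/sysctl.c, not in this header. A sketch of what such an entry for a /proc/sys/vm/min_unmapped_ratio knob could plausibly look like, with the backing variable, the bounds, and any era-specific fields (such as .ctl_name/.strategy, omitted here) all assumed for illustration:

#include <linux/sysctl.h>

/* Sketch of a kernel/sysctl.c entry; the globals' names are assumed. */
static int min_unmapped_zero = 0, min_unmapped_max = 100;

static struct ctl_table vm_min_unmapped_entry = {
	.procname	= "min_unmapped_ratio",
	.data		= &sysctl_min_unmapped_ratio,	/* assumed to be an int */
	.maxlen		= sizeof(int),
	.mode		= 0644,
	.proc_handler	= &sysctl_min_unmapped_ratio_sysctl_handler,
	.extra1		= &min_unmapped_zero,		/* clamp to 0..100 percent */
	.extra2		= &min_unmapped_max,
};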
@@ -505,6 +527,10 @@ struct mem_section {
 	 * pages.  However, it is stored with some other magic.
 	 * (see sparse.c::sparse_init_one_section())
 	 *
+	 * Additionally during early boot we encode node id of
+	 * the location of the section here to guide allocation.
+	 * (see sparse.c::memory_present())
+	 *
 	 * Making it a UL at least makes someone do a cast
 	 * before using it wrong.
 	 */
@@ -544,6 +570,7 @@ extern int __section_nr(struct mem_section* ms);
 #define SECTION_HAS_MEM_MAP	(1UL<<1)
 #define SECTION_MAP_LAST_BIT	(1UL<<2)
 #define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
+#define SECTION_NID_SHIFT	2
 
 static inline struct page *__section_mem_map_addr(struct mem_section *section)
 {
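SECTION_NID_SHIFT sits just above the two flag bits, which matches the earlier comment about stashing a node id in section_mem_map during early boot, before the real mem_map pointer is installed. A sketch of the encode/decode pair that comment implies; the helper names are assumed here, and the real implementations belong to mm/sparse.c rather than this header.

/* Sketch: early-boot node-id encoding in section_mem_map (names assumed). */
static inline unsigned long sparse_encode_early_nid_sketch(int nid)
{
	return (unsigned long)nid << SECTION_NID_SHIFT;
}

static inline int sparse_early_nid_sketch(struct mem_section *section)
{
	return section->section_mem_map >> SECTION_NID_SHIFT;
}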