path: root/include/linux/mmzone.h
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h	51
1 files changed, 35 insertions, 16 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2d8337150493..27e748eb72b0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -4,7 +4,6 @@
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
-#include <linux/config.h>
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/wait.h>
@@ -47,6 +46,27 @@ struct zone_padding {
 #define ZONE_PADDING(name)
 #endif
 
+enum zone_stat_item {
+	NR_ANON_PAGES,	/* Mapped anonymous pages */
+	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
+			   only modified from process context */
+	NR_FILE_PAGES,
+	NR_SLAB,	/* Pages used by slab allocator */
+	NR_PAGETABLE,	/* used for pagetables */
+	NR_FILE_DIRTY,
+	NR_WRITEBACK,
+	NR_UNSTABLE_NFS,	/* NFS unstable pages */
+	NR_BOUNCE,
+#ifdef CONFIG_NUMA
+	NUMA_HIT,		/* allocated in intended node */
+	NUMA_MISS,		/* allocated in non intended node */
+	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
+	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
+	NUMA_LOCAL,		/* allocation from local node */
+	NUMA_OTHER,		/* allocation from other node */
+#endif
+	NR_VM_ZONE_STAT_ITEMS };
+
 struct per_cpu_pages {
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
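The enum above is purely an index space: each zone_stat_item names a slot in the per-zone counter array (vm_stat, added further down in this diff) and in the matching per-CPU differential array. As a rough illustration, a read-side accessor works against those arrays along these lines; the helper name zone_counter_read() is invented here, the in-tree accessors live in include/linux/vmstat.h:

/* Sketch only: read one zoned VM counter.  Per-CPU differentials that
 * have not been folded back yet can make the sum dip below zero for a
 * moment, so clamp it. */
static inline unsigned long zone_counter_read(struct zone *zone,
					      enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

	return x < 0 ? 0 : x;
}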
@@ -56,13 +76,8 @@ struct per_cpu_pages {
 
 struct per_cpu_pageset {
 	struct per_cpu_pages pcp[2];	/* 0: hot. 1: cold */
-#ifdef CONFIG_NUMA
-	unsigned long numa_hit;		/* allocated in intended node */
-	unsigned long numa_miss;	/* allocated in non intended node */
-	unsigned long numa_foreign;	/* was intended here, hit elsewhere */
-	unsigned long interleave_hit;	/* interleaver prefered this zone */
-	unsigned long local_node;	/* allocation from local node */
-	unsigned long other_node;	/* allocation from other node */
+#ifdef CONFIG_SMP
+	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
 #endif
 } ____cacheline_aligned_in_smp;
 
@@ -166,12 +181,8 @@ struct zone {
 	/* A count of how many reclaimers are scanning this zone */
 	atomic_t		reclaim_in_progress;
 
-	/*
-	 * timestamp (in jiffies) of the last zone reclaim that did not
-	 * result in freeing of pages. This is used to avoid repeated scans
-	 * if all memory in the zone is in use.
-	 */
-	unsigned long		last_unsuccessful_zone_reclaim;
+	/* Zone statistics */
+	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
 	/*
 	 * prev_priority holds the scanning priority for this zone. It is
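The s8 vm_stat_diff[] added to struct per_cpu_pageset and the atomic_long_t vm_stat[] added to struct zone above are the two halves of the zoned VM counter scheme: cheap updates land in the small per-CPU slots, and only when a slot grows too large is it folded into the zone-wide atomic. A minimal sketch of that update path follows, assuming an invented helper name and a fixed fold threshold; the in-tree logic lives in mm/vmstat.c:

#define FOLD_THRESHOLD	32	/* assumed value; the real threshold is tuned separately */

/* Sketch only: accumulate a delta for this CPU and fold it into the
 * zone-wide counter once it no longer fits comfortably in the s8 slot. */
static inline void mod_zone_counter(struct zone *zone,
				    struct per_cpu_pageset *pcp,
				    enum zone_stat_item item, int delta)
{
	s8 *diff = &pcp->vm_stat_diff[item];
	long x = *diff + delta;

	if (x > FOLD_THRESHOLD || x < -FOLD_THRESHOLD) {
		atomic_long_add(x, &zone->vm_stat[item]);
		x = 0;
	}
	*diff = x;
}

A caller would look up the current CPU's pageset with preemption disabled before passing it in, so the read-modify-write of the s8 slot stays CPU-local.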
@@ -198,7 +209,7 @@ struct zone {
 
 	/*
 	 * wait_table		-- the array holding the hash table
-	 * wait_table_size	-- the size of the hash table array
+	 * wait_table_hash_nr_entries	-- the size of the hash table array
 	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
 	 *
 	 * The purpose of all these is to keep track of the people
@@ -221,7 +232,7 @@ struct zone {
 	 * free_area_init_core() performs the initialization of them.
 	 */
 	wait_queue_head_t	* wait_table;
-	unsigned long		wait_table_size;
+	unsigned long		wait_table_hash_nr_entries;
 	unsigned long		wait_table_bits;
 
 	/*
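The two hunks above are just the rename wait_table_size to wait_table_hash_nr_entries: the field still counts the entries in the zone's hashed array of wait queues, and it remains a power of two, wait_table_hash_nr_entries == (1 << wait_table_bits). For illustration, mapping a page to its queue looks roughly like this; the function name is invented, the in-tree version is page_waitqueue() in mm/filemap.c:

#include <linux/hash.h>

/* Sketch only: hash a page into the zone's wait-queue table.
 * hash_ptr(p, bits) returns a value below (1 << bits), i.e. below
 * wait_table_hash_nr_entries, so it can index wait_table directly. */
static inline wait_queue_head_t *page_wait_queue_sketch(struct zone *zone,
							 struct page *page)
{
	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}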
@@ -334,6 +345,9 @@ void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
 
+extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
+				     unsigned long size);
+
 #ifdef CONFIG_HAVE_MEMORY_PRESENT
 void memory_present(int nid, unsigned long start, unsigned long end);
 #else
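The new declaration above makes init_currently_empty_zone() callable outside the boot-time zone setup, so memory hot-add can initialize a zone (wait table included) that held no pages when the system booted. A hedged sketch of a caller; the function name and the wait_table check used as an "already initialized" test are illustrative, the in-tree user is mm/memory_hotplug.c:

/* Sketch only: make sure a zone that is about to receive hot-added
 * pages has been initialized at least once. */
static int prepare_zone_for_hotadd(struct zone *zone,
				   unsigned long start_pfn,
				   unsigned long nr_pages)
{
	if (!zone->wait_table)	/* zone was empty at boot */
		return init_currently_empty_zone(zone, start_pfn, nr_pages);
	return 0;
}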
@@ -507,6 +521,10 @@ struct mem_section {
 	 * pages. However, it is stored with some other magic.
 	 * (see sparse.c::sparse_init_one_section())
 	 *
+	 * Additionally during early boot we encode node id of
+	 * the location of the section here to guide allocation.
+	 * (see sparse.c::memory_present())
+	 *
 	 * Making it a UL at least makes someone do a cast
 	 * before using it wrong.
 	 */
@@ -546,6 +564,7 @@ extern int __section_nr(struct mem_section* ms);
 #define SECTION_HAS_MEM_MAP	(1UL<<1)
 #define SECTION_MAP_LAST_BIT	(1UL<<2)
 #define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
+#define SECTION_NID_SHIFT	2
 
 static inline struct page *__section_mem_map_addr(struct mem_section *section)
 {
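SECTION_NID_SHIFT pairs with the comment added to struct mem_section earlier in this diff: before a section gets a real mem_map, its node id is parked in section_mem_map above the two flag bits. A sketch of the encode/decode pair, with invented names; the in-tree helpers are sparse_encode_early_nid() and sparse_early_nid() in mm/sparse.c:

/* Sketch only: stash/recover a node id in section_mem_map during early
 * boot.  Shifting by SECTION_NID_SHIFT (2) keeps the value clear of the
 * SECTION_MARKED_PRESENT and SECTION_HAS_MEM_MAP flag bits. */
static inline unsigned long encode_early_nid(int nid)
{
	return (unsigned long)nid << SECTION_NID_SHIFT;
}

static inline int decode_early_nid(struct mem_section *section)
{
	return section->section_mem_map >> SECTION_NID_SHIFT;
}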