diff options
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r-- | include/linux/mmzone.h | 30 |
1 files changed, 29 insertions, 1 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 5ed471b58f4f..f5fa3082fd6a 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/threads.h> | 12 | #include <linux/threads.h> |
13 | #include <linux/numa.h> | 13 | #include <linux/numa.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/seqlock.h> | ||
15 | #include <asm/atomic.h> | 16 | #include <asm/atomic.h> |
16 | 17 | ||
17 | /* Free memory management - zoned buddy allocator. */ | 18 | /* Free memory management - zoned buddy allocator. */ |
@@ -137,6 +138,10 @@ struct zone { | |||
137 | * free areas of different sizes | 138 | * free areas of different sizes |
138 | */ | 139 | */ |
139 | spinlock_t lock; | 140 | spinlock_t lock; |
141 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
142 | /* see spanned/present_pages for more description */ | ||
143 | seqlock_t span_seqlock; | ||
144 | #endif | ||
140 | struct free_area free_area[MAX_ORDER]; | 145 | struct free_area free_area[MAX_ORDER]; |
141 | 146 | ||
142 | 147 | ||
@@ -220,6 +225,16 @@ struct zone { | |||
220 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ | 225 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ |
221 | unsigned long zone_start_pfn; | 226 | unsigned long zone_start_pfn; |
222 | 227 | ||
228 | /* | ||
229 | * zone_start_pfn, spanned_pages and present_pages are all | ||
230 | * protected by span_seqlock. It is a seqlock because it has | ||
231 | * to be read outside of zone->lock, and it is done in the main | ||
232 | * allocator path. But, it is written quite infrequently. | ||
233 | * | ||
234 | * The lock is declared along with zone->lock because it is | ||
235 | * frequently read in proximity to zone->lock. It's good to | ||
236 | * give them a chance of being in the same cacheline. | ||
237 | */ | ||
223 | unsigned long spanned_pages; /* total size, including holes */ | 238 | unsigned long spanned_pages; /* total size, including holes */ |
224 | unsigned long present_pages; /* amount of memory (excluding holes) */ | 239 | unsigned long present_pages; /* amount of memory (excluding holes) */ |
225 | 240 | ||
@@ -273,6 +288,16 @@ typedef struct pglist_data { | |||
273 | struct page *node_mem_map; | 288 | struct page *node_mem_map; |
274 | #endif | 289 | #endif |
275 | struct bootmem_data *bdata; | 290 | struct bootmem_data *bdata; |
291 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
292 | /* | ||
293 | * Must be held any time you expect node_start_pfn, node_present_pages | ||
294 | * or node_spanned_pages to stay constant. Holding this will also | ||
295 | * guarantee that any pfn_valid() stays that way. | ||
296 | * | ||
297 | * Nests above zone->lock and zone->span_seqlock. | ||
298 | */ | ||
299 | spinlock_t node_size_lock; | ||
300 | #endif | ||
276 | unsigned long node_start_pfn; | 301 | unsigned long node_start_pfn; |
277 | unsigned long node_present_pages; /* total number of physical pages */ | 302 | unsigned long node_present_pages; /* total number of physical pages */ |
278 | unsigned long node_spanned_pages; /* total size of physical page | 303 | unsigned long node_spanned_pages; /* total size of physical page |
@@ -293,6 +318,8 @@ typedef struct pglist_data { | |||
293 | #endif | 318 | #endif |
294 | #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) | 319 | #define nid_page_nr(nid, pagenr) pgdat_page_nr(NODE_DATA(nid),(pagenr)) |
295 | 320 | ||
321 | #include <linux/memory_hotplug.h> | ||
322 | |||
296 | extern struct pglist_data *pgdat_list; | 323 | extern struct pglist_data *pgdat_list; |
297 | 324 | ||
298 | void __get_zone_counts(unsigned long *active, unsigned long *inactive, | 325 | void __get_zone_counts(unsigned long *active, unsigned long *inactive, |
@@ -302,7 +329,7 @@ void get_zone_counts(unsigned long *active, unsigned long *inactive, | |||
302 | void build_all_zonelists(void); | 329 | void build_all_zonelists(void); |
303 | void wakeup_kswapd(struct zone *zone, int order); | 330 | void wakeup_kswapd(struct zone *zone, int order); |
304 | int zone_watermark_ok(struct zone *z, int order, unsigned long mark, | 331 | int zone_watermark_ok(struct zone *z, int order, unsigned long mark, |
305 | int alloc_type, int can_try_harder, int gfp_high); | 332 | int alloc_type, int can_try_harder, gfp_t gfp_high); |
306 | 333 | ||
307 | #ifdef CONFIG_HAVE_MEMORY_PRESENT | 334 | #ifdef CONFIG_HAVE_MEMORY_PRESENT |
308 | void memory_present(int nid, unsigned long start, unsigned long end); | 335 | void memory_present(int nid, unsigned long start, unsigned long end); |
@@ -509,6 +536,7 @@ static inline struct mem_section *__nr_to_section(unsigned long nr) | |||
509 | return NULL; | 536 | return NULL; |
510 | return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; | 537 | return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; |
511 | } | 538 | } |
539 | extern int __section_nr(struct mem_section* ms); | ||
512 | 540 | ||
513 | /* | 541 | /* |
514 | * We use the lower bits of the mem_map pointer to store | 542 | * We use the lower bits of the mem_map pointer to store |