Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h	71
1 file changed, 36 insertions(+), 35 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 30fe668c2542..3984c4eb41fd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -184,13 +184,7 @@ struct per_cpu_pageset {
 	s8 stat_threshold;
 	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
 #endif
-} ____cacheline_aligned_in_smp;
-
-#ifdef CONFIG_NUMA
-#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
-#else
-#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
-#endif
+};
 
 #endif /* !__GENERATING_BOUNDS.H */
 
@@ -290,6 +284,13 @@ struct zone {
 	unsigned long watermark[NR_WMARK];
 
 	/*
+	 * When free pages are below this point, additional steps are taken
+	 * when reading the number of free pages to avoid per-cpu counter
+	 * drift allowing watermarks to be breached
+	 */
+	unsigned long percpu_drift_mark;
+
+	/*
 	 * We don't know if the memory that we're going to allocate will be freeable
 	 * or/and it will be released eventually, so to avoid totally wasting several
 	 * GB of ram we must reserve some of the lower zone memory (otherwise we risk
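
The new field is read by zone_nr_free_pages(), declared later in this diff under CONFIG_SMP. The idea: while the cheap global NR_FREE_PAGES counter sits above percpu_drift_mark, per-cpu counter error cannot push the zone below a watermark, so the per-cpu deltas only need to be folded in below that mark. A minimal sketch of that read path (illustrative only; percpu_drift_mark and vm_stat_diff[] are fields from this header, but this is not the actual mm/vmstat.c body):

/*
 * Illustrative sketch, not the real implementation (SMP-only, since
 * vm_stat_diff[] exists only under CONFIG_SMP).
 */
static unsigned long zone_nr_free_pages_sketch(struct zone *zone)
{
	long free = zone_page_state(zone, NR_FREE_PAGES);
	int cpu;

	/* Above the drift mark the global counter is accurate enough. */
	if (free >= zone->percpu_drift_mark)
		return free;

	/* Below it, fold in the per-cpu deltas that have not been flushed yet. */
	for_each_online_cpu(cpu)
		free += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[NR_FREE_PAGES];

	return free > 0 ? free : 0;
}
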
@@ -306,14 +307,13 @@ struct zone {
 	 */
 	unsigned long min_unmapped_pages;
 	unsigned long min_slab_pages;
-	struct per_cpu_pageset *pageset[NR_CPUS];
-#else
-	struct per_cpu_pageset pageset[NR_CPUS];
 #endif
+	struct per_cpu_pageset __percpu *pageset;
 	/*
 	 * free areas of different sizes
 	 */
 	spinlock_t lock;
+	int all_unreclaimable; /* All pages pinned */
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t span_seqlock;
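
With the NR_CPUS-sized arrays gone (and the zone_pcp() helpers removed in the first hunk), the pagesets hang off a single __percpu pointer, so access presumably follows the standard percpu idioms, roughly:

/* Illustrative only: the usual percpu access patterns for the new pointer. */
struct per_cpu_pageset *pset;

pset = per_cpu_ptr(zone->pageset, cpu);	/* a specific CPU's pageset */
pset = this_cpu_ptr(zone->pageset);	/* the local CPU's pageset */
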
@@ -328,6 +328,15 @@ struct zone {
 	unsigned long *pageblock_flags;
 #endif /* CONFIG_SPARSEMEM */
 
+#ifdef CONFIG_COMPACTION
+	/*
+	 * On compaction failure, 1<<compact_defer_shift compactions
+	 * are skipped before trying again. The number attempted since
+	 * last failure is tracked with compact_considered.
+	 */
+	unsigned int compact_considered;
+	unsigned int compact_defer_shift;
+#endif
 
 	ZONE_PADDING(_pad1_)
 
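
The two counters implement an exponential back-off: each failure presumably doubles how many subsequent compaction attempts are skipped. The real helpers live elsewhere (include/linux/compaction.h); the sketch below only restates the policy the comment describes, with an assumed cap on the shift:

#define COMPACT_MAX_DEFER_SHIFT_SKETCH 6	/* assumed cap: skip at most 64 attempts */

/* Illustrative: called when compaction fails to free a suitable page. */
static void defer_compaction_sketch(struct zone *zone)
{
	zone->compact_considered = 0;
	if (zone->compact_defer_shift < COMPACT_MAX_DEFER_SHIFT_SKETCH)
		zone->compact_defer_shift++;
}

/* Illustrative: true while attempts are still being skipped after a failure. */
static bool compaction_deferred_sketch(struct zone *zone)
{
	unsigned long limit = 1UL << zone->compact_defer_shift;

	if (++zone->compact_considered > limit)
		zone->compact_considered = limit;	/* keep the counter bounded */

	return zone->compact_considered < limit;
}
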
@@ -346,21 +355,6 @@ struct zone {
 	atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
 	/*
-	 * prev_priority holds the scanning priority for this zone. It is
-	 * defined as the scanning priority at which we achieved our reclaim
-	 * target at the previous try_to_free_pages() or balance_pgdat()
-	 * invokation.
-	 *
-	 * We use prev_priority as a measure of how much stress page reclaim is
-	 * under - it drives the swappiness decision: whether to unmap mapped
-	 * pages.
-	 *
-	 * Access to both this field is quite racy even on uniprocessor. But
-	 * it is expected to average out OK.
-	 */
-	int prev_priority;
-
-	/*
 	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
 	 * this zone's LRU. Maintained by the pageout code.
 	 */
@@ -425,7 +419,6 @@ struct zone {
 } ____cacheline_internodealigned_in_smp;
 
 typedef enum {
-	ZONE_ALL_UNRECLAIMABLE, /* all pages pinned */
 	ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */
 	ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */
 } zone_flags_t;
@@ -445,11 +438,6 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
 	clear_bit(flag, &zone->flags);
 }
 
-static inline int zone_is_all_unreclaimable(const struct zone *zone)
-{
-	return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
-}
-
 static inline int zone_is_reclaim_locked(const struct zone *zone)
 {
 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
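
The flag bit and its accessor removed above are superseded by the plain int all_unreclaimable member added to struct zone earlier in this diff, so callers presumably move from the test_bit() wrapper to a direct field read, along these lines (illustrative pattern, not a quote of any specific call site):

/* Before: */
if (zone_is_all_unreclaimable(zone))
	continue;

/* After: */
if (zone->all_unreclaimable)
	continue;
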
@@ -460,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
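
On UP the helper collapses to the plain counter read; on SMP it is the drift-aware reader sketched next to percpu_drift_mark above. A hedged usage example combining it with the existing watermark accessors (illustrative, not an actual call site from this change):

/* Use the drift-safe count before deciding the zone is short of memory. */
if (zone_nr_free_pages(zone) <= min_wmark_pages(zone))
	wakeup_kswapd(zone, order);
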
@@ -620,7 +614,9 @@ typedef struct pglist_data {
 	struct page_cgroup *node_page_cgroup;
 #endif
 #endif
+#ifndef CONFIG_NO_BOOTMEM
 	struct bootmem_data *bdata;
+#endif
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/*
 	 * Must be held any time you expect node_start_pfn, node_present_pages
@@ -652,9 +648,8 @@ typedef struct pglist_data {
 
 #include <linux/memory_hotplug.h>
 
-void get_zone_counts(unsigned long *active, unsigned long *inactive,
-		unsigned long *free);
-void build_all_zonelists(void);
+extern struct mutex zonelists_mutex;
+void build_all_zonelists(void *data);
 void wakeup_kswapd(struct zone *zone, int order);
 int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		int classzone_idx, int alloc_flags);
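
build_all_zonelists() now takes an opaque data argument and is paired with a global zonelists_mutex, so post-boot rebuilds (for example from memory hotplug) can be serialized. A sketch of how such a caller would presumably look (illustrative; NULL is shown for the data argument since the exact contract is defined in mm/page_alloc.c, not in this header):

/* Illustrative: serialize a runtime zonelist rebuild. */
mutex_lock(&zonelists_mutex);
build_all_zonelists(NULL);
mutex_unlock(&zonelists_mutex);
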
@@ -672,6 +667,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
 static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
 #endif
 
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+int local_memory_node(int node_id);
+#else
+static inline int local_memory_node(int node_id) { return node_id; };
+#endif
+
 #ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
 unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
 #endif
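
When CONFIG_HAVE_MEMORYLESS_NODES is set, local_memory_node() maps a node id (which may have no memory of its own) to a nearby node that does have memory; otherwise it is the identity. A hypothetical caller placing a node-local allocation might use it like this (struct foo and the call site are made up for illustration):

/* Illustrative: fall back to the nearest node that actually has memory. */
int nid = local_memory_node(cpu_to_node(cpu));
struct foo *p = kmalloc_node(sizeof(*p), GFP_KERNEL, nid);
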
@@ -983,7 +984,7 @@ struct mem_section {
 #endif
 
 #define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
-#define NR_SECTION_ROOTS (NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
+#define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
 #define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
 
 #ifdef CONFIG_SPARSEMEM_EXTREME
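
The rounding change matters whenever NR_MEM_SECTIONS is not an exact multiple of SECTIONS_PER_ROOT: truncating division drops the final, partially filled root. With illustrative numbers (both constants are config and architecture dependent):

/* SECTIONS_PER_ROOT = 256, NR_MEM_SECTIONS = 300 (example values only):
 *   old: 300 / 256              == 1 root  -> sections 256..299 have no root
 *   new: DIV_ROUND_UP(300, 256) == 2 roots -> all 300 sections are covered
 * If NR_MEM_SECTIONS < SECTIONS_PER_ROOT, the old macro even evaluates to 0.
 */
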
