about summary refs log tree commit diff stats
path: root/include/linux/mmzone.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--include/linux/mmzone.h43
1 files changed, 24 insertions, 19 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 30fe668c2542..b4d109e389b8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -184,13 +184,7 @@ struct per_cpu_pageset {
184 s8 stat_threshold; 184 s8 stat_threshold;
185 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; 185 s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
186#endif 186#endif
187} ____cacheline_aligned_in_smp; 187};
188
189#ifdef CONFIG_NUMA
190#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
191#else
192#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
193#endif
194 188
195#endif /* !__GENERATING_BOUNDS.H */ 189#endif /* !__GENERATING_BOUNDS.H */
196 190
@@ -306,14 +300,13 @@ struct zone {
306 */ 300 */
307 unsigned long min_unmapped_pages; 301 unsigned long min_unmapped_pages;
308 unsigned long min_slab_pages; 302 unsigned long min_slab_pages;
309 struct per_cpu_pageset *pageset[NR_CPUS];
310#else
311 struct per_cpu_pageset pageset[NR_CPUS];
312#endif 303#endif
304 struct per_cpu_pageset __percpu *pageset;
313 /* 305 /*
314 * free areas of different sizes 306 * free areas of different sizes
315 */ 307 */
316 spinlock_t lock; 308 spinlock_t lock;
309 int all_unreclaimable; /* All pages pinned */
317#ifdef CONFIG_MEMORY_HOTPLUG 310#ifdef CONFIG_MEMORY_HOTPLUG
318 /* see spanned/present_pages for more description */ 311 /* see spanned/present_pages for more description */
319 seqlock_t span_seqlock; 312 seqlock_t span_seqlock;
@@ -328,6 +321,15 @@ struct zone {
328 unsigned long *pageblock_flags; 321 unsigned long *pageblock_flags;
329#endif /* CONFIG_SPARSEMEM */ 322#endif /* CONFIG_SPARSEMEM */
330 323
324#ifdef CONFIG_COMPACTION
325 /*
326 * On compaction failure, 1<<compact_defer_shift compactions
327 * are skipped before trying again. The number attempted since
328 * last failure is tracked with compact_considered.
329 */
330 unsigned int compact_considered;
331 unsigned int compact_defer_shift;
332#endif
331 333
332 ZONE_PADDING(_pad1_) 334 ZONE_PADDING(_pad1_)
333 335
@@ -349,7 +351,7 @@ struct zone {
349 * prev_priority holds the scanning priority for this zone. It is 351 * prev_priority holds the scanning priority for this zone. It is
350 * defined as the scanning priority at which we achieved our reclaim 352 * defined as the scanning priority at which we achieved our reclaim
351 * target at the previous try_to_free_pages() or balance_pgdat() 353 * target at the previous try_to_free_pages() or balance_pgdat()
352 * invokation. 354 * invocation.
353 * 355 *
354 * We use prev_priority as a measure of how much stress page reclaim is 356 * We use prev_priority as a measure of how much stress page reclaim is
355 * under - it drives the swappiness decision: whether to unmap mapped 357 * under - it drives the swappiness decision: whether to unmap mapped
@@ -425,7 +427,6 @@ struct zone {
425} ____cacheline_internodealigned_in_smp; 427} ____cacheline_internodealigned_in_smp;
426 428
427typedef enum { 429typedef enum {
428 ZONE_ALL_UNRECLAIMABLE, /* all pages pinned */
429 ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ 430 ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */
430 ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ 431 ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */
431} zone_flags_t; 432} zone_flags_t;
@@ -445,11 +446,6 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
445 clear_bit(flag, &zone->flags); 446 clear_bit(flag, &zone->flags);
446} 447}
447 448
448static inline int zone_is_all_unreclaimable(const struct zone *zone)
449{
450 return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
451}
452
453static inline int zone_is_reclaim_locked(const struct zone *zone) 449static inline int zone_is_reclaim_locked(const struct zone *zone)
454{ 450{
455 return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); 451 return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
@@ -620,7 +616,9 @@ typedef struct pglist_data {
620 struct page_cgroup *node_page_cgroup; 616 struct page_cgroup *node_page_cgroup;
621#endif 617#endif
622#endif 618#endif
619#ifndef CONFIG_NO_BOOTMEM
623 struct bootmem_data *bdata; 620 struct bootmem_data *bdata;
621#endif
624#ifdef CONFIG_MEMORY_HOTPLUG 622#ifdef CONFIG_MEMORY_HOTPLUG
625 /* 623 /*
626 * Must be held any time you expect node_start_pfn, node_present_pages 624 * Must be held any time you expect node_start_pfn, node_present_pages
@@ -652,9 +650,10 @@ typedef struct pglist_data {
652 650
653#include <linux/memory_hotplug.h> 651#include <linux/memory_hotplug.h>
654 652
653extern struct mutex zonelists_mutex;
655void get_zone_counts(unsigned long *active, unsigned long *inactive, 654void get_zone_counts(unsigned long *active, unsigned long *inactive,
656 unsigned long *free); 655 unsigned long *free);
657void build_all_zonelists(void); 656void build_all_zonelists(void *data);
658void wakeup_kswapd(struct zone *zone, int order); 657void wakeup_kswapd(struct zone *zone, int order);
659int zone_watermark_ok(struct zone *z, int order, unsigned long mark, 658int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
660 int classzone_idx, int alloc_flags); 659 int classzone_idx, int alloc_flags);
@@ -672,6 +671,12 @@ void memory_present(int nid, unsigned long start, unsigned long end);
672static inline void memory_present(int nid, unsigned long start, unsigned long end) {} 671static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
673#endif 672#endif
674 673
674#ifdef CONFIG_HAVE_MEMORYLESS_NODES
675int local_memory_node(int node_id);
676#else
677static inline int local_memory_node(int node_id) { return node_id; };
678#endif
679
675#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE 680#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
676unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); 681unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
677#endif 682#endif
@@ -983,7 +988,7 @@ struct mem_section {
983#endif 988#endif
984 989
985#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT) 990#define SECTION_NR_TO_ROOT(sec) ((sec) / SECTIONS_PER_ROOT)
986#define NR_SECTION_ROOTS (NR_MEM_SECTIONS / SECTIONS_PER_ROOT) 991#define NR_SECTION_ROOTS DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
987#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1) 992#define SECTION_ROOT_MASK (SECTIONS_PER_ROOT - 1)
988 993
989#ifdef CONFIG_SPARSEMEM_EXTREME 994#ifdef CONFIG_SPARSEMEM_EXTREME