Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--  include/linux/mmzone.h  |  21
1 file changed, 5 insertions(+), 16 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e60a340fe890..cf9e458e96b0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -184,13 +184,7 @@ struct per_cpu_pageset {
 	s8 stat_threshold;
 	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
 #endif
-} ____cacheline_aligned_in_smp;
-
-#ifdef CONFIG_NUMA
-#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
-#else
-#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
-#endif
+};
 
 #endif /* !__GENERATING_BOUNDS.H */
 
@@ -306,14 +300,13 @@ struct zone {
 	 */
 	unsigned long		min_unmapped_pages;
 	unsigned long		min_slab_pages;
-	struct per_cpu_pageset	*pageset[NR_CPUS];
-#else
-	struct per_cpu_pageset	pageset[NR_CPUS];
 #endif
+	struct per_cpu_pageset __percpu *pageset;
 	/*
 	 * free areas of different sizes
 	 */
 	spinlock_t		lock;
+	int			all_unreclaimable; /* All pages pinned */
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/* see spanned/present_pages for more description */
 	seqlock_t		span_seqlock;
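
Note: the two hunks above replace the NR_CPUS-sized pageset array in struct zone (and the zone_pcp() accessor macros removed in the first hunk) with a single __percpu pointer. A minimal caller sketch, assuming the generic per-cpu accessors per_cpu_ptr() and this_cpu_ptr(); the function name is hypothetical and not part of this diff:

#include <linux/percpu.h>
#include <linux/mmzone.h>

/* Hypothetical example, not part of this diff: where a caller previously
 * wrote zone_pcp(zone, cpu), it would now reach the per-cpu pageset
 * through the generic per-cpu accessors.
 */
static void example_inspect_pageset(struct zone *zone, int cpu)
{
	/* view of a given CPU's pageset for this zone */
	struct per_cpu_pageset *pset = per_cpu_ptr(zone->pageset, cpu);

	/* on the local CPU, this_cpu_ptr(zone->pageset) is the usual shorthand */

	(void)pset->pcp.count;	/* e.g. read that CPU's cached page count */
}
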
@@ -425,7 +418,6 @@ struct zone {
 } ____cacheline_internodealigned_in_smp;
 
 typedef enum {
-	ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
 	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
 	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
 } zone_flags_t;
@@ -445,11 +437,6 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
 	clear_bit(flag, &zone->flags);
 }
 
-static inline int zone_is_all_unreclaimable(const struct zone *zone)
-{
-	return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
-}
-
 static inline int zone_is_reclaim_locked(const struct zone *zone)
 {
 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
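
Note: with ZONE_ALL_UNRECLAIMABLE dropped from zone_flags_t and zone_is_all_unreclaimable() removed, this state lives in the plain all_unreclaimable field added to struct zone above. A before/after sketch; the helper name is invented for illustration only:

#include <linux/mmzone.h>

/* Hypothetical helper, illustration only, not part of this diff */
static inline int example_zone_unreclaimable(const struct zone *zone)
{
	/* old: return zone_is_all_unreclaimable(zone);  (zone flag + test_bit) */
	return zone->all_unreclaimable;	/* new: read the plain int field */
}
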
@@ -620,7 +607,9 @@ typedef struct pglist_data {
 	struct page_cgroup *node_page_cgroup;
 #endif
 #endif
+#ifndef CONFIG_NO_BOOTMEM
 	struct bootmem_data *bdata;
+#endif
 #ifdef CONFIG_MEMORY_HOTPLUG
 	/*
 	 * Must be held any time you expect node_start_pfn, node_present_pages
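
Note: since the bdata pointer is now compiled out when CONFIG_NO_BOOTMEM is set, code that dereferences the node's bootmem descriptor presumably needs the same guard. A sketch under that assumption; the accessor name is hypothetical and not part of this diff:

#include <linux/bootmem.h>
#include <linux/mmzone.h>

#ifndef CONFIG_NO_BOOTMEM
/* Hypothetical accessor, illustration only */
static inline unsigned long example_node_bootmem_start(pg_data_t *pgdat)
{
	return pgdat->bdata->node_min_pfn;
}
#endif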