Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h	23
1 file changed, 23 insertions, 0 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 6e6e62648a4d..39c24ebe9cfd 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -104,6 +104,8 @@ enum zone_stat_item {
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
 	NR_SHMEM,		/* shmem pages (included tmpfs/GEM pages) */
+	NR_DIRTIED,		/* page dirtyings since bootup */
+	NR_WRITTEN,		/* page writings since bootup */
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
 	NUMA_MISS,		/* allocated in non intended node */
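
Note: the two counters added above are ordinary per-zone vmstat items, so they would be bumped from the page-dirtying and writeback-completion paths using the existing vmstat helpers. A minimal sketch, assuming hypothetical call sites (the wrapper names below are illustrative, not part of this diff):

    #include <linux/mm.h>
    #include <linux/vmstat.h>

    /* Sketch only: the wrapper names are hypothetical. */
    static void account_page_dirtied_sketch(struct page *page)
    {
    	/* one more page dirtying since bootup */
    	inc_zone_page_state(page, NR_DIRTIED);
    }

    static void account_page_written_sketch(struct page *page)
    {
    	/* one more page writing since bootup */
    	inc_zone_page_state(page, NR_WRITTEN);
    }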
@@ -284,6 +286,13 @@ struct zone {
 	unsigned long watermark[NR_WMARK];
 
 	/*
+	 * When free pages are below this point, additional steps are taken
+	 * when reading the number of free pages to avoid per-cpu counter
+	 * drift allowing watermarks to be breached
+	 */
+	unsigned long percpu_drift_mark;
+
+	/*
 	 * We don't know if the memory that we're going to allocate will be freeable
 	 * or/and it will be released eventually, so to avoid totally wasting several
 	 * GB of ram we must reserve some of the lower zone memory (otherwise we risk
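
Note: the new comment says when percpu_drift_mark matters but not how it is chosen. Since the mark exists to cover per-cpu counter drift, a plausible initialisation is the high watermark plus the worst-case unsynced error, i.e. the per-cpu stat threshold times the number of online CPUs. A sketch under that assumption (the helper name and the threshold parameter are illustrative):

    #include <linux/cpumask.h>
    #include <linux/mmzone.h>

    /* Sketch: worst case, every online CPU holds a full threshold's
     * worth of unsynced NR_FREE_PAGES deltas.
     */
    static void set_percpu_drift_mark(struct zone *zone, int threshold)
    {
    	unsigned long max_drift = num_online_cpus() * threshold;

    	/* Only needed if drift alone could carry us from the low
    	 * watermark down past the min watermark.
    	 */
    	if (max_drift > low_wmark_pages(zone) - min_wmark_pages(zone))
    		zone->percpu_drift_mark = high_wmark_pages(zone) + max_drift;
    }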
@@ -414,6 +423,9 @@ struct zone {
 typedef enum {
 	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
 	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
+	ZONE_CONGESTED,			/* zone has many dirty pages backed by
+					 * a congested BDI
+					 */
 } zone_flags_t;
 
 static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
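
Note: ZONE_CONGESTED is driven through the zone_set_flag()/zone_clear_flag() helpers this hunk sits next to. A hedged sketch of the flag maintenance a reclaim path might do (the trigger condition is an assumption, not shown in this diff):

    /* Sketch: mark the zone congested when reclaim keeps meeting dirty
     * pages queued on a congested BDI, clear it otherwise.
     */
    static void note_zone_congestion(struct zone *zone, bool hit_congested_bdi)
    {
    	if (hit_congested_bdi)
    		zone_set_flag(zone, ZONE_CONGESTED);
    	else
    		zone_clear_flag(zone, ZONE_CONGESTED);
    }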
@@ -431,6 +443,11 @@ static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
 	clear_bit(flag, &zone->flags);
 }
 
+static inline int zone_is_reclaim_congested(const struct zone *zone)
+{
+	return test_bit(ZONE_CONGESTED, &zone->flags);
+}
+
 static inline int zone_is_reclaim_locked(const struct zone *zone)
 {
 	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
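
Note: the point of the new predicate is to let callers back off only when the zone really is backed by congested devices, rather than sleeping unconditionally. A usage sketch, assuming the existing congestion_wait() helper from <linux/backing-dev.h> (the policy around it is illustrative):

    #include <linux/backing-dev.h>
    #include <linux/sched.h>

    /* Sketch: throttle a reclaimer only when the zone's backing
     * devices are actually congested; otherwise just yield.
     */
    static void throttle_if_congested(struct zone *zone)
    {
    	if (zone_is_reclaim_congested(zone))
    		congestion_wait(BLK_RW_ASYNC, HZ/10);
    	else
    		cond_resched();
    }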
@@ -441,6 +458,12 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+#ifdef CONFIG_SMP
+unsigned long zone_nr_free_pages(struct zone *zone);
+#else
+#define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES)
+#endif /* CONFIG_SMP */
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
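
Note: only the SMP declaration of zone_nr_free_pages() appears here; its body lives elsewhere in the series. Given the percpu_drift_mark comment above, the obvious shape is to trust the cheap global counter normally and fold in the unsynced per-cpu deltas once free pages fall below the drift mark. A sketch under that assumption (the per-cpu field names match the kernel of this era but are not shown in this diff):

    #include <linux/cpumask.h>
    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    unsigned long zone_nr_free_pages(struct zone *zone)
    {
    	unsigned long nr = zone_page_state(zone, NR_FREE_PAGES);

    	/* Below the drift mark the global counter can no longer be
    	 * trusted on its own: fold in each CPU's unsynced delta for
    	 * a better estimate before watermark checks use the value.
    	 */
    	if (nr < zone->percpu_drift_mark) {
    		int cpu;

    		for_each_online_cpu(cpu)
    			nr += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[NR_FREE_PAGES];
    	}

    	return nr;
    }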