Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--	include/linux/mmzone.h	17
1 file changed, 0 insertions(+), 17 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index b4d109e389b8..6e6e62648a4d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -348,21 +348,6 @@ struct zone {
 	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
 
 	/*
-	 * prev_priority holds the scanning priority for this zone.  It is
-	 * defined as the scanning priority at which we achieved our reclaim
-	 * target at the previous try_to_free_pages() or balance_pgdat()
-	 * invocation.
-	 *
-	 * We use prev_priority as a measure of how much stress page reclaim is
-	 * under - it drives the swappiness decision: whether to unmap mapped
-	 * pages.
-	 *
-	 * Access to both this field is quite racy even on uniprocessor.  But
-	 * it is expected to average out OK.
-	 */
-	int prev_priority;
-
-	/*
 	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
 	 * this zone's LRU.  Maintained by the pageout code.
 	 */
@@ -651,8 +636,6 @@ typedef struct pglist_data {
 #include <linux/memory_hotplug.h>
 
 extern struct mutex zonelists_mutex;
-void get_zone_counts(unsigned long *active, unsigned long *inactive,
-			unsigned long *free);
void build_all_zonelists(void *data);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,