diff options
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r-- | include/linux/mmzone.h | 47 |
1 file changed, 40 insertions, 7 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 156e18f3919b..59a4c8fd6ebd 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -82,21 +82,23 @@ enum zone_stat_item { | |||
82 | /* First 128 byte cacheline (assuming 64 bit words) */ | 82 | /* First 128 byte cacheline (assuming 64 bit words) */ |
83 | NR_FREE_PAGES, | 83 | NR_FREE_PAGES, |
84 | NR_LRU_BASE, | 84 | NR_LRU_BASE, |
85 | NR_INACTIVE = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ | 85 | NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */ |
86 | NR_ACTIVE, /* " " " " " */ | 86 | NR_ACTIVE_ANON, /* " " " " " */ |
87 | NR_INACTIVE_FILE, /* " " " " " */ | ||
88 | NR_ACTIVE_FILE, /* " " " " " */ | ||
87 | NR_ANON_PAGES, /* Mapped anonymous pages */ | 89 | NR_ANON_PAGES, /* Mapped anonymous pages */ |
88 | NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. | 90 | NR_FILE_MAPPED, /* pagecache pages mapped into pagetables. |
89 | only modified from process context */ | 91 | only modified from process context */ |
90 | NR_FILE_PAGES, | 92 | NR_FILE_PAGES, |
91 | NR_FILE_DIRTY, | 93 | NR_FILE_DIRTY, |
92 | NR_WRITEBACK, | 94 | NR_WRITEBACK, |
93 | /* Second 128 byte cacheline */ | ||
94 | NR_SLAB_RECLAIMABLE, | 95 | NR_SLAB_RECLAIMABLE, |
95 | NR_SLAB_UNRECLAIMABLE, | 96 | NR_SLAB_UNRECLAIMABLE, |
96 | NR_PAGETABLE, /* used for pagetables */ | 97 | NR_PAGETABLE, /* used for pagetables */ |
97 | NR_UNSTABLE_NFS, /* NFS unstable pages */ | 98 | NR_UNSTABLE_NFS, /* NFS unstable pages */ |
98 | NR_BOUNCE, | 99 | NR_BOUNCE, |
99 | NR_VMSCAN_WRITE, | 100 | NR_VMSCAN_WRITE, |
101 | /* Second 128 byte cacheline */ | ||
100 | NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ | 102 | NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ |
101 | #ifdef CONFIG_NUMA | 103 | #ifdef CONFIG_NUMA |
102 | NUMA_HIT, /* allocated in intended node */ | 104 | NUMA_HIT, /* allocated in intended node */ |
@@ -108,17 +110,36 @@ enum zone_stat_item { | |||
108 | #endif | 110 | #endif |
109 | NR_VM_ZONE_STAT_ITEMS }; | 111 | NR_VM_ZONE_STAT_ITEMS }; |
110 | 112 | ||
113 | /* | ||
114 | * We do arithmetic on the LRU lists in various places in the code, | ||
115 | * so it is important to keep the active lists LRU_ACTIVE higher in | ||
116 | * the array than the corresponding inactive lists, and to keep | ||
117 | * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists. | ||
118 | * | ||
119 | * This has to be kept in sync with the statistics in zone_stat_item | ||
120 | * above and the descriptions in vmstat_text in mm/vmstat.c | ||
121 | */ | ||
122 | #define LRU_BASE 0 | ||
123 | #define LRU_ACTIVE 1 | ||
124 | #define LRU_FILE 2 | ||
125 | |||
111 | enum lru_list { | 126 | enum lru_list { |
112 | LRU_BASE, | 127 | LRU_INACTIVE_ANON = LRU_BASE, |
113 | LRU_INACTIVE=LRU_BASE, /* must match order of NR_[IN]ACTIVE */ | 128 | LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE, |
114 | LRU_ACTIVE, /* " " " " " */ | 129 | LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE, |
130 | LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE, | ||
115 | NR_LRU_LISTS }; | 131 | NR_LRU_LISTS }; |
116 | 132 | ||
117 | #define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++) | 133 | #define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++) |
118 | 134 | ||
135 | static inline int is_file_lru(enum lru_list l) | ||
136 | { | ||
137 | return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE); | ||
138 | } | ||
139 | |||
119 | static inline int is_active_lru(enum lru_list l) | 140 | static inline int is_active_lru(enum lru_list l) |
120 | { | 141 | { |
121 | return (l == LRU_ACTIVE); | 142 | return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE); |
122 | } | 143 | } |
123 | 144 | ||
124 | struct per_cpu_pages { | 145 | struct per_cpu_pages { |
@@ -269,6 +290,18 @@ struct zone { | |||
269 | struct list_head list; | 290 | struct list_head list; |
270 | unsigned long nr_scan; | 291 | unsigned long nr_scan; |
271 | } lru[NR_LRU_LISTS]; | 292 | } lru[NR_LRU_LISTS]; |
293 | |||
294 | /* | ||
295 | * The pageout code in vmscan.c keeps track of how many of the | ||
296 | * mem/swap backed and file backed pages are referenced. | ||
297 | * The higher the rotated/scanned ratio, the more valuable | ||
298 | * that cache is. | ||
299 | * | ||
300 | * The anon LRU stats live in [0], file LRU stats in [1] | ||
301 | */ | ||
302 | unsigned long recent_rotated[2]; | ||
303 | unsigned long recent_scanned[2]; | ||
304 | |||
272 | unsigned long pages_scanned; /* since last reclaim */ | 305 | unsigned long pages_scanned; /* since last reclaim */ |
273 | unsigned long flags; /* zone flags, see below */ | 306 | unsigned long flags; /* zone flags, see below */ |
274 | 307 | ||