aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/mmzone.h
diff options
context:
space:
mode:
authorJiri Kosina <jkosina@suse.cz>2012-06-29 08:45:58 -0400
committerJiri Kosina <jkosina@suse.cz>2012-06-29 08:45:58 -0400
commit59f91e5dd0504dc0ebfaa0b6f3a55e6931f96266 (patch)
treeb913718405d44a921905ac71044fbde410256865 /include/linux/mmzone.h
parent57bdfdd80077addf518a9b90c4a66890efc4f70e (diff)
parent89abfab133ef1f5902abafb744df72793213ac19 (diff)
Merge branch 'master' into for-next
Conflicts: include/linux/mmzone.h Synced with Linus' tree so that trivial patch can be applied on top of up-to-date code properly. Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--include/linux/mmzone.h29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 54631776dff2..588c5cb2851d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -185,8 +185,22 @@ static inline int is_unevictable_lru(enum lru_list lru)
 	return (lru == LRU_UNEVICTABLE);
 }
 
+struct zone_reclaim_stat {
+	/*
+	 * The pageout code in vmscan.c keeps track of how many of the
+	 * mem/swap backed and file backed pages are referenced.
+	 * The higher the rotated/scanned ratio, the more valuable
+	 * that cache is.
+	 *
+	 * The anon LRU stats live in [0], file LRU stats in [1]
+	 */
+	unsigned long		recent_rotated[2];
+	unsigned long		recent_scanned[2];
+};
+
 struct lruvec {
 	struct list_head lists[NR_LRU_LISTS];
+	struct zone_reclaim_stat reclaim_stat;
 };
 
 /* Mask used at gathering information at once (see memcontrol.c) */
@@ -313,19 +327,6 @@ enum zone_type {
 #error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
 
-struct zone_reclaim_stat {
-	/*
-	 * The pageout code in vmscan.c keeps track of how many of the
-	 * mem/swap backed and file backed pages are referenced.
-	 * The higher the rotated/scanned ratio, the more valuable
-	 * that cache is.
-	 *
-	 * The anon LRU stats live in [0], file LRU stats in [1]
-	 */
-	unsigned long		recent_rotated[2];
-	unsigned long		recent_scanned[2];
-};
-
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 
@@ -407,8 +408,6 @@ struct zone {
 	spinlock_t		lru_lock;
 	struct lruvec		lruvec;
 
-	struct zone_reclaim_stat reclaim_stat;
-
 	unsigned long		pages_scanned;	   /* since last reclaim */
 	unsigned long		flags;		   /* zone flags, see below */
 