author     Hugh Dickins <hughd@google.com>                 2012-05-29 18:06:53 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-29 19:22:25 -0400
commit     89abfab133ef1f5902abafb744df72793213ac19 (patch)
tree       29df29e2a34a0af3649417d2e430480c7e7e5fa1 /include/linux/mmzone.h
parent     c3c787e8c38557ccf44c670d73aebe630a2b1479 (diff)
mm/memcg: move reclaim_stat into lruvec
With mem_cgroup_disabled() now explicit, it becomes clear that the
zone_reclaim_stat structure actually belongs in lruvec, per-zone when
memcg is disabled but per-memcg per-zone when it's enabled.
We can delete mem_cgroup_get_reclaim_stat(), and change
update_page_reclaim_stat() to update just the one set of stats, the one
which get_scan_count() will actually use.
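Since the stats now live only in the lruvec, update_page_reclaim_stat() collapses to bumping a single pair of counters. A minimal sketch of that shape (not the exact mm/swap.c code from this patch; page_lruvec() below is a hypothetical stand-in for whatever helper maps a page to its lruvec):

/*
 * Sketch only: the update path after this patch.  page_lruvec() is a
 * hypothetical lookup standing in for the real helper; it must return
 * the zone's own lruvec when memcg is disabled and the per-memcg
 * per-zone lruvec when memcg is enabled.
 */
static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct lruvec *lruvec = page_lruvec(zone, page);
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	/*
	 * Only one set of counters to update now -- the same set that
	 * get_scan_count() will later read for this lruvec.
	 */
	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}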
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/mmzone.h')
-rw-r--r--   include/linux/mmzone.h   29
1 file changed, 14 insertions, 15 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4871e31ae277..1b89861eedc0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -185,8 +185,22 @@ static inline int is_unevictable_lru(enum lru_list lru)
 	return (lru == LRU_UNEVICTABLE);
 }
 
+struct zone_reclaim_stat {
+	/*
+	 * The pageout code in vmscan.c keeps track of how many of the
+	 * mem/swap backed and file backed pages are refeferenced.
+	 * The higher the rotated/scanned ratio, the more valuable
+	 * that cache is.
+	 *
+	 * The anon LRU stats live in [0], file LRU stats in [1]
+	 */
+	unsigned long recent_rotated[2];
+	unsigned long recent_scanned[2];
+};
+
 struct lruvec {
 	struct list_head lists[NR_LRU_LISTS];
+	struct zone_reclaim_stat reclaim_stat;
 };
 
 /* Mask used at gathering information at once (see memcontrol.c) */
@@ -313,19 +327,6 @@ enum zone_type {
 #error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
 
-struct zone_reclaim_stat {
-	/*
-	 * The pageout code in vmscan.c keeps track of how many of the
-	 * mem/swap backed and file backed pages are refeferenced.
-	 * The higher the rotated/scanned ratio, the more valuable
-	 * that cache is.
-	 *
-	 * The anon LRU stats live in [0], file LRU stats in [1]
-	 */
-	unsigned long recent_rotated[2];
-	unsigned long recent_scanned[2];
-};
-
 struct zone {
 	/* Fields commonly accessed by the page allocator */
 
@@ -407,8 +408,6 @@ struct zone {
 	spinlock_t lru_lock;
 	struct lruvec lruvec;
 
-	struct zone_reclaim_stat reclaim_stat;
-
 	unsigned long pages_scanned;	/* since last reclaim */
 	unsigned long flags;		/* zone flags, see below */
 
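On the consumer side, get_scan_count() in mm/vmscan.c now reaches the rotated/scanned counters through the lruvec it is about to scan rather than through the zone or a memcg accessor. A rough sketch of that access pattern (illustrative only; the helper name and the percentage arithmetic are not the real proportional-scan code):

/*
 * Illustrative only: how a reclaim-side consumer reaches the stats
 * after this patch.  recent_rotated_ratio() is a hypothetical helper;
 * the real get_scan_count() arithmetic is more involved.
 */
static unsigned long recent_rotated_ratio(struct lruvec *lruvec, int file)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	/* higher rotated/scanned means that cache is more valuable */
	return (reclaim_stat->recent_rotated[file] + 1) * 100 /
	       (reclaim_stat->recent_scanned[file] + 1);
}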