diff options
| author | Hugh Dickins <hughd@google.com> | 2012-05-29 18:06:53 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-29 19:22:25 -0400 |
| commit | 89abfab133ef1f5902abafb744df72793213ac19 (patch) | |
| tree | 29df29e2a34a0af3649417d2e430480c7e7e5fa1 /include/linux | |
| parent | c3c787e8c38557ccf44c670d73aebe630a2b1479 (diff) | |
mm/memcg: move reclaim_stat into lruvec
With mem_cgroup_disabled() now explicit, it becomes clear that the
zone_reclaim_stat structure actually belongs in lruvec, per-zone when
memcg is disabled but per-memcg per-zone when it's enabled.
We can delete mem_cgroup_get_reclaim_stat(), and change
update_page_reclaim_stat() to update just the one set of stats, the one
which get_scan_count() will actually use.
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Glauber Costa <glommer@parallels.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux')
| -rw-r--r-- | include/linux/memcontrol.h | 9 | ||||
| -rw-r--r-- | include/linux/mmzone.h | 29 |
2 files changed, 14 insertions, 24 deletions
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 18ea0b7baf32..cfe9050ad8da 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -126,8 +126,6 @@ int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, | |||
| 126 | int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); | 126 | int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); |
| 127 | unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, | 127 | unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, |
| 128 | int nid, int zid, unsigned int lrumask); | 128 | int nid, int zid, unsigned int lrumask); |
| 129 | struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, | ||
| 130 | struct zone *zone); | ||
| 131 | struct zone_reclaim_stat* | 129 | struct zone_reclaim_stat* |
| 132 | mem_cgroup_get_reclaim_stat_from_page(struct page *page); | 130 | mem_cgroup_get_reclaim_stat_from_page(struct page *page); |
| 133 | extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, | 131 | extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, |
| @@ -356,13 +354,6 @@ mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid, | |||
| 356 | return 0; | 354 | return 0; |
| 357 | } | 355 | } |
| 358 | 356 | ||
| 359 | |||
| 360 | static inline struct zone_reclaim_stat* | ||
| 361 | mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone) | ||
| 362 | { | ||
| 363 | return NULL; | ||
| 364 | } | ||
| 365 | |||
| 366 | static inline struct zone_reclaim_stat* | 357 | static inline struct zone_reclaim_stat* |
| 367 | mem_cgroup_get_reclaim_stat_from_page(struct page *page) | 358 | mem_cgroup_get_reclaim_stat_from_page(struct page *page) |
| 368 | { | 359 | { |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 4871e31ae277..1b89861eedc0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -185,8 +185,22 @@ static inline int is_unevictable_lru(enum lru_list lru) | |||
| 185 | return (lru == LRU_UNEVICTABLE); | 185 | return (lru == LRU_UNEVICTABLE); |
| 186 | } | 186 | } |
| 187 | 187 | ||
| 188 | struct zone_reclaim_stat { | ||
| 189 | /* | ||
| 190 | * The pageout code in vmscan.c keeps track of how many of the | ||
| 191 | * mem/swap backed and file backed pages are referenced. | ||
| 192 | * The higher the rotated/scanned ratio, the more valuable | ||
| 193 | * that cache is. | ||
| 194 | * | ||
| 195 | * The anon LRU stats live in [0], file LRU stats in [1] | ||
| 196 | */ | ||
| 197 | unsigned long recent_rotated[2]; | ||
| 198 | unsigned long recent_scanned[2]; | ||
| 199 | }; | ||
| 200 | |||
| 188 | struct lruvec { | 201 | struct lruvec { |
| 189 | struct list_head lists[NR_LRU_LISTS]; | 202 | struct list_head lists[NR_LRU_LISTS]; |
| 203 | struct zone_reclaim_stat reclaim_stat; | ||
| 190 | }; | 204 | }; |
| 191 | 205 | ||
| 192 | /* Mask used at gathering information at once (see memcontrol.c) */ | 206 | /* Mask used at gathering information at once (see memcontrol.c) */ |
| @@ -313,19 +327,6 @@ enum zone_type { | |||
| 313 | #error ZONES_SHIFT -- too many zones configured adjust calculation | 327 | #error ZONES_SHIFT -- too many zones configured adjust calculation |
| 314 | #endif | 328 | #endif |
| 315 | 329 | ||
| 316 | struct zone_reclaim_stat { | ||
| 317 | /* | ||
| 318 | * The pageout code in vmscan.c keeps track of how many of the | ||
| 319 | * mem/swap backed and file backed pages are referenced. | ||
| 320 | * The higher the rotated/scanned ratio, the more valuable | ||
| 321 | * that cache is. | ||
| 322 | * | ||
| 323 | * The anon LRU stats live in [0], file LRU stats in [1] | ||
| 324 | */ | ||
| 325 | unsigned long recent_rotated[2]; | ||
| 326 | unsigned long recent_scanned[2]; | ||
| 327 | }; | ||
| 328 | |||
| 329 | struct zone { | 330 | struct zone { |
| 330 | /* Fields commonly accessed by the page allocator */ | 331 | /* Fields commonly accessed by the page allocator */ |
| 331 | 332 | ||
| @@ -407,8 +408,6 @@ struct zone { | |||
| 407 | spinlock_t lru_lock; | 408 | spinlock_t lru_lock; |
| 408 | struct lruvec lruvec; | 409 | struct lruvec lruvec; |
| 409 | 410 | ||
| 410 | struct zone_reclaim_stat reclaim_stat; | ||
| 411 | |||
| 412 | unsigned long pages_scanned; /* since last reclaim */ | 411 | unsigned long pages_scanned; /* since last reclaim */ |
| 413 | unsigned long flags; /* zone flags, see below */ | 412 | unsigned long flags; /* zone flags, see below */ |
| 414 | 413 | ||
