Diffstat (limited to 'mm')
 -rw-r--r--   mm/memcontrol.c   29
 -rw-r--r--   mm/swap.c         34
 -rw-r--r--   mm/vmscan.c       27
3 files changed, 67 insertions, 23 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 313247e6c503..7b7f4dc05035 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -103,6 +103,8 @@ struct mem_cgroup_per_zone {
 	 */
 	struct list_head lists[NR_LRU_LISTS];
 	unsigned long count[NR_LRU_LISTS];
+
+	struct zone_reclaim_stat reclaim_stat;
 };
 /* Macro for accessing counter */
 #define MEM_CGROUP_ZSTAT(mz, idx) ((mz)->count[(idx)])
@@ -458,6 +460,33 @@ unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
 	return MEM_CGROUP_ZSTAT(mz, lru);
 }
 
+struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
+						       struct zone *zone)
+{
+	int nid = zone->zone_pgdat->node_id;
+	int zid = zone_idx(zone);
+	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
+
+	return &mz->reclaim_stat;
+}
+
+struct zone_reclaim_stat *
+mem_cgroup_get_reclaim_stat_from_page(struct page *page)
+{
+	struct page_cgroup *pc;
+	struct mem_cgroup_per_zone *mz;
+
+	if (mem_cgroup_disabled())
+		return NULL;
+
+	pc = lookup_page_cgroup(page);
+	mz = page_cgroup_zoneinfo(pc);
+	if (!mz)
+		return NULL;
+
+	return &mz->reclaim_stat;
+}
+
 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
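
[Note] For reference, the zone_reclaim_stat block that the new reclaim_stat member embeds is the per-zone statistics structure introduced earlier in this series (defined in include/linux/mmzone.h); the sketch below paraphrases that definition, with the field comments reworded here:

struct zone_reclaim_stat {
	/*
	 * Reclaim tracks how many pages on each LRU were scanned and how
	 * many were found referenced and rotated back to the active list.
	 * Index 0 holds the anon statistics, index 1 the file statistics.
	 */
	unsigned long	recent_rotated[2];
	unsigned long	recent_scanned[2];
};

Embedding one copy per mem_cgroup_per_zone is what lets the accessors above hand back either the zone-wide or the per-cgroup counters.
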
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -151,13 +151,32 @@ void rotate_reclaimable_page(struct page *page)
 	}
 }
 
+static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+				     int file, int rotated)
+{
+	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
+	struct zone_reclaim_stat *memcg_reclaim_stat;
+
+	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+
+	reclaim_stat->recent_scanned[file]++;
+	if (rotated)
+		reclaim_stat->recent_rotated[file]++;
+
+	if (!memcg_reclaim_stat)
+		return;
+
+	memcg_reclaim_stat->recent_scanned[file]++;
+	if (rotated)
+		memcg_reclaim_stat->recent_rotated[file]++;
+}
+
 /*
  * FIXME: speed this up?
  */
 void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
-	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
 
 	spin_lock_irq(&zone->lru_lock);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -170,8 +189,7 @@ void activate_page(struct page *page)
 		add_page_to_lru_list(zone, page, lru);
 		__count_vm_event(PGACTIVATE);
 
-		reclaim_stat->recent_rotated[!!file]++;
-		reclaim_stat->recent_scanned[!!file]++;
+		update_page_reclaim_stat(zone, page, !!file, 1);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
@@ -386,7 +404,6 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
 	int i;
 	struct zone *zone = NULL;
-	struct zone_reclaim_stat *reclaim_stat = NULL;
 
 	VM_BUG_ON(is_unevictable_lru(lru));
 
@@ -394,24 +411,23 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 		struct page *page = pvec->pages[i];
 		struct zone *pagezone = page_zone(page);
 		int file;
+		int active;
 
 		if (pagezone != zone) {
 			if (zone)
 				spin_unlock_irq(&zone->lru_lock);
 			zone = pagezone;
-			reclaim_stat = &zone->reclaim_stat;
 			spin_lock_irq(&zone->lru_lock);
 		}
 		VM_BUG_ON(PageActive(page));
 		VM_BUG_ON(PageUnevictable(page));
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
+		active = is_active_lru(lru);
 		file = is_file_lru(lru);
-		reclaim_stat->recent_scanned[file]++;
-		if (is_active_lru(lru)) {
+		if (active)
 			SetPageActive(page);
-			reclaim_stat->recent_rotated[file]++;
-		}
+		update_page_reclaim_stat(zone, page, file, active);
 		add_page_to_lru_list(zone, page, lru);
 	}
 	if (zone)
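
[Note] The update_page_reclaim_stat() helper added above always charges the zone-wide counters and additionally charges the page's memory cgroup when one is tracked; mem_cgroup_get_reclaim_stat_from_page() returns NULL when the controller is disabled, so the memcg leg is simply skipped. Below is a standalone model of that accounting, with invented names and no locking, sketched purely for illustration (it is not kernel code):

#include <assert.h>
#include <stddef.h>

struct zone_reclaim_stat {
	unsigned long recent_rotated[2];	/* [0] anon, [1] file */
	unsigned long recent_scanned[2];
};

/* Model: a page is charged to a zone and, optionally, to a memcg. */
struct model_page {
	struct zone_reclaim_stat *zone_stat;	/* always present */
	struct zone_reclaim_stat *memcg_stat;	/* NULL when memcg is disabled */
};

static void model_update_reclaim_stat(struct model_page *page, int file,
				      int rotated)
{
	struct zone_reclaim_stat *stats[] = { page->zone_stat, page->memcg_stat };
	int i;

	for (i = 0; i < 2; i++) {
		if (!stats[i])
			continue;	/* no memcg accounting for this page */
		stats[i]->recent_scanned[file]++;
		if (rotated)
			stats[i]->recent_rotated[file]++;
	}
}

int main(void)
{
	struct zone_reclaim_stat zone = { { 0 }, { 0 } };
	struct zone_reclaim_stat memcg = { { 0 }, { 0 } };
	struct model_page p = { &zone, &memcg };

	model_update_reclaim_stat(&p, 1, 1);	/* file page, rotated */
	assert(zone.recent_rotated[1] == 1 && memcg.recent_rotated[1] == 1);

	p.memcg_stat = NULL;			/* controller disabled */
	model_update_reclaim_stat(&p, 0, 0);	/* anon page, scanned only */
	assert(zone.recent_scanned[0] == 1 && memcg.recent_scanned[0] == 0);
	return 0;
}
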
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d958d624d3ae..56fc7abe4d23 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -133,6 +133,9 @@ static DECLARE_RWSEM(shrinker_rwsem);
 static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
 						  struct scan_control *sc)
 {
+	if (!scan_global_lru(sc))
+		return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
+
 	return &zone->reclaim_stat;
 }
 
@@ -1087,17 +1090,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		__mod_zone_page_state(zone, NR_INACTIVE_ANON,
 						-count[LRU_INACTIVE_ANON]);
 
-		if (scan_global_lru(sc)) {
+		if (scan_global_lru(sc))
 			zone->pages_scanned += nr_scan;
-			reclaim_stat->recent_scanned[0] +=
-							count[LRU_INACTIVE_ANON];
-			reclaim_stat->recent_scanned[0] +=
-							count[LRU_ACTIVE_ANON];
-			reclaim_stat->recent_scanned[1] +=
-							count[LRU_INACTIVE_FILE];
-			reclaim_stat->recent_scanned[1] +=
-							count[LRU_ACTIVE_FILE];
-		}
+
+		reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
+		reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
+		reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
+		reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+
 		spin_unlock_irq(&zone->lru_lock);
 
 		nr_scanned += nr_scan;
@@ -1155,7 +1155,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 			SetPageLRU(page);
 			lru = page_lru(page);
 			add_page_to_lru_list(zone, page, lru);
-			if (PageActive(page) && scan_global_lru(sc)) {
+			if (PageActive(page)) {
 				int file = !!page_is_file_cache(page);
 				reclaim_stat->recent_rotated[file]++;
 			}
@@ -1230,8 +1230,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 */
 	if (scan_global_lru(sc)) {
 		zone->pages_scanned += pgscanned;
-		reclaim_stat->recent_scanned[!!file] += pgmoved;
 	}
+	reclaim_stat->recent_scanned[!!file] += pgmoved;
 
 	if (file)
 		__mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
@@ -1272,8 +1272,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * This helps balance scan pressure between file and anonymous
 	 * pages in get_scan_ratio.
 	 */
-	if (scan_global_lru(sc))
-		reclaim_stat->recent_rotated[!!file] += pgmoved;
+	reclaim_stat->recent_rotated[!!file] += pgmoved;
 
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
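
[Note] The point of the recent_scanned/recent_rotated counters is the anon-versus-file balancing done in get_scan_ratio(): the higher a list's rotated-to-scanned ratio, the more of its pages were recently found referenced, and the less aggressively that list is scanned. With get_reclaim_stat() now returning the cgroup's own statistics during memcg reclaim (the first mm/vmscan.c hunk above), the same balancing works inside a cgroup as well. The sketch below is a simplified, standalone illustration of how such weights can be derived; it is not the kernel's exact get_scan_ratio() code, and the priorities and counter values in main() are invented.

#include <stdio.h>

struct zone_reclaim_stat {
	unsigned long recent_rotated[2];	/* [0] anon, [1] file */
	unsigned long recent_scanned[2];
};

/*
 * Derive relative scan weights from the reclaim statistics.  A list whose
 * pages keep getting rotated back (high rotated/scanned ratio) is treated
 * as more valuable and receives a lower weight, i.e. it is scanned less.
 */
static void scan_weights(const struct zone_reclaim_stat *stat,
			 unsigned long anon_prio, unsigned long file_prio,
			 unsigned long *ap, unsigned long *fp)
{
	*ap = (anon_prio + 1) * (stat->recent_scanned[0] + 1) /
	      (stat->recent_rotated[0] + 1);
	*fp = (file_prio + 1) * (stat->recent_scanned[1] + 1) /
	      (stat->recent_rotated[1] + 1);
}

int main(void)
{
	/* Invented numbers: anon pages are being re-referenced heavily. */
	struct zone_reclaim_stat stat = {
		.recent_rotated = { 900, 100 },
		.recent_scanned = { 1000, 1000 },
	};
	unsigned long ap, fp;

	scan_weights(&stat, 100, 100, &ap, &fp);
	printf("anon weight %lu, file weight %lu\n", ap, fp);	/* file >> anon */
	return 0;
}

In the kernel, the base priorities come from the swappiness setting and the counters are periodically halved so that old history decays.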