author		Hillf Danton <dhillf@gmail.com>			2012-03-21 19:34:02 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 20:54:57 -0400
commit		d563c0501bf8702b9b683206c09b9defb37d8a8a (patch)
tree		ac3c870c1885337e340d807cd8b5dc0ad8eb0315 /mm
parent		978ea78b65794ef07eb66b9946064dea66b52554 (diff)
vmscan: handle isolated pages with lru lock released
When shrinking the inactive lru list, isolated pages are queued on a
locally private list, so the lock-hold time can be reduced by counting
those pages without lock protection.

To achieve that, first, updating the reclaim stat is delayed until the
putback stage, after the lru lock has been reacquired.

Second, operations on the vm and zone stats are now protected by
disabling preemption, since they are per-cpu operations.
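
The double-underscore per-cpu stat helpers used here
(__count_vm_events(), __mod_zone_page_state()) do a non-atomic
read-modify-write on per-cpu counters.  Under zone->lru_lock they were
implicitly safe, because spin_lock_irq() disables interrupts; once they
run with the lock released, preemption has to be disabled by hand so the
task cannot migrate to another cpu between the read and the write.
Condensed from the update_isolated_counts() hunk below (a sketch of the
new critical section, not the whole function):

	preempt_disable();
	/* non-atomic per-cpu updates, safe only while preemption is off */
	__count_vm_events(PGDEACTIVATE, nr_active);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
	preempt_enable();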
Signed-off-by: Hillf Danton <dhillf@gmail.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c | 21 ++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ae3bf0a09cdd..57d8ef6ee4dd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1413,7 +1413,6 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
 			unsigned long *nr_anon,
 			unsigned long *nr_file)
 {
-	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 	struct zone *zone = mz->zone;
 	unsigned int count[NR_LRU_LISTS] = { 0, };
 	unsigned long nr_active = 0;
@@ -1434,6 +1433,7 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
 		count[lru] += numpages;
 	}
 
+	preempt_disable();
 	__count_vm_events(PGDEACTIVATE, nr_active);
 
 	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
@@ -1448,8 +1448,9 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
 	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
 	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
 
-	reclaim_stat->recent_scanned[0] += *nr_anon;
-	reclaim_stat->recent_scanned[1] += *nr_file;
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
+	preempt_enable();
 }
 
 /*
@@ -1511,6 +1512,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 	unsigned long nr_writeback = 0;
 	isolate_mode_t isolate_mode = ISOLATE_INACTIVE;
 	struct zone *zone = mz->zone;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 
 	while (unlikely(too_many_isolated(zone, file, sc))) {
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1544,19 +1546,13 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 			__count_zone_vm_events(PGSCAN_DIRECT, zone,
 					       nr_scanned);
 	}
+	spin_unlock_irq(&zone->lru_lock);
 
-	if (nr_taken == 0) {
-		spin_unlock_irq(&zone->lru_lock);
+	if (nr_taken == 0)
 		return 0;
-	}
 
 	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
 
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
-
-	spin_unlock_irq(&zone->lru_lock);
-
 	nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
 						&nr_dirty, &nr_writeback);
 
@@ -1569,6 +1565,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 	spin_lock_irq(&zone->lru_lock);
 
+	reclaim_stat->recent_scanned[0] += nr_anon;
+	reclaim_stat->recent_scanned[1] += nr_file;
+
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
 	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
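
For reference, the locking flow in shrink_inactive_list() after this
patch looks roughly as follows (condensed; isolation arguments, event
counters, and error handling elided):

	spin_lock_irq(&zone->lru_lock);
	/* ... isolate nr_taken pages from the inactive lru onto page_list ... */
	spin_unlock_irq(&zone->lru_lock);	/* dropped before any counting */

	if (nr_taken == 0)
		return 0;

	/* counts the private list with only preemption disabled */
	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);

	nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
					&nr_dirty, &nr_writeback);

	spin_lock_irq(&zone->lru_lock);		/* reacquired for putback */
	/* reclaim stat update delayed until the lock is held again */
	reclaim_stat->recent_scanned[0] += nr_anon;
	reclaim_stat->recent_scanned[1] += nr_file;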