Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 53e4534885ad..5a8776eb0f43 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1439,6 +1439,19 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 }
 
 /*
+ * If a kernel thread (such as nfsd for loop-back mounts) services
+ * a backing device by writing to the page cache it sets PF_LESS_THROTTLE.
+ * In that case we should only throttle if the backing device it is
+ * writing to is congested. In other cases it is safe to throttle.
+ */
+static int current_may_throttle(void)
+{
+	return !(current->flags & PF_LESS_THROTTLE) ||
+		current->backing_dev_info == NULL ||
+		bdi_write_congested(current->backing_dev_info);
+}
+
+/*
  * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
  * of reclaimed pages
  */
@@ -1566,7 +1579,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * implies that pages are cycling through the LRU faster than
 	 * they are written so also forcibly stall.
 	 */
-	if (nr_unqueued_dirty == nr_taken || nr_immediate)
+	if ((nr_unqueued_dirty == nr_taken || nr_immediate) &&
+	    current_may_throttle())
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 }
 
@@ -1575,7 +1589,8 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	 * is congested. Allow kswapd to continue until it starts encountering
 	 * unqueued dirty pages or cycling through the LRU too quickly.
 	 */
-	if (!sc->hibernation_mode && !current_is_kswapd())
+	if (!sc->hibernation_mode && !current_is_kswapd() &&
+	    current_may_throttle())
 		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
 
 	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
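For context, here is a minimal sketch of the caller side this patch accommodates: a kernel thread marks itself PF_LESS_THROTTLE before writing to the page cache on behalf of a backing device, as nfsd does in its service loop. The thread function, its name, and the loop body below are hypothetical illustrations; only the flag-setting line reflects the actual pattern, and the comment describes current_may_throttle() as added by this patch.

#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical service thread; name and request loop are illustrative. */
static int example_service_thread(void *data)
{
	/*
	 * Opt out of unconditional reclaim throttling. With this flag
	 * set, current_may_throttle() only permits a stall when the
	 * backing device recorded in current->backing_dev_info is
	 * congested or unknown (NULL).
	 */
	current->flags |= PF_LESS_THROTTLE;

	while (!kthread_should_stop()) {
		/* ... service requests by writing into the page cache ... */
	}
	return 0;
}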