Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bee53495a829..cd5dc3faaa57 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1780,6 +1780,20 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		set_bit(PGDAT_WRITEBACK, &pgdat->flags);
 
 	/*
+	 * If dirty pages are scanned that are not queued for IO, it
+	 * implies that flushers are not doing their job. This can
+	 * happen when memory pressure pushes dirty pages to the end of
+	 * the LRU before the dirty limits are breached and the dirty
+	 * data has expired. It can also happen when the proportion of
+	 * dirty pages grows not through writes but through memory
+	 * pressure reclaiming all the clean cache. And in some cases,
+	 * the flushers simply cannot keep up with the allocation
+	 * rate. Nudge the flusher threads in case they are asleep.
+	 */
+	if (stat.nr_unqueued_dirty == nr_taken)
+		wakeup_flusher_threads(WB_REASON_VMSCAN);
+
+	/*
 	 * Legacy memcg will stall in page writeback so avoid forcibly
 	 * stalling here.
 	 */
@@ -1791,22 +1805,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 		if (stat.nr_dirty && stat.nr_dirty == stat.nr_congested)
 			set_bit(PGDAT_CONGESTED, &pgdat->flags);
 
-		/*
-		 * If dirty pages are scanned that are not queued for IO, it
-		 * implies that flushers are not doing their job. This can
-		 * happen when memory pressure pushes dirty pages to the end of
-		 * the LRU before the dirty limits are breached and the dirty
-		 * data has expired. It can also happen when the proportion of
-		 * dirty pages grows not through writes but through memory
-		 * pressure reclaiming all the clean cache. And in some cases,
-		 * the flushers simply cannot keep up with the allocation
-		 * rate. Nudge the flusher threads in case they are asleep, but
-		 * also allow kswapd to start writing pages during reclaim.
-		 */
-		if (stat.nr_unqueued_dirty == nr_taken) {
-			wakeup_flusher_threads(WB_REASON_VMSCAN);
+		/* Allow kswapd to start writing pages during reclaim. */
+		if (stat.nr_unqueued_dirty == nr_taken)
 			set_bit(PGDAT_DIRTY, &pgdat->flags);
-		}
 
 		/*
 		 * If kswapd scans pages marked marked for immediate
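
To trace what the patch changes, here is a minimal userspace sketch of the heuristic as it stands after this diff. It is illustrative only: struct scan_stat, wake_flushers() and mark_pgdat_dirty() are hypothetical stand-ins for the kernel's struct reclaim_stat, wakeup_flusher_threads() and set_bit(PGDAT_DIRTY, &pgdat->flags), and the legacy-memcg handling that sits between the two checks in the real code is omitted. The point is that the flusher wakeup now runs early and for every reclaimer, while the kswapd writeback fallback stays in the later block.

/* sketch.c - illustrative model of the reclaim heuristic above; NOT kernel code. */
#include <stdio.h>

struct scan_stat {
	unsigned long nr_dirty;          /* dirty pages seen in this batch */
	unsigned long nr_unqueued_dirty; /* dirty but not yet queued for IO */
};

static void wake_flushers(void)
{
	/* stands in for wakeup_flusher_threads(WB_REASON_VMSCAN) */
	puts("nudging flusher threads");
}

static void mark_pgdat_dirty(void)
{
	/* stands in for set_bit(PGDAT_DIRTY, ...): kswapd may write pages */
	puts("allowing kswapd to write pages during reclaim");
}

static void post_scan(const struct scan_stat *stat, unsigned long nr_taken)
{
	/*
	 * After the patch, the flusher wakeup happens unconditionally:
	 * if every page taken off the LRU tail was dirty and unqueued,
	 * the flushers are behind on this node, whoever is reclaiming.
	 */
	if (stat->nr_unqueued_dirty == nr_taken)
		wake_flushers();

	/* ... legacy-memcg stall handling elided ... */

	/* The kswapd writeback fallback remains a separate decision. */
	if (stat->nr_unqueued_dirty == nr_taken)
		mark_pgdat_dirty();
}

int main(void)
{
	/* all 32 pages pulled off the inactive list were unqueued dirty */
	struct scan_stat stat = { .nr_dirty = 32, .nr_unqueued_dirty = 32 };

	post_scan(&stat, 32);
	return 0;
}

Splitting the single nr_unqueued_dirty test into two keeps the behavior of each path independent: kicking the flushers no longer depends on the same conditions that gate kswapd's own writeback.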