Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 16
1 file changed, 6 insertions, 10 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e61445dce04e..8286938c70de 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			 * caller can stall after page list has been processed.
 			 *
 			 * 2) Global or new memcg reclaim encounters a page that is
-			 *    not marked for immediate reclaim or the caller does not
-			 *    have __GFP_IO. In this case mark the page for immediate
+			 *    not marked for immediate reclaim, or the caller does not
+			 *    have __GFP_FS (or __GFP_IO if it's simply going to swap,
+			 *    not to fs). In this case mark the page for immediate
 			 *    reclaim and continue scanning.
 			 *
-			 *    __GFP_IO is checked because a loop driver thread might
+			 *    Require may_enter_fs because we would wait on fs, which
+			 *    may not have submitted IO yet. And the loop driver might
 			 *    enter reclaim, and deadlock if it waits on a page for
 			 *    which it is needed to do the write (loop masks off
 			 *    __GFP_IO|__GFP_FS for this reason); but more thought
 			 *    would probably show more reasons.
 			 *
-			 *    Don't require __GFP_FS, since we're not going into the
-			 *    FS, just waiting on its writeback completion. Worryingly,
-			 *    ext4 gfs2 and xfs allocate pages with
-			 *    grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
-			 *    may_enter_fs here is liable to OOM on them.
-			 *
 			 * 3) Legacy memcg encounters a page that is not already marked
 			 *    PageReclaim. memcg does not have any dirty pages
 			 *    throttling so we could easily OOM just because too many
@@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
 			/* Case 2 above */
 			} else if (sane_reclaim(sc) ||
-			    !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) {
+			    !PageReclaim(page) || !may_enter_fs) {
 				/*
 				 * This is slightly racy - end_page_writeback()
 				 * might have just cleared PageReclaim, then
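For context (not part of this diff): may_enter_fs is derived earlier in shrink_page_list()'s per-page loop, roughly as sketched below for kernels around this version, which is why the new comment treats __GFP_IO as sufficient when the page is simply going to swap rather than to a filesystem.

	/* sketch of how may_enter_fs is computed earlier in shrink_page_list();
	 * paraphrased, details may differ slightly between kernel versions */
	may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
		(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

The deadlock the comment guards against is the one where the loop driver masks __GFP_IO|__GFP_FS off its backing file's mapping; a loop thread that then enters reclaim with those flags cleared must not be made to wait here on writeback that it is itself needed to complete.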