path: root/fs/btrfs/extent_io.c
author	Mel Gorman <mgorman@techsingularity.net>	2015-11-06 19:28:21 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-06 20:50:42 -0500
commit	d0164adc89f6bb374d304ffcc375c6d2652fe67d (patch)
tree	de1cbe09c86dcd24a4a476f7e0b41af239bbdc29 /fs/btrfs/extent_io.c
parent	016c13daa5c9e4827eca703e2f0621c131f2cca3 (diff)
mm, page_alloc: distinguish between being unable to sleep, unwilling to sleep and avoiding waking kswapd
__GFP_WAIT has been used to identify atomic context in callers that hold spinlocks or are in interrupts. They are expected to be high priority and have access to one of two watermarks lower than "min" which can be referred to as the "atomic reserve". __GFP_HIGH users get access to the first lower watermark and can be called the "high priority reserve".

Over time, callers had a requirement to not block when fallback options were available. Some have abused __GFP_WAIT, leading to a situation where an optimistic allocation with a fallback option can access atomic reserves.

This patch uses __GFP_ATOMIC to identify callers that are truly atomic, cannot sleep and have no alternative. High priority users continue to use __GFP_HIGH. __GFP_DIRECT_RECLAIM identifies callers that can sleep and are willing to enter direct reclaim. __GFP_KSWAPD_RECLAIM identifies callers that want to wake kswapd for background reclaim. __GFP_WAIT is redefined as a caller that is willing to enter direct reclaim and wake kswapd for background reclaim.

This patch then converts a number of sites:

o __GFP_ATOMIC is used by callers that are high priority and have memory pools for those requests. GFP_ATOMIC uses this flag.

o Callers that have a limited mempool to guarantee forward progress clear __GFP_DIRECT_RECLAIM but keep __GFP_KSWAPD_RECLAIM. bio allocations fall into this category, where kswapd will still be woken but atomic reserves are not used as there is a one-entry mempool to guarantee progress.

o Callers that are checking if they are non-blocking should use the helper gfpflags_allow_blocking() where possible. This is because checking for __GFP_WAIT as was done historically can now trigger false positives. Some exceptions like dm-crypt.c exist where the code intent is clearer if __GFP_DIRECT_RECLAIM is used instead of the helper due to flag manipulations.

o Callers that built their own GFP flags instead of starting with GFP_KERNEL and friends now also need to specify __GFP_KSWAPD_RECLAIM.

The first key hazard to watch out for is callers that removed __GFP_WAIT and were depending on access to atomic reserves for inconspicuous reasons. In some cases it may be appropriate for them to use __GFP_HIGH.

The second key hazard is callers that assembled their own combination of GFP flags instead of starting with something like GFP_KERNEL. They may now wish to specify __GFP_KSWAPD_RECLAIM. It's almost certainly harmless if it's missed in most cases, as other activity will wake kswapd.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Vitaly Wool <vitalywool@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
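[Editorial note] A minimal C sketch of the conversion pattern described above, not part of the patch itself: example_alloc() and its surrounding logic are invented for illustration, while the gfp flags and the gfpflags_allow_blocking() helper are the ones this series introduces or converts to.

#include <linux/gfp.h>
#include <linux/slab.h>

/*
 * Hypothetical caller, for illustration only. It builds its own gfp
 * mask rather than starting from GFP_KERNEL, so after this series it
 * must add __GFP_KSWAPD_RECLAIM explicitly if it still wants kswapd
 * woken for background reclaim.
 */
static void *example_alloc(gfp_t caller_mask)
{
	gfp_t flags = __GFP_HIGH | __GFP_KSWAPD_RECLAIM;

	/*
	 * The old pattern was "if (caller_mask & __GFP_WAIT)"; after
	 * this series the helper is the preferred way to ask whether
	 * the caller may block.
	 */
	if (gfpflags_allow_blocking(caller_mask))
		flags |= __GFP_DIRECT_RECLAIM;

	return kmalloc(128, flags);
}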
Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r--	fs/btrfs/extent_io.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 3915c9473e94..032abfbebe76 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -594,7 +594,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
 		clear = 1;
 again:
-	if (!prealloc && (mask & __GFP_WAIT)) {
+	if (!prealloc && gfpflags_allow_blocking(mask)) {
 		/*
 		 * Don't care for allocation failure here because we might end
 		 * up not needing the pre-allocated extent state at all, which
@@ -718,7 +718,7 @@ search_again:
 	if (start > end)
 		goto out;
 	spin_unlock(&tree->lock);
-	if (mask & __GFP_WAIT)
+	if (gfpflags_allow_blocking(mask))
 		cond_resched();
 	goto again;
 }
@@ -850,7 +850,7 @@ __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 
 	bits |= EXTENT_FIRST_DELALLOC;
 again:
-	if (!prealloc && (mask & __GFP_WAIT)) {
+	if (!prealloc && gfpflags_allow_blocking(mask)) {
 		prealloc = alloc_extent_state(mask);
 		BUG_ON(!prealloc);
 	}
@@ -1028,7 +1028,7 @@ search_again:
 	if (start > end)
 		goto out;
 	spin_unlock(&tree->lock);
-	if (mask & __GFP_WAIT)
+	if (gfpflags_allow_blocking(mask))
 		cond_resched();
 	goto again;
 }
@@ -1076,7 +1076,7 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
 	btrfs_debug_check_extent_io_range(tree, start, end);
 
 again:
-	if (!prealloc && (mask & __GFP_WAIT)) {
+	if (!prealloc && gfpflags_allow_blocking(mask)) {
 		/*
 		 * Best effort, don't worry if extent state allocation fails
 		 * here for the first iteration. We might have a cached state
@@ -1253,7 +1253,7 @@ search_again:
 	if (start > end)
 		goto out;
 	spin_unlock(&tree->lock);
-	if (mask & __GFP_WAIT)
+	if (gfpflags_allow_blocking(mask))
 		cond_resched();
 	first_iteration = false;
 	goto again;
@@ -4319,7 +4319,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 	u64 start = page_offset(page);
 	u64 end = start + PAGE_CACHE_SIZE - 1;
 
-	if ((mask & __GFP_WAIT) &&
+	if (gfpflags_allow_blocking(mask) &&
 	    page->mapping->host->i_size > 16 * 1024 * 1024) {
 		u64 len;
 		while (start <= end) {
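[Editorial note] For context on the conversions above, the gfpflags_allow_blocking() helper that replaces the __GFP_WAIT checks is, approximately, the following one-liner from include/linux/gfp.h; this is a sketch of its intent rather than a verbatim copy of the header.

static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	/* Blocking is allowed only if direct reclaim may be entered. */
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}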