author		Benjamin Poirier <bpoirier@suse.com>	2018-12-28 03:39:23 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 15:11:51 -0500
commit		af3b854492f351d1ff3b4744a83bf5ff7eed4920 (patch)
tree		1f5eb6d523acd89f2d3b57598878fde1136cb060 /mm/page_alloc.c
parent		ab41ee6879981b3d3a16a1079a33fa6fd043eb3c (diff)
mm/page_alloc.c: allow error injection
Model the call chain after should_failslab(): likewise, we can now use a kprobe to override the return value of should_fail_alloc_page() and inject allocation failures into alloc_page*().

This allows injecting allocation failures with the BCC tools even without building the kernel with CONFIG_FAIL_PAGE_ALLOC and booting it with a fail_page_alloc= parameter, which incurs some overhead even when failures are not being injected. On the other hand, this patch adds an unconditional call to should_fail_alloc_page() in the page allocation hotpath. That overhead should be negligible with CONFIG_FAIL_PAGE_ALLOC=n when no kprobe is attached, though.

[vbabka@suse.cz: changelog addition]
Link: http://lkml.kernel.org/r/20181214074330.18917-1-bpoirier@suse.com
Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Pavel Tatashin <pavel.tatashin@microsoft.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alexander Duyck <alexander.h.duyck@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
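For illustration: once should_fail_alloc_page() is a noinline wrapper whitelisted with ALLOW_ERROR_INJECTION(), an error-injection-capable tracer can override its return value at run time. A minimal sketch with bpftrace (override() requires CONFIG_BPF_KPROBE_OVERRIDE; the order filter and the 1% failure rate below are illustrative choices, not part of this patch):

    # Fail roughly 1% of order >= 1 page allocations; arguments follow
    # should_fail_alloc_page(gfp_t gfp_mask, unsigned int order).
    bpftrace -e 'kprobe:should_fail_alloc_page /arg1 >= 1/ {
            if (rand % 100 < 1) {
                    override(1);    /* force a true return => fail the allocation */
            }
    }'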
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	10
1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 75865e1325b5..cde5dac6229a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3131,7 +3131,7 @@ static int __init setup_fail_page_alloc(char *str)
 }
 __setup("fail_page_alloc=", setup_fail_page_alloc);

-static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
 	if (order < fail_page_alloc.min_order)
 		return false;
@@ -3181,13 +3181,19 @@ late_initcall(fail_page_alloc_debugfs);

 #else /* CONFIG_FAIL_PAGE_ALLOC */

-static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
 {
 	return false;
 }

 #endif /* CONFIG_FAIL_PAGE_ALLOC */

+static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+{
+	return __should_fail_alloc_page(gfp_mask, order);
+}
+ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);
+
 /*
  * Return true if free base pages are above 'mark'. For high-order checks it
  * will return true of the order-0 watermark is reached and there is at least
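For contrast, a rough sketch of the pre-existing CONFIG_FAIL_PAGE_ALLOC route that this patch makes avoidable (requires CONFIG_FAIL_PAGE_ALLOC=y and CONFIG_FAULT_INJECTION_DEBUG_FS=y; the knob values are illustrative, see Documentation/fault-injection/fault-injection.txt):

    cd /sys/kernel/debug/fail_page_alloc
    echo 10 > probability   # likelihood of failure, in percent
    echo 1  > interval      # interval between failures
    echo -1 > times         # maximum number of failures; -1 = no limit
    echo 1  > min-order     # only inject into allocations of order >= 1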