aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/kernel.h
diff options
context:
space:
mode:
authorDaniel Vetter <daniel.vetter@ffwll.ch>2019-08-26 16:14:23 -0400
committerJason Gunthorpe <jgg@mellanox.com>2019-09-07 03:28:05 -0400
commit312364f3534cc974b79a96d062bde2386315201f (patch)
tree19103ff516fbff6a78ce059b63cb5d4cbf598f1a /include/linux/kernel.h
parentf2bc09e9519181c7ca7ad4778d46b804c5b4c8c9 (diff)
kernel.h: Add non_block_start/end()
In some special cases we must not block, but there's not a spinlock, preempt-off, irqs-off or similar critical section already that arms the might_sleep() debug checks. Add a non_block_start/end() pair to annotate these. This will be used in the oom paths of mmu-notifiers, where blocking is not allowed to make sure there's forward progress. Quoting Michal: "The notifier is called from quite a restricted context - oom_reaper - which shouldn't depend on any locks or sleepable conditionals. The code should be swift as well but we mostly do care about it to make a forward progress. Checking for sleepable context is the best thing we could come up with that would describe these demands at least partially." Peter also asked whether we want to catch spinlocks on top, but Michal said those are less of a problem because spinlocks can't have an indirect dependency upon the page allocator and hence close the loop with the oom reaper. Suggested by Michal Hocko. Link: https://lore.kernel.org/r/20190826201425.17547-4-daniel.vetter@ffwll.ch Acked-by: Christian König <christian.koenig@amd.com> (v1) Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Daniel Vetter <daniel.vetter@intel.com> Acked-by: Michal Hocko <mhocko@suse.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Diffstat (limited to 'include/linux/kernel.h')
-rw-r--r--  include/linux/kernel.h | 23 ++++++++++++++++++++++-
1 file changed, 22 insertions(+), 1 deletion(-)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 4fa360a13c1e..d83d403dac2e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -217,7 +217,9 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
  * might_sleep - annotation for functions that can sleep
  *
  * this macro will print a stack trace if it is executed in an atomic
- * context (spinlock, irq-handler, ...).
+ * context (spinlock, irq-handler, ...). Additional sections where blocking is
+ * not allowed can be annotated with non_block_start() and non_block_end()
+ * pairs.
  *
  * This is a useful debugging help to be able to catch problems early and not
  * be bitten later when the calling function happens to sleep when it is not
@@ -233,6 +235,23 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
 # define cant_sleep() \
 	do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
 # define sched_annotate_sleep()	(current->task_state_change = 0)
+/**
+ * non_block_start - annotate the start of section where sleeping is prohibited
+ *
+ * This is on behalf of the oom reaper, specifically when it is calling the mmu
+ * notifiers. The problem is that if the notifier were to block on, for example,
+ * mutex_lock() and if the process which holds that mutex were to perform a
+ * sleeping memory allocation, the oom reaper is now blocked on completion of
+ * that memory allocation. Other blocking calls like wait_event() pose similar
+ * issues.
+ */
+# define non_block_start() (current->non_block_count++)
+/**
+ * non_block_end - annotate the end of section where sleeping is prohibited
+ *
+ * Closes a section opened by non_block_start().
+ */
+# define non_block_end() WARN_ON(current->non_block_count-- == 0)
 #else
   static inline void ___might_sleep(const char *file, int line,
 		   int preempt_offset) { }
@@ -241,6 +260,8 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
 # define might_sleep() do { might_resched(); } while (0)
 # define cant_sleep() do { } while (0)
 # define sched_annotate_sleep() do { } while (0)
+# define non_block_start() do { } while (0)
+# define non_block_end() do { } while (0)
 #endif
 
 #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)