diff options
| author | Peter Zijlstra <peterz@infradead.org> | 2013-08-14 08:55:40 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2013-09-25 08:07:50 -0400 |
| commit | a787870924dbd6f321661e06d4ec1c7a408c9ccf (patch) | |
| tree | 43aae13f34d21b001efd4a4a4e64f3d085988bad /include/asm-generic | |
| parent | f27dde8deef33c9e58027df11ceab2198601d6a6 (diff) | |
sched, arch: Create asm/preempt.h
In order to prepare for per-arch implementations of preempt_count, move
the required bits into an asm-generic header and use this for all
archs.
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-h5j0c1r3e3fk015m30h8f1zx@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/asm-generic')
| -rw-r--r-- | include/asm-generic/preempt.h | 54 |
1 files changed, 54 insertions, 0 deletions
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h new file mode 100644 index 000000000000..a1fc6590a743 --- /dev/null +++ b/include/asm-generic/preempt.h | |||
| @@ -0,0 +1,54 @@ | |||
| 1 | #ifndef __ASM_PREEMPT_H | ||
| 2 | #define __ASM_PREEMPT_H | ||
| 3 | |||
| 4 | #include <linux/thread_info.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users | ||
| 8 | * that think a non-zero value indicates we cannot preempt. | ||
| 9 | */ | ||
| 10 | static __always_inline int preempt_count(void) | ||
| 11 | { | ||
| 12 | return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED; | ||
| 13 | } | ||
| 14 | |||
| 15 | static __always_inline int *preempt_count_ptr(void) | ||
| 16 | { | ||
| 17 | return ¤t_thread_info()->preempt_count; | ||
| 18 | } | ||
| 19 | |||
| 20 | /* | ||
| 21 | * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the | ||
| 22 | * alternative is loosing a reschedule. Better schedule too often -- also this | ||
| 23 | * should be a very rare operation. | ||
| 24 | */ | ||
| 25 | static __always_inline void preempt_count_set(int pc) | ||
| 26 | { | ||
| 27 | *preempt_count_ptr() = pc; | ||
| 28 | } | ||
| 29 | |||
| 30 | /* | ||
| 31 | * We fold the NEED_RESCHED bit into the preempt count such that | ||
| 32 | * preempt_enable() can decrement and test for needing to reschedule with a | ||
| 33 | * single instruction. | ||
| 34 | * | ||
| 35 | * We invert the actual bit, so that when the decrement hits 0 we know we both | ||
| 36 | * need to resched (the bit is cleared) and can resched (no preempt count). | ||
| 37 | */ | ||
| 38 | |||
| 39 | static __always_inline void set_preempt_need_resched(void) | ||
| 40 | { | ||
| 41 | *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED; | ||
| 42 | } | ||
| 43 | |||
| 44 | static __always_inline void clear_preempt_need_resched(void) | ||
| 45 | { | ||
| 46 | *preempt_count_ptr() |= PREEMPT_NEED_RESCHED; | ||
| 47 | } | ||
| 48 | |||
| 49 | static __always_inline bool test_preempt_need_resched(void) | ||
| 50 | { | ||
| 51 | return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED); | ||
| 52 | } | ||
| 53 | |||
| 54 | #endif /* __ASM_PREEMPT_H */ | ||
