Diffstat (limited to 'include/linux/preempt.h')
 -rw-r--r--  include/linux/preempt.h | 112
 1 file changed, 51 insertions, 61 deletions
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index f5d4723cdb3d..a3d9dc8c2c00 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -6,106 +6,95 @@
  * preempt_count (used for kernel preemption, interrupt count, etc.)
  */
 
-#include <linux/thread_info.h>
 #include <linux/linkage.h>
 #include <linux/list.h>
 
-#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
-extern void add_preempt_count(int val);
-extern void sub_preempt_count(int val);
-#else
-# define add_preempt_count(val)	do { preempt_count() += (val); } while (0)
-# define sub_preempt_count(val)	do { preempt_count() -= (val); } while (0)
-#endif
-
-#define inc_preempt_count() add_preempt_count(1)
-#define dec_preempt_count() sub_preempt_count(1)
-
-#define preempt_count()	(current_thread_info()->preempt_count)
-
-#ifdef CONFIG_PREEMPT
-
-asmlinkage void preempt_schedule(void);
-
-#define preempt_check_resched() \
-do { \
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-		preempt_schedule(); \
-} while (0)
-
-#ifdef CONFIG_CONTEXT_TRACKING
+/*
+ * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
+ * the other bits -- can't include that header due to inclusion hell.
+ */
+#define PREEMPT_NEED_RESCHED	0x80000000
 
-void preempt_schedule_context(void);
+#include <asm/preempt.h>
 
-#define preempt_check_resched_context() \
-do { \
-	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
-		preempt_schedule_context(); \
-} while (0)
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+extern void preempt_count_add(int val);
+extern void preempt_count_sub(int val);
+#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
 #else
+#define preempt_count_add(val)	__preempt_count_add(val)
+#define preempt_count_sub(val)	__preempt_count_sub(val)
+#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
+#endif
 
-#define preempt_check_resched_context() preempt_check_resched()
-
-#endif /* CONFIG_CONTEXT_TRACKING */
-
-#else /* !CONFIG_PREEMPT */
-
-#define preempt_check_resched()		do { } while (0)
-#define preempt_check_resched_context()	do { } while (0)
-
-#endif /* CONFIG_PREEMPT */
+#define __preempt_count_inc() __preempt_count_add(1)
+#define __preempt_count_dec() __preempt_count_sub(1)
 
+#define preempt_count_inc() preempt_count_add(1)
+#define preempt_count_dec() preempt_count_sub(1)
 
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
 do { \
-	inc_preempt_count(); \
+	preempt_count_inc(); \
 	barrier(); \
 } while (0)
 
 #define sched_preempt_enable_no_resched() \
 do { \
 	barrier(); \
-	dec_preempt_count(); \
+	preempt_count_dec(); \
 } while (0)
 
 #define preempt_enable_no_resched()	sched_preempt_enable_no_resched()
 
+#ifdef CONFIG_PREEMPT
 #define preempt_enable() \
 do { \
-	preempt_enable_no_resched(); \
 	barrier(); \
-	preempt_check_resched(); \
+	if (unlikely(preempt_count_dec_and_test())) \
+		__preempt_schedule(); \
+} while (0)
+
+#define preempt_check_resched() \
+do { \
+	if (should_resched()) \
+		__preempt_schedule(); \
 } while (0)
 
-/* For debugging and tracer internals only! */
-#define add_preempt_count_notrace(val) \
-	do { preempt_count() += (val); } while (0)
-#define sub_preempt_count_notrace(val) \
-	do { preempt_count() -= (val); } while (0)
-#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
-#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)
+#else
+#define preempt_enable() preempt_enable_no_resched()
+#define preempt_check_resched() do { } while (0)
+#endif
 
 #define preempt_disable_notrace() \
 do { \
-	inc_preempt_count_notrace(); \
+	__preempt_count_inc(); \
 	barrier(); \
 } while (0)
 
 #define preempt_enable_no_resched_notrace() \
 do { \
 	barrier(); \
-	dec_preempt_count_notrace(); \
+	__preempt_count_dec(); \
 } while (0)
 
-/* preempt_check_resched is OK to trace */
+#ifdef CONFIG_PREEMPT
+
+#ifndef CONFIG_CONTEXT_TRACKING
+#define __preempt_schedule_context() __preempt_schedule()
+#endif
+
 #define preempt_enable_notrace() \
 do { \
-	preempt_enable_no_resched_notrace(); \
 	barrier(); \
-	preempt_check_resched_context(); \
+	if (unlikely(__preempt_count_dec_and_test())) \
+		__preempt_schedule_context(); \
 } while (0)
+#else
+#define preempt_enable_notrace() preempt_enable_no_resched_notrace()
+#endif
 
 #else /* !CONFIG_PREEMPT_COUNT */
 
@@ -115,10 +104,11 @@ do { \
  * that can cause faults and scheduling migrate into our preempt-protected
  * region.
  */
-#define preempt_disable()		barrier()
-#define sched_preempt_enable_no_resched()	barrier()
-#define preempt_enable_no_resched()	barrier()
-#define preempt_enable()		barrier()
+#define preempt_disable()			barrier()
+#define sched_preempt_enable_no_resched()	barrier()
+#define preempt_enable_no_resched()		barrier()
+#define preempt_enable()			barrier()
+#define preempt_check_resched()			do { } while (0)
 
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
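
Usage sketch (not part of the patch; demo_counter and demo_update are made-up names, for illustration only): callers keep writing preempt_disable()/preempt_enable() pairs exactly as before. What changes underneath is that, with CONFIG_PREEMPT, preempt_enable() now folds the count decrement and the need-resched check into a single preempt_count_dec_and_test() instead of decrementing and then separately testing TIF_NEED_RESCHED.

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Hypothetical per-CPU counter, for illustration only. */
static DEFINE_PER_CPU(unsigned long, demo_counter);

static void demo_update(void)
{
	preempt_disable();		/* preempt_count_inc(); barrier(); */
	__this_cpu_inc(demo_counter);	/* cannot migrate while the count is raised */
	preempt_enable();		/* dec-and-test; on CONFIG_PREEMPT this calls
					 * __preempt_schedule() if the count hit zero
					 * and a reschedule is pending */
}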
