author     Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
committer  Glenn Elliott <gelliott@cs.unc.edu>   2012-03-04 19:47:13 -0500
commit     c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree       ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /include/linux/rcutiny.h
parent     ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent     6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'include/linux/rcutiny.h')
 -rw-r--r--  include/linux/rcutiny.h  115
1 file changed, 62 insertions(+), 53 deletions(-)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index e2e893144a84..52b3e0281fd0 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -27,116 +27,125 @@
 
 #include <linux/cache.h>
 
-void rcu_sched_qs(int cpu);
-void rcu_bh_qs(int cpu);
-static inline void rcu_note_context_switch(int cpu)
+static inline void rcu_init(void)
 {
-	rcu_sched_qs(cpu);
 }
 
-#define __rcu_read_lock()	preempt_disable()
-#define __rcu_read_unlock()	preempt_enable()
-#define __rcu_read_lock_bh()	local_bh_disable()
-#define __rcu_read_unlock_bh()	local_bh_enable()
-#define call_rcu_sched	call_rcu
-
-#define rcu_init_sched()	do { } while (0)
-extern void rcu_check_callbacks(int cpu, int user);
+#ifdef CONFIG_TINY_RCU
 
-static inline int rcu_needs_cpu(int cpu)
+static inline void synchronize_rcu_expedited(void)
 {
-	return 0;
+	synchronize_sched();	/* Only one CPU, so pretty fast anyway!!! */
 }
 
-/*
- * Return the number of grace periods.
- */
-static inline long rcu_batches_completed(void)
+static inline void rcu_barrier(void)
 {
-	return 0;
+	rcu_barrier_sched();  /* Only one CPU, so only one list of callbacks! */
 }
 
-/*
- * Return the number of bottom-half grace periods.
- */
-static inline long rcu_batches_completed_bh(void)
-{
-	return 0;
-}
+#else /* #ifdef CONFIG_TINY_RCU */
 
-static inline void rcu_force_quiescent_state(void)
+void rcu_barrier(void);
+void synchronize_rcu_expedited(void);
+
+#endif /* #else #ifdef CONFIG_TINY_RCU */
+
+static inline void synchronize_rcu_bh(void)
 {
+	synchronize_sched();
 }
 
-static inline void rcu_bh_force_quiescent_state(void)
+static inline void synchronize_rcu_bh_expedited(void)
 {
+	synchronize_sched();
 }
 
-static inline void rcu_sched_force_quiescent_state(void)
+static inline void synchronize_sched_expedited(void)
 {
+	synchronize_sched();
 }
 
-extern void synchronize_sched(void);
+#ifdef CONFIG_TINY_RCU
 
-static inline void synchronize_rcu(void)
+static inline void rcu_preempt_note_context_switch(void)
 {
-	synchronize_sched();
 }
 
-static inline void synchronize_rcu_bh(void)
+static inline void exit_rcu(void)
 {
-	synchronize_sched();
 }
 
-static inline void synchronize_rcu_expedited(void)
+static inline int rcu_needs_cpu(int cpu)
 {
-	synchronize_sched();
+	return 0;
 }
 
-static inline void synchronize_rcu_bh_expedited(void)
+#else /* #ifdef CONFIG_TINY_RCU */
+
+void rcu_preempt_note_context_switch(void);
+extern void exit_rcu(void);
+int rcu_preempt_needs_cpu(void);
+
+static inline int rcu_needs_cpu(int cpu)
 {
-	synchronize_sched();
+	return rcu_preempt_needs_cpu();
 }
 
-struct notifier_block;
+#endif /* #else #ifdef CONFIG_TINY_RCU */
 
-#ifdef CONFIG_NO_HZ
+static inline void rcu_note_context_switch(int cpu)
+{
+	rcu_sched_qs(cpu);
+	rcu_preempt_note_context_switch();
+}
 
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
+/*
+ * Take advantage of the fact that there is only one CPU, which
+ * allows us to ignore virtualization-based context switches.
+ */
+static inline void rcu_virt_note_context_switch(int cpu)
+{
+}
 
-#else /* #ifdef CONFIG_NO_HZ */
+/*
+ * Return the number of grace periods.
+ */
+static inline long rcu_batches_completed(void)
+{
+	return 0;
+}
 
-static inline void rcu_enter_nohz(void)
+/*
+ * Return the number of bottom-half grace periods.
+ */
+static inline long rcu_batches_completed_bh(void)
 {
+	return 0;
 }
 
-static inline void rcu_exit_nohz(void)
+static inline void rcu_force_quiescent_state(void)
 {
 }
 
-#endif /* #else #ifdef CONFIG_NO_HZ */
+static inline void rcu_bh_force_quiescent_state(void)
+{
+}
 
-static inline void exit_rcu(void)
+static inline void rcu_sched_force_quiescent_state(void)
 {
 }
 
-static inline int rcu_preempt_depth(void)
+static inline void rcu_cpu_stall_reset(void)
 {
-	return 0;
 }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
-
 extern int rcu_scheduler_active __read_mostly;
 extern void rcu_scheduler_starting(void);
-
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-
 static inline void rcu_scheduler_starting(void)
 {
 }
-
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 #endif /* __LINUX_RCUTINY_H */
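For context, here is a minimal, illustrative sketch (not part of the commit) of the classic RCU reader/updater pattern that the uniprocessor primitives in this header back: readers run essentially for free, and the updater's grace-period wait reduces to the cheap single-CPU paths shown above (for example, synchronize_rcu_expedited() falling back to synchronize_sched() under CONFIG_TINY_RCU). The names struct cfg, global_cfg, cfg_lock, and both helpers are hypothetical, not taken from the kernel source.

```c
/*
 * Illustrative sketch only -- not from the commit.  Shows the usual
 * RCU publish/read/reclaim pattern whose grace-period machinery the
 * Tiny RCU definitions above provide on uniprocessor builds.
 */
#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct cfg {
	int threshold;
};

static struct cfg __rcu *global_cfg;	/* RCU-protected pointer */
static DEFINE_SPINLOCK(cfg_lock);	/* serializes updaters only */

/* Reader: lock-free; rcu_read_lock() is nearly free under Tiny RCU. */
static int cfg_read_threshold(void)
{
	struct cfg *c;
	int val = 0;

	rcu_read_lock();
	c = rcu_dereference(global_cfg);
	if (c)
		val = c->threshold;
	rcu_read_unlock();
	return val;
}

/* Updater: publish a new version, wait one grace period, free the old. */
static int cfg_set_threshold(int threshold)
{
	struct cfg *newc, *oldc;

	newc = kmalloc(sizeof(*newc), GFP_KERNEL);
	if (!newc)
		return -ENOMEM;
	newc->threshold = threshold;

	spin_lock(&cfg_lock);
	oldc = rcu_dereference_protected(global_cfg,
					 lockdep_is_held(&cfg_lock));
	rcu_assign_pointer(global_cfg, newc);
	spin_unlock(&cfg_lock);

	/* On a single CPU this wait is trivially short (see header above). */
	synchronize_rcu();
	kfree(oldc);
	return 0;
}
```

The Tiny variants exist for exactly this situation: with only one CPU, a read-side critical section cannot be running concurrently on another processor, so the grace-period and barrier operations above can be empty or simply delegate to synchronize_sched(), which is why this header is so much smaller than its Tree RCU counterpart.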