Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h | 50
1 file changed, 45 insertions(+), 5 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b3d697f3b573..c2413703f45d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -112,17 +112,36 @@ struct task_group;
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
+/*
+ * Special states are those that do not use the normal wait-loop pattern. See
+ * the comment with set_special_state().
+ */
+#define is_special_task_state(state)				\
+	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+
 #define __set_current_state(state_value)			\
 	do {							\
+		WARN_ON_ONCE(is_special_task_state(state_value));\
 		current->task_state_change = _THIS_IP_;		\
 		current->state = (state_value);			\
 	} while (0)
+
 #define set_current_state(state_value)				\
 	do {							\
+		WARN_ON_ONCE(is_special_task_state(state_value));\
 		current->task_state_change = _THIS_IP_;		\
 		smp_store_mb(current->state, (state_value));	\
 	} while (0)
 
+#define set_special_state(state_value)					\
+	do {								\
+		unsigned long flags; /* may shadow */			\
+		WARN_ON_ONCE(!is_special_task_state(state_value));	\
+		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
+		current->task_state_change = _THIS_IP_;			\
+		current->state = (state_value);				\
+		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
+	} while (0)
 #else
 /*
  * set_current_state() includes a barrier so that the write of current->state
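With CONFIG_DEBUG_ATOMIC_SLEEP enabled, the existing __set_current_state()/set_current_state() helpers now warn when handed one of the special states, and the new set_special_state() warns when handed anything else, so a caller that picks the wrong primitive is flagged at runtime. A minimal sketch of the intended split from a caller's point of view (the functions and the my_cond flag are hypothetical, not taken from this patch):

/* Hypothetical illustration only; not part of this patch. */
#include <linux/sched.h>

static bool my_cond;	/* hypothetical wake-up condition */

static void my_wait(void)
{
	/* Normal condition-based wait loop: a racing TASK_RUNNING store is
	 * harmless, the loop re-tests my_cond and sets the state again. */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (my_cond)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}

static void my_stop(void)
{
	/* TASK_STOPPED has no such loop to fall back on, so the state change
	 * must be serialized against wakeups; with this patch,
	 * set_current_state(TASK_STOPPED) would trigger the WARN_ON_ONCE()
	 * above under CONFIG_DEBUG_ATOMIC_SLEEP. */
	set_special_state(TASK_STOPPED);
	schedule();
}

Apart from the WARN_ON_ONCE() checks and the ->task_state_change bookkeeping, the debug variants mirror the non-debug ones further down, so behaviour is the same in both configurations.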
@@ -144,8 +163,8 @@ struct task_group;
  *
  * The above is typically ordered against the wakeup, which does:
  *
- *   need_sleep = false;
- *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
+ *	need_sleep = false;
+ *	wake_up_state(p, TASK_UNINTERRUPTIBLE);
  *
  * Where wake_up_state() (and all other wakeup primitives) imply enough
  * barriers to order the store of the variable against wakeup.
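The waker side shown in this comment needs no special primitive: storing the condition and then calling the wakeup is enough, because the wakeup call itself supplies the ordering. A small hypothetical counterpart to the wait loop above (names invented for illustration, not from this patch):

/* Hypothetical illustration only; not part of this patch. */
#include <linux/sched.h>

static bool my_cond;			/* hypothetical wake-up condition */
static struct task_struct *my_waiter;	/* hypothetical sleeping task */

static void my_wake(void)
{
	/* As the comment says, wake_up_state() (like the other wakeup
	 * primitives) implies enough barriers to order the my_cond store
	 * against the wakeup, pairing with the barrier the sleeper gets
	 * from set_current_state(). */
	my_cond = true;
	wake_up_state(my_waiter, TASK_UNINTERRUPTIBLE);
}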
@@ -154,12 +173,33 @@ struct task_group;
  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
  *
- * This is obviously fine, since they both store the exact same value.
+ * However, with slightly different timing the wakeup TASK_RUNNING store can
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
+ * a problem either because that will result in one extra go around the loop
+ * and our @cond test will save the day.
  *
  * Also see the comments of try_to_wake_up().
  */
-#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
-#define set_current_state(state_value)	smp_store_mb(current->state, (state_value))
+#define __set_current_state(state_value)				\
+	current->state = (state_value)
+
+#define set_current_state(state_value)					\
+	smp_store_mb(current->state, (state_value))
+
+/*
+ * set_special_state() should be used for those states when the blocking task
+ * can not use the regular condition based wait-loop. In that case we must
+ * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
+ * will not collide with our state change.
+ */
+#define set_special_state(state_value)					\
+	do {								\
+		unsigned long flags; /* may shadow */			\
+		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
+		current->state = (state_value);				\
+		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
+	} while (0)
+
 #endif
 
 /* Task command name length: */
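The new comment is terse about why the lock is needed: a special state has no surrounding condition loop, so a lost store cannot be retried. The idea is that taking ->pi_lock serializes the store against the wakeup path, which acquires the same ->pi_lock early in try_to_wake_up(), so a wakeup either completes before the special state is written or observes it afterwards instead of clobbering it with TASK_RUNNING. A rough sketch of the kind of caller this is meant for, loosely modelled on a task-exit path (the function name is hypothetical; the real do_task_dead() uses internal scheduler entry points and extra bookkeeping):

/* Hypothetical illustration only; not part of this patch. */
#include <linux/sched.h>

static void my_task_dead(void)
{
	/* TASK_DEAD is terminal: there is no wait loop whose condition test
	 * could recover from a racing TASK_RUNNING store, so the state is
	 * set under ->pi_lock via set_special_state() rather than with
	 * __set_current_state(). */
	set_special_state(TASK_DEAD);

	/* Once the state sticks, the task is never scheduled back in. */
	schedule();
}

Only the header change is shown on this page (the diffstat is limited to include/linux/sched.h); the callers that actually use TASK_STOPPED, __TASK_TRACED and TASK_DEAD live elsewhere in the tree.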