Diffstat (limited to 'include/linux/swait.h')

-rw-r--r--   include/linux/swait.h |  36
1 file changed, 18 insertions, 18 deletions
diff --git a/include/linux/swait.h b/include/linux/swait.h
index bf8cb0dee23c..73e06e9986d4 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -16,7 +16,7 @@
  * wait-queues, but the semantics are actually completely different, and
  * every single user we have ever had has been buggy (or pointless).
  *
- * A "swake_up()" only wakes up _one_ waiter, which is not at all what
+ * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what
  * "wake_up()" does, and has led to problems. In other cases, it has
  * been fine, because there's only ever one waiter (kvm), but in that
  * case gthe whole "simple" wait-queue is just pointless to begin with,
@@ -38,8 +38,8 @@
  *    all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
  *    sleeper state.
  *
- *  - the exclusive mode; because this requires preserving the list order
- *    and this is hard.
+ *  - the !exclusive mode; because that leads to O(n) wakeups, everything is
+ *    exclusive.
  *
  *  - custom wake callback functions; because you cannot give any guarantees
  *    about random code. This also allows swait to be used in RT, such that
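
As an illustration of the single-waiter semantics the updated comment describes, here is a minimal waker-side sketch; the queue, flag and function names are hypothetical and not part of this patch:

    #include <linux/swait.h>

    static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);   /* hypothetical waitqueue */
    static bool demo_ready;                     /* hypothetical condition */

    /* Every sleeper on a simple waitqueue is exclusive, so this wakes at
     * most one waiter; swake_up_all() is the way to wake every sleeper. */
    static void demo_signal(void)
    {
            WRITE_ONCE(demo_ready, true);
            swake_up_one(&demo_wq);
    }
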
@@ -115,7 +115,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
  *      CPU0 - waker                    CPU1 - waiter
  *
  *                                      for (;;) {
- *      @cond = true;                     prepare_to_swait(&wq_head, &wait, state);
+ *      @cond = true;                     prepare_to_swait_exclusive(&wq_head, &wait, state);
  *      smp_mb();                         // smp_mb() from set_current_state()
  *      if (swait_active(wq_head))          if (@cond)
  *        wake_up(wq_head);                   break;
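
A waker-side sketch of the pattern documented in the comment above, using an explicit smp_mb() before the lockless swait_active() check; the `done` flag and the function name are chosen only for illustration:

    /* Publish the condition, then order the store against the lockless
     * waiter check; pairs with set_current_state() on the waiter side. */
    static void example_wake(struct swait_queue_head *wq_head, bool *done)
    {
            WRITE_ONCE(*done, true);
            smp_mb();
            if (swait_active(wq_head))
                    swake_up_one(wq_head);
    }
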
@@ -157,20 +157,20 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
 	return swait_active(wq);
 }
 
-extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_one(struct swait_queue_head *q);
 extern void swake_up_all(struct swait_queue_head *q);
 extern void swake_up_locked(struct swait_queue_head *q);
 
-extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
 extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
 
 extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
 extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
 
-/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
+/* as per ___wait_event() but for swait, therefore "exclusive == 1" */
 #define ___swait_event(wq, condition, state, ret, cmd)			\
 ({									\
+	__label__ __out;						\
 	struct swait_queue __wait;					\
 	long __ret = ret;						\
 									\
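
Roughly what a caller of the renamed low-level API looks like when open-coded (approximately what ___swait_event() expands to); the function, queue and flag below are hypothetical:

    #include <linux/sched/signal.h>
    #include <linux/swait.h>

    static int wait_for_flag(struct swait_queue_head *wq, bool *flag)
    {
            DECLARE_SWAITQUEUE(wait);
            int ret = 0;

            for (;;) {
                    /* enqueue as an (always exclusive) waiter, set task state */
                    prepare_to_swait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
                    if (READ_ONCE(*flag))
                            break;
                    if (signal_pending(current)) {
                            ret = -ERESTARTSYS;
                            break;
                    }
                    schedule();
            }
            finish_swait(wq, &wait);
            return ret;
    }
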
@@ -183,20 +183,20 @@ extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
 									\
 		if (___wait_is_interruptible(state) && __int) {		\
 			__ret = __int;					\
-			break;						\
+			goto __out;					\
 		}							\
 									\
 		cmd;							\
 	}								\
 	finish_swait(&wq, &__wait);					\
-	__ret;								\
+__out:	__ret;								\
 })
 
 #define __swait_event(wq, condition)					\
 	(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,	\
 			    schedule())
 
-#define swait_event(wq, condition)					\
+#define swait_event_exclusive(wq, condition)				\
 do {									\
 	if (condition)							\
 		break;							\
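
One way the renamed swait_event_exclusive() might be used in a consumer thread, reusing the hypothetical demo_wq/demo_ready from the earlier sketch; the kthread itself is illustrative:

    #include <linux/kthread.h>

    static int demo_consumer(void *arg)
    {
            while (!kthread_should_stop()) {
                    /* sleeps in TASK_UNINTERRUPTIBLE; kthread_stop() wakes the
                     * task, so the condition is re-evaluated on shutdown too */
                    swait_event_exclusive(demo_wq,
                                          READ_ONCE(demo_ready) ||
                                          kthread_should_stop());
                    WRITE_ONCE(demo_ready, false);
                    /* ... handle the event ... */
            }
            return 0;
    }
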
@@ -208,7 +208,7 @@ do { \
 		      TASK_UNINTERRUPTIBLE, timeout,			\
 		      __ret = schedule_timeout(__ret))
 
-#define swait_event_timeout(wq, condition, timeout)			\
+#define swait_event_timeout_exclusive(wq, condition, timeout)		\
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
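
swait_event_timeout_exclusive() returns 0 if the wait timed out and otherwise the remaining jiffies, so a caller might check it as below (hypothetical names, as before):

    static int demo_wait_100ms(void)
    {
            long left;

            left = swait_event_timeout_exclusive(demo_wq, READ_ONCE(demo_ready),
                                                 msecs_to_jiffies(100));
            return left ? 0 : -ETIMEDOUT;   /* 0 means the timeout elapsed */
    }
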
@@ -220,7 +220,7 @@ do { \
 	___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,		\
 		      schedule())
 
-#define swait_event_interruptible(wq, condition)			\
+#define swait_event_interruptible_exclusive(wq, condition)		\
 ({									\
 	int __ret = 0;							\
 	if (!(condition))						\
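
The interruptible variant returns 0 once the condition is true and -ERESTARTSYS if a signal arrives first; a typical caller simply propagates the error (again with hypothetical names):

    static int demo_wait_interruptible(void)
    {
            int err;

            err = swait_event_interruptible_exclusive(demo_wq,
                                                      READ_ONCE(demo_ready));
            if (err)
                    return err;     /* -ERESTARTSYS: interrupted by a signal */
            /* ... condition is true, carry on ... */
            return 0;
    }
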
@@ -233,7 +233,7 @@ do { \
 		      TASK_INTERRUPTIBLE, timeout,			\
 		      __ret = schedule_timeout(__ret))
 
-#define swait_event_interruptible_timeout(wq, condition, timeout)	\
+#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
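
swait_event_interruptible_timeout_exclusive() combines both behaviours: a negative errno on signal, 0 on timeout, and the remaining jiffies on success (hypothetical sketch):

    static long demo_wait_both(void)
    {
            long ret;

            ret = swait_event_interruptible_timeout_exclusive(demo_wq,
                                                              READ_ONCE(demo_ready),
                                                              HZ);
            if (ret < 0)
                    return ret;             /* interrupted by a signal */
            if (ret == 0)
                    return -ETIMEDOUT;      /* one second passed without the event */
            return 0;                       /* event arrived with ret jiffies left */
    }
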
@@ -246,7 +246,7 @@ do { \
 	(void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
 
 /**
- * swait_event_idle - wait without system load contribution
+ * swait_event_idle_exclusive - wait without system load contribution
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  *
@@ -257,7 +257,7 @@ do { \
  * condition and doesn't want to contribute to system load. Signals are
  * ignored.
  */
-#define swait_event_idle(wq, condition)					\
+#define swait_event_idle_exclusive(wq, condition)			\
 do {									\
 	if (condition)							\
 		break;							\
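
Because swait_event_idle_exclusive() sleeps in TASK_IDLE, a long-lived housekeeping thread can park on it without inflating the load average; a minimal sketch with hypothetical names:

    static DECLARE_SWAIT_QUEUE_HEAD(housekeeping_wq);   /* hypothetical */
    static bool housekeeping_work;                      /* hypothetical */

    static void housekeeping_park(void)
    {
            /* contributes nothing to the load average; signals are ignored */
            swait_event_idle_exclusive(housekeeping_wq,
                                       READ_ONCE(housekeeping_work));
    }
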
@@ -270,7 +270,7 @@ do { \
 			    __ret = schedule_timeout(__ret))
 
 /**
- * swait_event_idle_timeout - wait up to timeout without load contribution
+ * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @timeout: timeout at which we'll give up in jiffies
@@ -288,7 +288,7 @@ do { \
  * or the remaining jiffies (at least 1) if the @condition evaluated
  * to %true before the @timeout elapsed.
  */
-#define swait_event_idle_timeout(wq, condition, timeout)		\
+#define swait_event_idle_timeout_exclusive(wq, condition, timeout)	\
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
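
Per the kerneldoc above, the idle timeout variant returns 0 when the @timeout elapsed and the remaining jiffies (at least 1) when the condition became true first; for example (hypothetical names, continuing the sketch above):

    static bool housekeeping_wait(void)
    {
            long left;

            left = swait_event_idle_timeout_exclusive(housekeeping_wq,
                                                      READ_ONCE(housekeeping_work),
                                                      10 * HZ);
            return left != 0;       /* false: gave up after ten idle seconds */
    }
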
