author     Peter Zijlstra (Intel) <peterz@infradead.org>  2016-02-19 03:46:37 -0500
committer  Thomas Gleixner <tglx@linutronix.de>           2016-02-25 05:27:16 -0500
commit     13b35686e8b934ff78f59cef0c65fa3a43f8eeaf
tree       f4d92d7d6fc1e70d6fa67aea9efba546c9cc4781
parent     f4bcfa1da6fdcbc7a0854a28603bffc3c5656332
wait.[ch]: Introduce the simple waitqueue (swait) implementation
The existing wait queue code supports custom wake-up callbacks, wake
flags, a wake key (passed to the callback), and exclusive flags that
allow waiters to be tagged as exclusive, to limit the number of waiters
woken. In a lot of cases none of these features are used, and hence we
can benefit from a slimmed-down version that lowers memory overhead and
reduces runtime overhead.

The concept originated in -rt, where waitqueues are a constant source
of trouble: the head lock cannot be converted to a raw spinlock because
of the fancy and long-lasting callbacks.

With custom callbacks removed, a raw lock can be used for the queue
list manipulations, which allows the simple wait support to be used
in -rt.

[Patch is from PeterZ and is based on Thomas' version. Commit message
 is written by Paul G.
 Daniel: - Fixed some compile issues
         - Added non-lazy implementation of swake_up_locked as
           suggested by Boqun Feng.]

Originally-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: linux-rt-users@vger.kernel.org
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Link: http://lkml.kernel.org/r/1455871601-27484-2-git-send-email-wagi@monom.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
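For orientation, here is a minimal sketch of how the resulting API fits
together: one side blocks until a condition holds, the other publishes the
condition and wakes a single waiter. The names my_swq, done, wait_side()
and wake_side() are illustrative only (not part of this patch), and
memory-ordering details are elided for brevity:

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(my_swq);
static bool done;

/* Waiter: sleeps until "done" becomes true, or a signal arrives. */
static int wait_side(void)
{
	return swait_event_interruptible(my_swq, done);
}

/* Waker: publishes the condition, then wakes at most one waiter. */
static void wake_side(void)
{
	done = true;
	swake_up(&my_swq);
}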
-rw-r--r--  include/linux/swait.h   172
-rw-r--r--  kernel/sched/Makefile     2
-rw-r--r--  kernel/sched/swait.c    123
3 files changed, 296 insertions(+), 1 deletion(-)
diff --git a/include/linux/swait.h b/include/linux/swait.h
new file mode 100644
index 000000000000..c1f9c62a8a50
--- /dev/null
+++ b/include/linux/swait.h
@@ -0,0 +1,172 @@
#ifndef _LINUX_SWAIT_H
#define _LINUX_SWAIT_H

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>

/*
 * Simple wait queues
 *
 * While these are very similar to the other/complex wait queues (wait.h) the
 * most important difference is that the simple waitqueue allows for
 * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
 * times.
 *
 * In order to make this so, we had to drop a fair number of features of the
 * other waitqueue code; notably:
 *
 *  - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
 *    all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
 *    sleeper state.
 *
 *  - the exclusive mode; because this requires preserving the list order
 *    and this is hard.
 *
 *  - custom wake functions; because you cannot give any guarantees about
 *    random code.
 *
 * As a side effect of this, the data structures are slimmer.
 *
 * Use of this wait queue is recommended where possible.
 */

struct task_struct;

struct swait_queue_head {
	raw_spinlock_t		lock;
	struct list_head	task_list;
};

struct swait_queue {
	struct task_struct	*task;
	struct list_head	task_list;
};

#define __SWAITQUEUE_INITIALIZER(name) {				\
	.task		= current,					\
	.task_list	= LIST_HEAD_INIT((name).task_list),		\
}

#define DECLARE_SWAITQUEUE(name)					\
	struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)

#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= LIST_HEAD_INIT((name).task_list),		\
}

#define DECLARE_SWAIT_QUEUE_HEAD(name)					\
	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
				    struct lock_class_key *key);

#define init_swait_queue_head(q)				\
	do {							\
		static struct lock_class_key __key;		\
		__init_swait_queue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)			\
	({ init_swait_queue_head(&name); name; })
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)		\
	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)		\
	DECLARE_SWAIT_QUEUE_HEAD(name)
#endif

static inline int swait_active(struct swait_queue_head *q)
{
	return !list_empty(&q->task_list);
}

extern void swake_up(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);

extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);

extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);

/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
#define ___swait_event(wq, condition, state, ret, cmd)			\
({									\
	struct swait_queue __wait;					\
	long __ret = ret;						\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	for (;;) {							\
		long __int = prepare_to_swait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_swait(&wq, &__wait);					\
	__ret;								\
})

#define __swait_event(wq, condition)					\
	(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,	\
			    schedule())

#define swait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__swait_event(wq, condition);					\
} while (0)

#define __swait_event_timeout(wq, condition, timeout)			\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_UNINTERRUPTIBLE, timeout,			\
		       __ret = schedule_timeout(__ret))

#define swait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})

#define __swait_event_interruptible(wq, condition)			\
	___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,		\
		       schedule())

#define swait_event_interruptible(wq, condition)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __swait_event_interruptible(wq, condition);	\
	__ret;								\
})

#define __swait_event_interruptible_timeout(wq, condition, timeout)	\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_INTERRUPTIBLE, timeout,			\
		       __ret = schedule_timeout(__ret))

#define swait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})

#endif /* _LINUX_SWAIT_H */
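To make the macro plumbing above concrete, the following is roughly what
swait_event_interruptible(wq, cond) expands to once TASK_INTERRUPTIBLE is
substituted. This is a hand-expanded sketch for illustration, with wq and
cond as placeholders:

int __ret = 0;

if (!(cond)) {
	struct swait_queue __wait;

	INIT_LIST_HEAD(&__wait.task_list);
	for (;;) {
		/*
		 * Queue ourselves and move to TASK_INTERRUPTIBLE, unless
		 * a signal is already pending, in which case __int is
		 * -ERESTARTSYS and we were never queued.
		 */
		long __int = prepare_to_swait_event(&wq, &__wait,
						    TASK_INTERRUPTIBLE);

		if (cond)		/* always re-checked after queueing */
			break;

		if (__int) {		/* pending signal: abort the wait */
			__ret = __int;
			break;
		}

		schedule();		/* sleep until woken or signalled */
	}
	finish_swait(&wq, &__wait);	/* dequeue us, back to TASK_RUNNING */
}

Because the condition is re-checked after prepare_to_swait_event() has
queued the waiter and set the task state, a wakeup racing with the check
cannot be lost.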
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 67687973ce80..7d4cba227cbd 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -13,7 +13,7 @@ endif
 
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o completion.o idle.o
+obj-y += wait.o swait.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
new file mode 100644
index 000000000000..82f0dff90030
--- /dev/null
+++ b/kernel/sched/swait.c
@@ -0,0 +1,123 @@
#include <linux/sched.h>
#include <linux/swait.h>

void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
			     struct lock_class_key *key)
{
	raw_spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);

/*
 * About the wake_up_state() return value: I think we can ignore it.
 *
 * If for some reason it would return 0, that means the previously waiting
 * task is already running, so it will observe condition true (or has already).
 */
void swake_up_locked(struct swait_queue_head *q)
{
	struct swait_queue *curr;

	if (list_empty(&q->task_list))
		return;

	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	wake_up_process(curr->task);
	list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);

void swake_up(struct swait_queue_head *q)
{
	unsigned long flags;

	if (!swait_active(q))
		return;

	raw_spin_lock_irqsave(&q->lock, flags);
	swake_up_locked(q);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up);

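/*
 * Note that swake_up() tests swait_active() without holding q->lock,
 * just like waitqueue_active() in wait.h: a waker therefore needs its
 * condition store ordered before the wakeup, while the waiter side is
 * ordered by set_current_state() called under q->lock in
 * prepare_to_swait().
 */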
/*
 * Does not allow usage from IRQ disabled, since we must be able to
 * release IRQs to guarantee bounded hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
	struct swait_queue *curr;
	LIST_HEAD(tmp);

	if (!swait_active(q))
		return;

	raw_spin_lock_irq(&q->lock);
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
		curr = list_first_entry(&tmp, typeof(*curr), task_list);

		wake_up_state(curr->task, TASK_NORMAL);
		list_del_init(&curr->task_list);

		if (list_empty(&tmp))
			break;

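		/*
		 * Drop and re-take the lock between wakeups so that IRQs
		 * are only disabled for one wakeup at a time; this is what
		 * keeps the IRQ-off and lock hold times bounded.
		 */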
		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);

void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	wait->task = current;
	if (list_empty(&wait->task_list))
		list_add(&wait->task_list, &q->task_list);
}

void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__prepare_to_swait(q, wait);
	set_current_state(state);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait);

long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	if (signal_pending_state(state, current))
		return -ERESTARTSYS;

	prepare_to_swait(q, wait, state);

	return 0;
}
EXPORT_SYMBOL(prepare_to_swait_event);

void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
}

void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	if (!list_empty_careful(&wait->task_list)) {
		raw_spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		raw_spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_swait);
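For wait conditions that do not fit the swait_event*() macros, the
prepare/finish pair above can also be open-coded. A minimal sketch,
assuming an illustrative queue head ev_swq and flag ev_posted (neither
is part of this patch):

#include <linux/sched.h>
#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(ev_swq);
static bool ev_posted;

static void wait_for_event(void)
{
	DECLARE_SWAITQUEUE(wait);	/* on-stack swait_queue for this task */

	for (;;) {
		/* Enqueue (if not already queued) and go TASK_UNINTERRUPTIBLE. */
		prepare_to_swait(&ev_swq, &wait, TASK_UNINTERRUPTIBLE);
		if (ev_posted)		/* re-check after (re-)queueing */
			break;
		schedule();		/* until swake_up()/swake_up_all() */
	}
	finish_swait(&ev_swq, &wait);	/* dequeue, back to TASK_RUNNING */
}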