 include/linux/swait.h | 172
 kernel/sched/Makefile |   2
 kernel/sched/swait.c  | 123
 3 files changed, 296 insertions(+), 1 deletion(-)
diff --git a/include/linux/swait.h b/include/linux/swait.h
new file mode 100644
index 000000000000..c1f9c62a8a50
--- /dev/null
+++ b/include/linux/swait.h
@@ -0,0 +1,172 @@
#ifndef _LINUX_SWAIT_H
#define _LINUX_SWAIT_H

#include <linux/list.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <asm/current.h>

/*
 * Simple wait queues
 *
 * While these are very similar to the other/complex wait queues (wait.h) the
 * most important difference is that the simple waitqueue allows for
 * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold
 * times.
 *
 * In order to make this so, we had to drop a fair number of features of the
 * other waitqueue code; notably:
 *
 *  - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue;
 *    all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
 *    sleeper state.
 *
 *  - the exclusive mode; because this requires preserving the list order
 *    and this is hard.
 *
 *  - custom wake functions; because you cannot give any guarantees about
 *    random code.
 *
 * As a side effect of this, the data structures are slimmer.
 *
 * One would recommend using this wait queue where possible.
 */

struct task_struct;

struct swait_queue_head {
	raw_spinlock_t		lock;
	struct list_head	task_list;
};

struct swait_queue {
	struct task_struct	*task;
	struct list_head	task_list;
};

#define __SWAITQUEUE_INITIALIZER(name) {				\
	.task		= current,					\
	.task_list	= LIST_HEAD_INIT((name).task_list),		\
}

#define DECLARE_SWAITQUEUE(name)					\
	struct swait_queue name = __SWAITQUEUE_INITIALIZER(name)

#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __RAW_SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= LIST_HEAD_INIT((name).task_list),		\
}

#define DECLARE_SWAIT_QUEUE_HEAD(name)					\
	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name)

extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
				    struct lock_class_key *key);

#define init_swait_queue_head(q)				\
	do {							\
		static struct lock_class_key __key;		\
		__init_swait_queue_head((q), #q, &__key);	\
	} while (0)

#ifdef CONFIG_LOCKDEP
# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)			\
	({ init_swait_queue_head(&name); name; })
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)		\
	struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name)
#else
# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name)		\
	DECLARE_SWAIT_QUEUE_HEAD(name)
#endif

static inline int swait_active(struct swait_queue_head *q)
{
	return !list_empty(&q->task_list);
}

extern void swake_up(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);

extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);

extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);

/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
#define ___swait_event(wq, condition, state, ret, cmd)			\
({									\
	struct swait_queue __wait;					\
	long __ret = ret;						\
									\
	INIT_LIST_HEAD(&__wait.task_list);				\
	for (;;) {							\
		long __int = prepare_to_swait_event(&wq, &__wait, state);\
									\
		if (condition)						\
			break;						\
									\
		if (___wait_is_interruptible(state) && __int) {		\
			__ret = __int;					\
			break;						\
		}							\
									\
		cmd;							\
	}								\
	finish_swait(&wq, &__wait);					\
	__ret;								\
})

#define __swait_event(wq, condition)					\
	(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,	\
			    schedule())

#define swait_event(wq, condition)					\
do {									\
	if (condition)							\
		break;							\
	__swait_event(wq, condition);					\
} while (0)

#define __swait_event_timeout(wq, condition, timeout)			\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_UNINTERRUPTIBLE, timeout,			\
		       __ret = schedule_timeout(__ret))

#define swait_event_timeout(wq, condition, timeout)			\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_timeout(wq, condition, timeout);	\
	__ret;								\
})

#define __swait_event_interruptible(wq, condition)			\
	___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,		\
		       schedule())

#define swait_event_interruptible(wq, condition)			\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__ret = __swait_event_interruptible(wq, condition);	\
	__ret;								\
})

#define __swait_event_interruptible_timeout(wq, condition, timeout)	\
	___swait_event(wq, ___wait_cond_timeout(condition),		\
		       TASK_INTERRUPTIBLE, timeout,			\
		       __ret = schedule_timeout(__ret))

#define swait_event_interruptible_timeout(wq, condition, timeout)	\
({									\
	long __ret = timeout;						\
	if (!___wait_cond_timeout(condition))				\
		__ret = __swait_event_interruptible_timeout(wq,		\
						condition, timeout);	\
	__ret;								\
})

#endif /* _LINUX_SWAIT_H */
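
To make the intended usage concrete, here is a minimal sketch of how a waiter and a waker would use this API. Everything except the swait calls themselves (the my_swait queue, the done flag, and both functions) is hypothetical and not part of this patch:

	static DECLARE_SWAIT_QUEUE_HEAD(my_swait);
	static bool done;	/* hypothetical wake-up condition */

	/* Waiter: returns 0 once "done" is true, -ERESTARTSYS on a signal. */
	static int wait_for_done(void)
	{
		return swait_event_interruptible(my_swait, done);
	}

	/* Waker: publish the condition first, then wake a single waiter. */
	static void signal_done(void)
	{
		done = true;
		swake_up(&my_swait);
	}

Note that swake_up() wakes at most one waiter (there is no exclusive mode, so waking all sleepers requires swake_up_all()), and the condition write must be visible before the wakeup, as in the existing wait.h pattern.
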
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 67687973ce80..7d4cba227cbd 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -13,7 +13,7 @@ endif
 
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o completion.o idle.o
+obj-y += wait.o swait.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
new file mode 100644
index 000000000000..82f0dff90030
--- /dev/null
+++ b/kernel/sched/swait.c
@@ -0,0 +1,123 @@
#include <linux/sched.h>
#include <linux/swait.h>

void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
			     struct lock_class_key *key)
{
	raw_spin_lock_init(&q->lock);
	lockdep_set_class_and_name(&q->lock, key, name);
	INIT_LIST_HEAD(&q->task_list);
}
EXPORT_SYMBOL(__init_swait_queue_head);

/*
 * The thing about the wake_up_state() return value: I think we can ignore it.
 *
 * If for some reason it would return 0, that means the previously waiting
 * task is already running, so it will observe condition true (or has already).
 */
void swake_up_locked(struct swait_queue_head *q)
{
	struct swait_queue *curr;

	if (list_empty(&q->task_list))
		return;

	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	wake_up_process(curr->task);
	list_del_init(&curr->task_list);
}
EXPORT_SYMBOL(swake_up_locked);

void swake_up(struct swait_queue_head *q)
{
	unsigned long flags;

	if (!swait_active(q))
		return;

	raw_spin_lock_irqsave(&q->lock, flags);
	swake_up_locked(q);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(swake_up);

/*
 * Does not allow usage with IRQs disabled, since we must be able to
 * release IRQs to guarantee a bounded hold time.
 */
void swake_up_all(struct swait_queue_head *q)
{
	struct swait_queue *curr;
	LIST_HEAD(tmp);

	if (!swait_active(q))
		return;

	raw_spin_lock_irq(&q->lock);
	list_splice_init(&q->task_list, &tmp);
	while (!list_empty(&tmp)) {
		curr = list_first_entry(&tmp, typeof(*curr), task_list);

		wake_up_state(curr->task, TASK_NORMAL);
		list_del_init(&curr->task_list);

		if (list_empty(&tmp))
			break;

		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}
EXPORT_SYMBOL(swake_up_all);

void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	wait->task = current;
	if (list_empty(&wait->task_list))
		list_add(&wait->task_list, &q->task_list);
}

void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);
	__prepare_to_swait(q, wait);
	set_current_state(state);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}
EXPORT_SYMBOL(prepare_to_swait);

long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
{
	if (signal_pending_state(state, current))
		return -ERESTARTSYS;

	prepare_to_swait(q, wait, state);

	return 0;
}
EXPORT_SYMBOL(prepare_to_swait_event);

void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&wait->task_list))
		list_del_init(&wait->task_list);
}

void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
{
	unsigned long flags;

	__set_current_state(TASK_RUNNING);

	if (!list_empty_careful(&wait->task_list)) {
		raw_spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		raw_spin_unlock_irqrestore(&q->lock, flags);
	}
}
EXPORT_SYMBOL(finish_swait);
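
The prepare/finish pair above can also be used directly, outside the swait_event*() macros. The sketch below is a hypothetical open-coded equivalent of swait_event(); only the swait calls are from this patch, while the function and its cond argument are illustrative:

	static void wait_for_cond(struct swait_queue_head *wq, bool *cond)
	{
		DECLARE_SWAITQUEUE(wait);

		for (;;) {
			/* Queue ourselves and set the task state under
			 * q->lock, then re-check the condition before
			 * actually sleeping. */
			prepare_to_swait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (*cond)
				break;
			schedule();
		}
		/* Set TASK_RUNNING again and unlink from the queue. */
		finish_swait(wq, &wait);
	}

This is the same structure as ___swait_event(): because the task state is set while holding the waitqueue lock, a wakeup between the condition check and schedule() cannot be lost.
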