aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/litmus/litmus_softirq.h11
-rw-r--r--litmus/Kconfig25
-rw-r--r--litmus/Makefile2
-rw-r--r--litmus/litmus_softirq.c389
-rw-r--r--litmus/sched_gsn_edf.c1
5 files changed, 428 insertions, 0 deletions
diff --git a/include/litmus/litmus_softirq.h b/include/litmus/litmus_softirq.h
new file mode 100644
index 000000000000..37804fcfe8a3
--- /dev/null
+++ b/include/litmus/litmus_softirq.h
@@ -0,0 +1,11 @@
1
2#include <linux/interrupt.h>
3
4#define NR_LITMUS_SOFTIRQD CONFIG_NR_LITMUS_SOFTIRQD
5
6void trigger_litirqs(struct task_struct*);
7
8void spawn_klitirqd(void);
9
10void kill_klitirqd(void);
11
diff --git a/litmus/Kconfig b/litmus/Kconfig
index ad8dc8308cf0..a354e3dce19f 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -182,4 +182,29 @@ config SCHED_DEBUG_TRACE_CALLER
182 182
183endmenu 183endmenu
184 184
185menu "Interrupt Handling"
186
187config LITMUS_SOFTIRQD
188 bool "Spawn ksoftirqlitmusd interrupt handling threads."
189 depends on LITMUS_LOCKING
190 default n
191 help
192 Create ksoftirqlitmusd interrupt handling threads. Work must be
193 specifically dispatched to these workers. (Softirqs for
194 Litmus tasks are not magically redirected to ksoftirqlitmusd.)
195
196 G-EDF ONLY for now!
197
198 If unsure, say No.
199
200config NR_LITMUS_SOFTIRQD
  201	  int "Number of klitirqd threads."
202 depends on LITMUS_SOFTIRQD
203 range 1 4096
204 default "1"
205 help
  206	  Should be less than or equal to the number of CPUs in your system.
207
208endmenu
209
185endmenu 210endmenu
diff --git a/litmus/Makefile b/litmus/Makefile
index ad9936e07b83..0d0214c54896 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -26,3 +26,5 @@ obj-$(CONFIG_FEATHER_TRACE) += ft_event.o ftdev.o
26obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o 26obj-$(CONFIG_SCHED_TASK_TRACE) += sched_task_trace.o
27obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o 27obj-$(CONFIG_SCHED_DEBUG_TRACE) += sched_trace.o
28obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o 28obj-$(CONFIG_SCHED_OVERHEAD_TRACE) += trace.o
29
30obj-$(CONFIG_LITMUS_SOFTIRQD) += litmus_softirq.o
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
new file mode 100644
index 000000000000..4517a94823e7
--- /dev/null
+++ b/litmus/litmus_softirq.c
@@ -0,0 +1,389 @@
1/* much copied shamelessly from Linux's softirq.c
2 as well as PREEMPT_RT's softirq.c */
3
4#include <linux/module.h>
5#include <linux/kernel_stat.h>
6#include <linux/interrupt.h>
7#include <linux/init.h>
8#include <linux/mm.h>
9#include <linux/notifier.h>
10#include <linux/percpu.h>
11#include <linux/cpu.h>
12#include <linux/freezer.h>
13#include <linux/kthread.h>
14#include <linux/rcupdate.h>
15#include <linux/ftrace.h>
16#include <linux/smp.h>
17#include <linux/tick.h>
18
19#define CREATE_TRACE_POINTS
20#include <trace/events/irq.h>
21
22#include <asm/irq.h>
23
24#include <litmus/litmus.h>
25#include <litmus/jobs.h>
26#include <litmus/sched_plugin.h>
27#include <litmus/litmus_softirq.h>
28
/* only support tasklet processing for now. */

/* Singly-linked tasklet queue; 'tail' points at the final 'next' pointer
   (or back at 'head' when the queue is empty) for O(1) append. */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

/* One node per queued tasklet: 'job' is the Litmus task whose priority
   the daemon inherits while that tasklet waits to run. */
struct tasklet_owner
{
	struct tasklet_owner* next;
	struct task_struct* job;
};

/* used to create a parallel list to lit_tasklet_*_vec to
   associate a litmus priority with the tasklet */
struct tasklet_owner_head
{
	struct tasklet_owner *head;
	struct tasklet_owner **tail;
};

/* the daemon threads themselves, one per slot */
static struct task_struct* klitirqd[NR_LITMUS_SOFTIRQD];

/* one list for each klitirqd */
static raw_spinlock_t litirq_locks[NR_LITMUS_SOFTIRQD];

/* normal-priority tasklet queue plus its parallel owner queue, per daemon */
static struct tasklet_head lit_tasklet_vec[NR_LITMUS_SOFTIRQD];
static struct tasklet_owner_head lit_tasklet_owner_vec[NR_LITMUS_SOFTIRQD];

/* high-priority tasklet queue plus its parallel owner queue, per daemon */
static struct tasklet_head lit_tasklet_hi_vec[NR_LITMUS_SOFTIRQD];
static struct tasklet_owner_head lit_tasklet_hi_owner_vec[NR_LITMUS_SOFTIRQD];
60
61
62inline int klitirqd_id(struct task_struct* tsk)
63{
64 int i;
65 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
66 {
67 if(klitirqd[i] == tsk)
68 {
69 return i;
70 }
71 }
72
73 BUG();
74
75 return -1;
76}
77
/* Bit flags describing which per-daemon tasklet queues have work pending. */
enum pending_flags
{
	LIT_TASKLET_LOW = 0x1, /* normal-priority queue non-empty */
	LIT_TASKLET_HI = 0x2   /* high-priority queue non-empty */
};
83
84
85inline static u32 litirq_pending_hi_irqoff(struct task_struct* which)
86{
87 u32 pending = 0;
88
89 int offset = klitirqd_id(which);
90
91 if(lit_tasklet_hi_vec[offset].tail != &lit_tasklet_hi_vec[offset].head)
92 {
93 pending = LIT_TASKLET_HI;
94 }
95
96 return pending;
97};
98
99inline static u32 litirq_pending_low_irqoff(struct task_struct* which)
100{
101 u32 pending = 0;
102
103 int offset = klitirqd_id(which);
104
105 if(lit_tasklet_vec[offset].tail != &lit_tasklet_vec[offset].head)
106 {
107 pending = LIT_TASKLET_LOW;
108 }
109
110 return pending;
111};
112
113
114static u32 litirq_pending_irqoff(struct task_struct* which)
115{
116 u32 pending = 0;
117
118 pending |= litirq_pending_hi_irqoff(which);
119 pending |= litirq_pending_low_irqoff(which);
120
121 return pending;
122};
123
124
125static u32 litirq_pending(struct task_struct* which)
126{
127 unsigned long flags;
128 u32 pending;
129
130 int offset = klitirqd_id(which);
131
132 raw_spin_lock_irqsave(&litirq_locks[offset], flags);
133 pending = litirq_pending_irqoff(which);
134 raw_spin_unlock_irqrestore(&litirq_locks[offset], flags);
135
136 return pending;
137};
138
139
140static int needs_prio_change(struct task_struct* tsk,
141 struct tasklet_head* tasklet_vec)
142{
143 unsigned long flags;
144 int ret = 0;
145
146 int offset = klitirqd_id(tsk);
147 raw_spin_lock_irqsave(&litirq_locks[offset], flags);
148
149 if((tasklet_vec == lit_tasklet_hi_vec) && litirq_pending_hi_irqoff(tsk))
150 {
151 if(lit_tasklet_hi_owner_vec[offset].head->job != tsk_rt(current)->inh_task)
152 {
153 ret = 1;
154 }
155 }
156 else if((tasklet_vec == lit_tasklet_vec) && litirq_pending_irqoff(tsk))
157 {
158 if(lit_tasklet_owner_vec[offset].head->job != tsk_rt(current)->inh_task)
159 {
160 ret = 1;
161 }
162 }
163
164 raw_spin_unlock_irqrestore(&litirq_locks[offset], flags);
165
166 return ret;
167}
168
169
170static void reeval_prio(struct task_struct* tsk)
171{
172 unsigned long flags;
173 u32 pending = 0;
174 struct task_struct* new_prio = tsk_rt(tsk)->inh_task;
175
176 int offset = klitirqd_id(tsk);
177
178 raw_spin_lock_irqsave(&litirq_locks[offset], flags);
179
180 if(pending |= litirq_pending_irqoff(tsk))
181 {
182 if(lit_tasklet_owner_vec[offset].head->job != tsk_rt(current)->inh_task)
183 {
184 new_prio = lit_tasklet_owner_vec[offset].head->job;
185 }
186 }
187
188 if(pending |= litirq_pending_hi_irqoff(tsk))
189 {
190 if(lit_tasklet_hi_owner_vec[offset].head->job != tsk_rt(current)->inh_task)
191 {
192 new_prio = lit_tasklet_hi_owner_vec[offset].head->job;
193 }
194 }
195
196 if(!pending)
197 {
198 new_prio = NULL;
199 }
200
201 if(new_prio != tsk_rt(tsk)->inh_task)
202 {
203 /* Change priority!! */
204 }
205
206 raw_spin_unlock_irqrestore(&litirq_locks[offset], flags);
207}
208
209
/*
 * Wake daemon 'which' if it is sleeping, refreshing its inherited
 * priority first so it runs at the right level when it gets the CPU.
 * Must be called with interrupts disabled.
 */
static void wakeup_litirqd(struct task_struct* which)
{
	/* Interrupts are disabled: no need to stop preemption */

	if (which && which->state != TASK_RUNNING)
	{
		reeval_prio(which); /* configure the proper priority */
		wake_up_process(which);
	}
}
220
221
/*
 * Drain one tasklet queue belonging to the current klitirqd daemon.
 *
 * 'tasklet_vec' and 'owner_vec' are parallel per-daemon queues (either
 * the low-priority or hi-priority pair): the n-th owner node names the
 * Litmus task whose priority the n-th tasklet inherits. The two lists
 * are assumed to stay in lockstep -- they are always enqueued together.
 *
 * The whole queue is snapshotted under the daemon's lock and processed
 * without it. A tasklet runs only if its owner matches the priority the
 * daemon currently inherits AND tasklet_trylock() succeeds; anything
 * else is re-appended to the tail of the live queue under the lock.
 */
static void do_lit_tasklet(struct tasklet_head* tasklet_vec,
		struct tasklet_owner_head* owner_vec)
{
	unsigned long flags;
	struct tasklet_struct *list;
	struct tasklet_owner *owner;
	int id = klitirqd_id(current);

	raw_spin_lock_irqsave(&litirq_locks[id], flags);

	/* copy out the tasklets for our private use. */
	list = tasklet_vec[id].head;
	tasklet_vec[id].head = NULL;
	tasklet_vec[id].tail = &tasklet_vec[id].head;

	owner = owner_vec[id].head;
	owner_vec[id].head = NULL;
	owner_vec[id].tail = &owner_vec[id].head;

	raw_spin_unlock_irqrestore(&litirq_locks[id], flags);

	while(list)
	{
		struct tasklet_struct *t = list;
		struct tasklet_owner *t_owner = owner;

		/* advance, lest we forget */
		list = list->next;
		owner = owner->next;

		/* execute tasklet if it has my priority and is free */
		if ((t_owner->job == tsk_rt(current)->inh_task) && tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				/* SCHED bit must be set for a queued tasklet */
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue; /* process more tasklets */
			}
			/* tasklet was disabled; put it back instead */
			tasklet_unlock(t);
		}

		/* couldn't process tasklet. put it back at the end of the main queue. */
		t->next = NULL;
		t_owner->next = NULL;

		raw_spin_lock_irqsave(&litirq_locks[id], flags);

		*(tasklet_vec[id].tail) = t;
		tasklet_vec[id].tail = &(t->next);

		*(owner_vec[id].tail) = t_owner;
		owner_vec[id].tail = &(t_owner->next);

		raw_spin_unlock_irqrestore(&litirq_locks[id], flags);
	}
}
279
280static void do_litirq(void)
281{
282 u32 pending;
283 int resched = 0;
284
285 if (in_interrupt() || !is_realtime(current))
286 {
287 return;
288 }
289
290 /* since we only handle tasklets, no need for RCU triggers? */
291
292 pending = litirq_pending(current);
293 if(pending)
294 {
295 /* extract the work to do and do it! */
296 if(pending & LIT_TASKLET_HI)
297 {
298 do_lit_tasklet(lit_tasklet_hi_vec, lit_tasklet_hi_owner_vec);
299 resched = needs_prio_change(current, lit_tasklet_hi_vec);
300 }
301
302 if(!resched && (pending & LIT_TASKLET_LOW))
303 {
304 do_lit_tasklet(lit_tasklet_vec, lit_tasklet_owner_vec);
305 resched = needs_prio_change(current, lit_tasklet_vec);
306 }
307 }
308}
309
310
311/* TODO: WHAT'S THE DEAL WITH BOTTOM HALVES? */
312
313/* main loop for klitsoftirqd */
/*
 * Main loop for a klitirqd daemon thread (modeled on ksoftirqd).
 *
 * Sleeps in TASK_INTERRUPTIBLE while its queues are empty; when woken,
 * drains work via do_litirq(), re-evaluating inherited priority and
 * yielding (cond_resched) between passes. The preempt_disable/enable
 * pairing around litirq_pending() avoids losing a wakeup between the
 * empty-queue check and schedule(). Returns 0 on kthread_stop().
 */
static int run_klitirqd(void* dummy)
{
	/* TODO: Set as best-effort Litmus thread. */

	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!litirq_pending(current)) {
			/* sleep for work */
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (litirq_pending(current)) {

			do_litirq();

			reeval_prio(current); /* check if we need to change priority here */

			/* yield the CPU without re-entering the scheduler twice */
			preempt_enable_no_resched();
			cond_resched();

			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
348
349
/*
 * Kick daemon 'which' while it still has pending work.
 *
 * NOTE(review): if the daemon is already TASK_RUNNING, wakeup_litirqd()
 * is a no-op, so this loop busy-waits until the daemon drains its
 * queues -- confirm the call sites tolerate that.
 */
void trigger_litirqs(struct task_struct* which)
{
	while (litirq_pending(which))
	{
		wakeup_litirqd(which);
	}
}
357
358
359void spawn_klitirqd(void)
360{
361 int i;
362 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
363 {
364 klitirqd[i] = kthread_create(run_klitirqd, NULL, "klitirqd_th%d", i);
365 }
366
367 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
368 {
369 wake_up_process(klitirqd[i]);
370 }
371}
372
373void kill_klitirqd(void)
374{
375 int i;
376
377 /* TODO: Put pending tasklets SOMEWHERE-- back to the OS? */
378 for(i = 0; i < NR_LITMUS_SOFTIRQD; ++i)
379 {
380 kthread_stop(klitirqd[i]);
381 }
382}
383
384
385
386
387
388
389
diff --git a/litmus/sched_gsn_edf.c b/litmus/sched_gsn_edf.c
index c5c9600c33d8..d5d834cc411b 100644
--- a/litmus/sched_gsn_edf.c
+++ b/litmus/sched_gsn_edf.c
@@ -946,6 +946,7 @@ static long gsnedf_activate_plugin(void)
946 } 946 }
947#endif 947#endif
948 } 948 }
949
949 return 0; 950 return 0;
950} 951}
951 952