Diffstat (limited to 'litmus/litmus_softirq.c')
-rw-r--r-- | litmus/litmus_softirq.c | 1205 |
1 file changed, 1205 insertions, 0 deletions
diff --git a/litmus/litmus_softirq.c b/litmus/litmus_softirq.c
new file mode 100644
index 000000000000..464a78d780ad
--- /dev/null
+++ b/litmus/litmus_softirq.c
@@ -0,0 +1,1205 @@
1 | #include <linux/interrupt.h> | ||
2 | #include <linux/percpu.h> | ||
3 | #include <linux/cpu.h> | ||
4 | #include <linux/kthread.h> | ||
5 | #include <linux/ftrace.h> | ||
6 | #include <linux/smp.h> | ||
7 | #include <linux/slab.h> | ||
8 | #include <linux/mutex.h> | ||
9 | |||
10 | #include <linux/sched.h> | ||
11 | #include <linux/cpuset.h> | ||
12 | |||
13 | #include <litmus/litmus.h> | ||
14 | #include <litmus/sched_trace.h> | ||
15 | #include <litmus/jobs.h> | ||
16 | #include <litmus/sched_plugin.h> | ||
17 | #include <litmus/litmus_softirq.h> | ||
18 | |||
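/*
 * klmirqd: kernel threads that run Linux tasklets and work items on
 * behalf of real-time tasks under SCHED_LITMUS, instead of Linux's
 * softirq/workqueue machinery. A global registry (klmirqd_state below)
 * tracks every live klmirqd thread.
 */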
19 | /* TODO: Remove unneeded mb() and other barriers. */ | ||
20 | |||
21 | enum pending_flags | ||
22 | { | ||
23 | LIT_TASKLET_LOW = 0x1, | ||
24 | LIT_TASKLET_HI = LIT_TASKLET_LOW<<1, | ||
25 | LIT_WORK = LIT_TASKLET_HI<<1 | ||
26 | }; | ||
27 | |||
28 | struct klmirqd_registration | ||
29 | { | ||
30 | raw_spinlock_t lock; | ||
31 | u32 nr_threads; | ||
32 | unsigned int initialized:1; | ||
33 | unsigned int shuttingdown:1; | ||
34 | struct list_head threads; | ||
35 | }; | ||
36 | |||
37 | static atomic_t klmirqd_id_gen = ATOMIC_INIT(-1); | ||
38 | |||
39 | static struct klmirqd_registration klmirqd_state; | ||
40 | |||
41 | |||
42 | |||
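/* One-time setup of the global klmirqd registry. Must be called before
   any launch_klmirqd_thread() call will be accepted. */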
43 | void init_klmirqd(void) | ||
44 | { | ||
45 | raw_spin_lock_init(&klmirqd_state.lock); | ||
46 | |||
47 | klmirqd_state.nr_threads = 0; | ||
48 | klmirqd_state.initialized = 1; | ||
49 | klmirqd_state.shuttingdown = 0; | ||
50 | INIT_LIST_HEAD(&klmirqd_state.threads); | ||
51 | } | ||
52 | |||
53 | static int __klmirqd_is_ready(void) | ||
54 | { | ||
55 | return (klmirqd_state.initialized == 1 && klmirqd_state.shuttingdown == 0); | ||
56 | } | ||
57 | |||
58 | int klmirqd_is_ready(void) | ||
59 | { | ||
60 | unsigned long flags; | ||
61 | int ret; | ||
62 | |||
63 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
64 | ret = __klmirqd_is_ready(); | ||
65 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
66 | |||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | int klmirqd_is_dead(void) | ||
71 | { | ||
72 | return(!klmirqd_is_ready()); | ||
73 | } | ||
74 | |||
75 | |||
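/* Shut down every registered klmirqd thread: mark the registry as
   shutting down, then flush each thread's pending items back to Linux
   and stop the thread. */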
76 | void kill_klmirqd(void) | ||
77 | { | ||
78 | if(!klmirqd_is_dead()) | ||
79 | { | ||
80 | unsigned long flags; | ||
81 | struct list_head *pos; | ||
82 | struct list_head *q; | ||
83 | |||
84 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
85 | |||
86 | TRACE("%s: Killing all klmirqd threads! (%d of them)\n", __FUNCTION__, klmirqd_state.nr_threads); | ||
87 | |||
88 | klmirqd_state.shuttingdown = 1; | ||
89 | |||
90 | list_for_each_safe(pos, q, &klmirqd_state.threads) { | ||
91 | struct klmirqd_info* info = list_entry(pos, struct klmirqd_info, klmirqd_reg); | ||
92 | |||
93 | if(info->terminating != 1) | ||
94 | { | ||
95 | info->terminating = 1; | ||
96 | mb(); /* just to be sure? */ | ||
97 | flush_pending(info->klmirqd); | ||
98 | |||
99 | /* signal termination */ | ||
100 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
101 | kthread_stop(info->klmirqd); | ||
102 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
103 | } | ||
104 | } | ||
105 | |||
106 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
107 | } | ||
108 | } | ||
109 | |||
110 | |||
111 | |||
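/* Stop a single klmirqd thread: flush its pending tasklets and work
   items back to Linux, then wait for the kthread to exit. */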
112 | void kill_klmirqd_thread(struct task_struct* klmirqd_thread) | ||
113 | { | ||
114 | unsigned long flags; | ||
115 | struct klmirqd_info* info; | ||
116 | |||
117 | if (!tsk_rt(klmirqd_thread)->is_interrupt_thread) { | ||
118 | TRACE("%s/%d is not a klmirqd thread\n", klmirqd_thread->comm, klmirqd_thread->pid); | ||
119 | return; | ||
120 | } | ||
121 | |||
122 | TRACE("%s: Killing klmirqd thread %s/%d\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); | ||
123 | |||
124 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
125 | |||
126 | info = tsk_rt(klmirqd_thread)->klmirqd_info; | ||
127 | |||
128 | if(info->terminating != 1) { | ||
129 | info->terminating = 1; | ||
130 | mb(); | ||
131 | |||
132 | flush_pending(klmirqd_thread); | ||
/* kthread_stop() may sleep and the stopped thread unregisters itself
   under klmirqd_state.lock, so drop the registry lock around the stop,
   as kill_klmirqd() does above. */
raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags);
133 | kthread_stop(klmirqd_thread); | ||
raw_spin_lock_irqsave(&klmirqd_state.lock, flags);
134 | } | ||
135 | |||
136 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
137 | } | ||
138 | |||
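/* Thread creation is deferred to a workqueue item because
   launch_klmirqd_thread() may be called from atomic context, where
   kthread_create() cannot be used directly. */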
139 | struct klmirqd_launch_data | ||
140 | { | ||
141 | int cpu_affinity; | ||
142 | klmirqd_callback_t* cb; | ||
143 | char name[MAX_KLMIRQD_NAME_LEN+1]; | ||
144 | struct work_struct work; | ||
145 | }; | ||
146 | |||
147 | static int run_klmirqd(void* callback); | ||
148 | |||
149 | |||
150 | /* executed by a kworker from workqueues */ | ||
151 | static void __launch_klmirqd_thread(struct work_struct *work) | ||
152 | { | ||
153 | int id; | ||
154 | struct task_struct* thread = NULL; | ||
155 | struct klmirqd_launch_data* launch_data = | ||
156 | container_of(work, struct klmirqd_launch_data, work); | ||
157 | |||
158 | TRACE("Creating klmirqd thread\n"); | ||
159 | |||
160 | |||
161 | |||
162 | if (launch_data->cpu_affinity != -1) { | ||
163 | if (launch_data->name[0] == '\0') { | ||
164 | id = atomic_inc_return(&klmirqd_id_gen); | ||
165 | TRACE("Launching klmirqd_th%d/%d\n", id, launch_data->cpu_affinity); | ||
166 | |||
167 | thread = kthread_create( | ||
168 | run_klmirqd, | ||
169 | /* pass the callback descriptor; run_klmirqd() casts it back */ | ||
170 | (void*)launch_data->cb, | ||
171 | "klmirqd_th%d/%d", | ||
172 | id, | ||
173 | launch_data->cpu_affinity); | ||
174 | } | ||
175 | else { | ||
176 | TRACE("Launching %s/%d\n", launch_data->name, launch_data->cpu_affinity); | ||
177 | |||
178 | thread = kthread_create( | ||
179 | run_klmirqd, | ||
180 | /* pass the callback descriptor; run_klmirqd() casts it back */ | ||
181 | (void*)launch_data->cb, | ||
182 | "%s/%d", | ||
183 | launch_data->name, | ||
184 | launch_data->cpu_affinity); | ||
185 | } | ||
186 | |||
187 | /* LITMUS^RT will put us in the right cluster. */ | ||
188 | if (!IS_ERR(thread)) kthread_bind(thread, launch_data->cpu_affinity); | ||
189 | } | ||
190 | else { | ||
191 | if (launch_data->name[0] == '\0') { | ||
192 | id = atomic_inc_return(&klmirqd_id_gen); | ||
193 | TRACE("Launching klmirqd_th%d\n", id); | ||
194 | |||
195 | thread = kthread_create( | ||
196 | run_klmirqd, | ||
197 | /* pass the callback descriptor; run_klmirqd() casts it back */ | ||
198 | (void*)launch_data->cb, | ||
199 | "klmirqd_th%d", | ||
200 | id); | ||
201 | |||
202 | } | ||
203 | else { | ||
204 | TRACE("Launching %s\n", launch_data->name); | ||
205 | |||
206 | thread = kthread_create( | ||
207 | run_klmirqd, | ||
208 | /* pass the callback descriptor; run_klmirqd() casts it back */ | ||
209 | (void*)launch_data->cb, | ||
210 | "%s", launch_data->name); | ||
211 | } | ||
212 | |||
213 | |||
214 | } | ||
215 | |||
216 | if (!IS_ERR_OR_NULL(thread)) { | ||
217 | wake_up_process(thread); | ||
218 | } | ||
219 | else { | ||
220 | TRACE("Could not create thread!\n"); | ||
221 | } | ||
222 | |||
223 | kfree(launch_data); | ||
224 | } | ||
225 | |||
226 | |||
227 | int launch_klmirqd_thread(char* name, int cpu, klmirqd_callback_t* cb) | ||
228 | { | ||
229 | struct klmirqd_launch_data* delayed_launch; | ||
230 | |||
231 | if (!klmirqd_is_ready()) { | ||
232 | TRACE("klmirqd is not ready. Check that it was initialized!\n"); | ||
233 | return -1; | ||
234 | } | ||
235 | |||
236 | /* tell a work queue to launch the threads. we can't make scheduling | ||
237 | calls since we're in an atomic state. */ | ||
238 | delayed_launch = kmalloc(sizeof(struct klmirqd_launch_data), GFP_ATOMIC); | ||
if (!delayed_launch) {
	TRACE("Failed to allocate klmirqd launch data!\n");
	return -1;
}
239 | delayed_launch->cpu_affinity = cpu; | ||
240 | delayed_launch->cb = cb; | ||
241 | INIT_WORK(&delayed_launch->work, __launch_klmirqd_thread); | ||
242 | |||
243 | if(name) { | ||
244 | snprintf(delayed_launch->name, MAX_KLMIRQD_NAME_LEN+1, "%s", name); | ||
245 | } | ||
246 | else { | ||
247 | delayed_launch->name[0] = '\0'; | ||
248 | } | ||
249 | |||
250 | schedule_work(&delayed_launch->work); | ||
251 | |||
252 | return 0; | ||
253 | } | ||
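/*
 * A minimal usage sketch (not part of the original file): how a plugin
 * might bring up one klmirqd thread pinned to CPU 0. The names
 * 'my_klmirqd_cb', 'my_klmirqd_ready', 'my_klmirqd_thread', and
 * 'my_plugin_start_klmirqd' are hypothetical; only init_klmirqd(),
 * launch_klmirqd_thread(), and the klmirqd_callback_t fields used
 * below come from this file.
 */
#if 0
static struct task_struct* my_klmirqd_thread;

/* runs in the new thread after it has been admitted to LITMUS^RT and
   registered with klmirqd; returning non-zero aborts the thread */
static int my_klmirqd_ready(void* arg)
{
	my_klmirqd_thread = current;
	return 0;
}

static klmirqd_callback_t my_klmirqd_cb = {
	.func = my_klmirqd_ready,
	.arg = NULL,
};

static void my_plugin_start_klmirqd(void)
{
	init_klmirqd();  /* once, before any launches */

	/* NULL name -> auto-generated "klmirqd_thN/CPU" name;
	   cpu == -1 would request no CPU affinity */
	if (launch_klmirqd_thread(NULL, 0, &my_klmirqd_cb) != 0)
		TRACE("failed to launch klmirqd thread\n");
}
#endif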
254 | |||
255 | |||
256 | |||
257 | |||
258 | #define KLMIRQD_SLICE_NR_JIFFIES 1 | ||
259 | #define KLMIRQD_SLICE_NS ((NSEC_PER_SEC / HZ) * KLMIRQD_SLICE_NR_JIFFIES) | ||
260 | |||
261 | static int become_litmus_daemon(struct task_struct* tsk) | ||
262 | { | ||
263 | int ret = 0; | ||
264 | |||
265 | struct rt_task tp = { | ||
266 | .period = KLMIRQD_SLICE_NS, /* dummy one-jiffy period */ | ||
267 | .relative_deadline = KLMIRQD_SLICE_NS, | ||
268 | .exec_cost = KLMIRQD_SLICE_NS, | ||
269 | .phase = 0, | ||
270 | .cpu = task_cpu(current), | ||
271 | .budget_policy = NO_ENFORCEMENT, | ||
272 | .budget_signal_policy = NO_SIGNALS, | ||
273 | .cls = RT_CLASS_BEST_EFFORT | ||
274 | }; | ||
275 | |||
276 | struct sched_param param = { .sched_priority = 0}; | ||
277 | |||
278 | TRACE_CUR("Setting %s/%d as daemon thread.\n", tsk->comm, tsk->pid); | ||
279 | |||
280 | /* set task params */ | ||
281 | tsk_rt(tsk)->task_params = tp; | ||
282 | tsk_rt(tsk)->is_interrupt_thread = 1; | ||
283 | |||
284 | /* inform the OS we're SCHED_LITMUS -- | ||
285 | sched_setscheduler_nocheck() calls litmus_admit_task(). */ | ||
286 | ret = sched_setscheduler_nocheck(tsk, SCHED_LITMUS, &param); | ||
287 | |||
288 | return ret; | ||
289 | } | ||
290 | |||
291 | static int become_normal_daemon(struct task_struct* tsk) | ||
292 | { | ||
293 | int ret = 0; | ||
294 | |||
295 | struct sched_param param = { .sched_priority = 0}; | ||
296 | sched_setscheduler_nocheck(tsk, SCHED_NORMAL, &param); | ||
297 | |||
298 | return ret; | ||
299 | } | ||
300 | |||
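/* Allocate the per-thread klmirqd bookkeeping for 'tsk' and add it to
   the global registry. Called by the new thread itself, after it has
   been admitted to LITMUS^RT as an interrupt proxy thread. */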
301 | static int register_klmirqd(struct task_struct* tsk) | ||
302 | { | ||
303 | int retval = 0; | ||
304 | unsigned long flags; | ||
305 | struct klmirqd_info *info = NULL; | ||
306 | |||
307 | if (!tsk_rt(tsk)->is_interrupt_thread) { | ||
308 | TRACE("Only proxy threads already running in Litmus may become klmirqd threads!\n"); | ||
309 | WARN_ON(1); | ||
310 | retval = -1; | ||
311 | goto out; | ||
312 | } | ||
313 | |||
314 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
315 | |||
316 | if (!__klmirqd_is_ready()) { | ||
317 | TRACE("klmirqd is not ready! Did you forget to initialize it?\n"); | ||
318 | WARN_ON(1); | ||
319 | retval = -1; | ||
320 | goto out_unlock; | ||
321 | } | ||
322 | |||
323 | /* allocate and initialize klmirqd data for the thread */ | ||
324 | info = kmalloc(sizeof(struct klmirqd_info), GFP_KERNEL); | ||
325 | if (!info) { | ||
326 | TRACE("Failed to allocate klmirqd_info struct!\n"); | ||
327 | retval = -ENOMEM; | ||
328 | goto out_unlock; | ||
329 | } | ||
330 | memset(info, 0, sizeof(struct klmirqd_info)); | ||
331 | info->klmirqd = tsk; | ||
332 | info->pending_tasklets_hi.tail = &info->pending_tasklets_hi.head; | ||
333 | info->pending_tasklets.tail = &info->pending_tasklets.head; | ||
334 | INIT_LIST_HEAD(&info->worklist); | ||
335 | INIT_LIST_HEAD(&info->klmirqd_reg); | ||
336 | raw_spin_lock_init(&info->lock); | ||
337 | |||
338 | |||
339 | /* now register with klmirqd */ | ||
340 | list_add_tail(&info->klmirqd_reg, &klmirqd_state.threads); | ||
341 | ++klmirqd_state.nr_threads; | ||
342 | |||
343 | /* update the task struct to point to klmirqd info */ | ||
344 | tsk_rt(tsk)->klmirqd_info = info; | ||
345 | |||
346 | out_unlock: | ||
347 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
348 | |||
349 | out: | ||
350 | return retval; | ||
351 | } | ||
352 | |||
353 | static int unregister_klmirqd(struct task_struct* tsk) | ||
354 | { | ||
355 | int retval = 0; | ||
356 | unsigned long flags; | ||
357 | struct klmirqd_info *info = tsk_rt(tsk)->klmirqd_info; | ||
358 | |||
359 | if (!tsk_rt(tsk)->is_interrupt_thread || !info) { | ||
360 | TRACE("%s/%d is not a klmirqd thread!\n", tsk->comm, tsk->pid); | ||
361 | WARN_ON(1); | ||
362 | retval = -1; | ||
363 | goto out; | ||
364 | } | ||
365 | |||
366 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
367 | |||
368 | /* remove the entry in the klmirqd thread list */ | ||
369 | list_del(&info->klmirqd_reg); | ||
370 | mb(); | ||
371 | --klmirqd_state.nr_threads; | ||
372 | |||
373 | /* remove link to klmirqd info from thread */ | ||
374 | tsk_rt(tsk)->klmirqd_info = NULL; | ||
375 | |||
376 | /* clean up memory */ | ||
377 | kfree(info); | ||
378 | |||
379 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
380 | |||
381 | out: | ||
382 | return retval; | ||
383 | } | ||
384 | |||
385 | |||
386 | |||
387 | |||
388 | |||
389 | |||
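/* /proc read handler: reports the number of klmirqd threads and, for
   each one, its current owner and pending tasklet/work counts. */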
390 | int proc_read_klmirqd_stats(char *page, char **start, | ||
391 | off_t off, int count, | ||
392 | int *eof, void *data) | ||
393 | { | ||
394 | unsigned long flags; | ||
395 | int len; | ||
396 | |||
397 | raw_spin_lock_irqsave(&klmirqd_state.lock, flags); | ||
398 | |||
399 | if (klmirqd_state.initialized) { | ||
400 | if (!klmirqd_state.shuttingdown) { | ||
401 | struct list_head *pos; | ||
402 | |||
403 | len = snprintf(page, PAGE_SIZE, | ||
404 | "num ready klmirqds: %d\n\n", | ||
405 | klmirqd_state.nr_threads); | ||
406 | |||
407 | list_for_each(pos, &klmirqd_state.threads) { | ||
408 | struct klmirqd_info* info = list_entry(pos, struct klmirqd_info, klmirqd_reg); | ||
409 | |||
410 | len += | ||
411 | snprintf(page + len, PAGE_SIZE - len, | ||
412 | "klmirqd_thread: %s/%d\n" | ||
413 | "\tcurrent_owner: %s/%d\n" | ||
414 | "\tpending: %x\n" | ||
415 | "\tnum hi: %d\n" | ||
416 | "\tnum low: %d\n" | ||
417 | "\tnum work: %d\n\n", | ||
418 | info->klmirqd->comm, info->klmirqd->pid, | ||
419 | (info->current_owner != NULL) ? | ||
420 | info->current_owner->comm : "(null)", | ||
421 | (info->current_owner != NULL) ? | ||
422 | info->current_owner->pid : 0, | ||
423 | info->pending, | ||
424 | atomic_read(&info->num_hi_pending), | ||
425 | atomic_read(&info->num_low_pending), | ||
426 | atomic_read(&info->num_work_pending)); | ||
427 | } | ||
428 | } | ||
429 | else { | ||
430 | len = snprintf(page, PAGE_SIZE, "klmirqd is shutting down\n"); | ||
431 | } | ||
432 | } | ||
433 | else { | ||
434 | len = snprintf(page, PAGE_SIZE, "klmirqd is not initialized!\n"); | ||
435 | } | ||
436 | |||
437 | raw_spin_unlock_irqrestore(&klmirqd_state.lock, flags); | ||
438 | |||
439 | return(len); | ||
440 | } | ||
441 | |||
442 | |||
443 | |||
444 | |||
445 | |||
446 | #if 0 | ||
447 | static atomic_t dump_id = ATOMIC_INIT(0); | ||
448 | |||
449 | static void __dump_state(struct klmirqd_info* which, const char* caller) | ||
450 | { | ||
451 | struct tasklet_struct* list; | ||
452 | |||
453 | int id = atomic_inc_return(&dump_id); | ||
454 | |||
455 | //if(in_interrupt()) | ||
456 | { | ||
457 | if(which->current_owner) | ||
458 | { | ||
459 | TRACE("(id: %d caller: %s)\n" | ||
460 | "klmirqd: %s/%d\n" | ||
461 | "current owner: %s/%d\n" | ||
462 | "pending: %x\n", | ||
463 | id, caller, | ||
464 | which->klmirqd->comm, which->klmirqd->pid, | ||
465 | which->current_owner->comm, which->current_owner->pid, | ||
466 | which->pending); | ||
467 | } | ||
468 | else | ||
469 | { | ||
470 | TRACE("(id: %d caller: %s)\n" | ||
471 | "klmirqd: %s/%d\n" | ||
472 | "current owner: %p\n" | ||
473 | "pending: %x\n", | ||
474 | id, caller, | ||
475 | which->klmirqd->comm, which->klmirqd->pid, | ||
476 | NULL, | ||
477 | which->pending); | ||
478 | } | ||
479 | |||
480 | list = which->pending_tasklets.head; | ||
481 | while(list) | ||
482 | { | ||
483 | struct tasklet_struct *t = list; | ||
484 | list = list->next; /* advance */ | ||
485 | if(t->owner) | ||
486 | TRACE("(id: %d caller: %s) Tasklet: %x, Owner = %s/%d\n", id, caller, t, t->owner->comm, t->owner->pid); | ||
487 | else | ||
488 | TRACE("(id: %d caller: %s) Tasklet: %x, Owner = %p\n", id, caller, t, NULL); | ||
489 | } | ||
490 | } | ||
491 | } | ||
492 | |||
493 | static void dump_state(struct klmirqd_info* which, const char* caller) | ||
494 | { | ||
495 | unsigned long flags; | ||
496 | |||
497 | raw_spin_lock_irqsave(&which->lock, flags); | ||
498 | __dump_state(which, caller); | ||
499 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
500 | } | ||
501 | #endif | ||
502 | |||
503 | |||
504 | |||
505 | |||
506 | |||
507 | |||
508 | |||
509 | |||
510 | |||
511 | |||
512 | |||
513 | /* forward declarations */ | ||
514 | static void ___litmus_tasklet_schedule(struct tasklet_struct *t, | ||
515 | struct klmirqd_info *which, | ||
516 | int wakeup); | ||
517 | static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t, | ||
518 | struct klmirqd_info *which, | ||
519 | int wakeup); | ||
520 | static void ___litmus_schedule_work(struct work_struct *w, | ||
521 | struct klmirqd_info *which, | ||
522 | int wakeup); | ||
523 | |||
524 | |||
525 | inline static u32 litirq_pending_hi_irqoff(struct klmirqd_info* which) | ||
526 | { | ||
527 | return (which->pending & LIT_TASKLET_HI); | ||
528 | } | ||
529 | |||
530 | inline static u32 litirq_pending_low_irqoff(struct klmirqd_info* which) | ||
531 | { | ||
532 | return (which->pending & LIT_TASKLET_LOW); | ||
533 | } | ||
534 | |||
535 | inline static u32 litirq_pending_work_irqoff(struct klmirqd_info* which) | ||
536 | { | ||
537 | return (which->pending & LIT_WORK); | ||
538 | } | ||
539 | |||
540 | inline static u32 litirq_pending_irqoff(struct klmirqd_info* which) | ||
541 | { | ||
542 | return(which->pending); | ||
543 | } | ||
544 | |||
545 | |||
546 | inline static u32 litirq_pending(struct klmirqd_info* which) | ||
547 | { | ||
548 | unsigned long flags; | ||
549 | u32 pending; | ||
550 | |||
551 | raw_spin_lock_irqsave(&which->lock, flags); | ||
552 | pending = litirq_pending_irqoff(which); | ||
553 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
554 | |||
555 | return pending; | ||
556 | } | ||
557 | |||
558 | static void wakeup_litirqd_locked(struct klmirqd_info* which) | ||
559 | { | ||
560 | /* Interrupts are disabled: no need to stop preemption */ | ||
561 | if (which && which->klmirqd) | ||
562 | { | ||
563 | if(which->klmirqd->state != TASK_RUNNING) | ||
564 | { | ||
565 | TRACE("%s: Waking up klmirqd: %s/%d\n", __FUNCTION__, | ||
566 | which->klmirqd->comm, which->klmirqd->pid); | ||
567 | |||
568 | wake_up_process(which->klmirqd); | ||
569 | } | ||
570 | } | ||
571 | } | ||
572 | |||
573 | |||
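/* Drain one tasklet list (LOW or HI): atomically steal the whole list
   and clear its pending flag, then run each tasklet outside the lock.
   Tasklets that cannot run (already running elsewhere or disabled) are
   requeued at the tail. */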
574 | static void do_lit_tasklet(struct klmirqd_info* which, | ||
575 | struct tasklet_head* pending_tasklets) | ||
576 | { | ||
577 | unsigned long flags; | ||
578 | struct tasklet_struct *list; | ||
579 | atomic_t* count; | ||
580 | |||
581 | raw_spin_lock_irqsave(&which->lock, flags); | ||
582 | |||
583 | //__dump_state(which, "do_lit_tasklet: before steal"); | ||
584 | |||
585 | /* copy out the tasklets for our private use. */ | ||
586 | list = pending_tasklets->head; | ||
587 | pending_tasklets->head = NULL; | ||
588 | pending_tasklets->tail = &pending_tasklets->head; | ||
589 | |||
590 | /* remove pending flag */ | ||
591 | which->pending &= (pending_tasklets == &which->pending_tasklets) ? | ||
592 | ~LIT_TASKLET_LOW : | ||
593 | ~LIT_TASKLET_HI; | ||
594 | |||
595 | count = (pending_tasklets == &which->pending_tasklets) ? | ||
596 | &which->num_low_pending: | ||
597 | &which->num_hi_pending; | ||
598 | |||
599 | //__dump_state(which, "do_lit_tasklet: after steal"); | ||
600 | |||
601 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
602 | |||
603 | |||
604 | while(list) | ||
605 | { | ||
606 | struct tasklet_struct *t = list; | ||
607 | |||
608 | /* advance, lest we forget */ | ||
609 | list = list->next; | ||
610 | |||
611 | /* execute the tasklet if it is not already running elsewhere and not disabled */ | ||
612 | if (tasklet_trylock(t)) { | ||
613 | if (!atomic_read(&t->count)) { | ||
614 | |||
615 | sched_trace_tasklet_begin(t->owner); | ||
616 | |||
617 | if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) | ||
618 | { | ||
619 | BUG(); | ||
620 | } | ||
621 | TRACE_CUR("%s: Invoking tasklet.\n", __FUNCTION__); | ||
622 | t->func(t->data); | ||
623 | tasklet_unlock(t); | ||
624 | |||
625 | atomic_dec(count); | ||
626 | |||
627 | sched_trace_tasklet_end(t->owner, 0ul); | ||
628 | |||
629 | continue; /* process more tasklets */ | ||
630 | } | ||
631 | tasklet_unlock(t); | ||
632 | } | ||
633 | |||
634 | TRACE_CUR("%s: Could not invoke tasklet. Requeuing.\n", __FUNCTION__); | ||
635 | |||
636 | /* couldn't process tasklet. put it back at the end of the queue. */ | ||
637 | if(pending_tasklets == &which->pending_tasklets) | ||
638 | ___litmus_tasklet_schedule(t, which, 0); | ||
639 | else | ||
640 | ___litmus_tasklet_hi_schedule(t, which, 0); | ||
641 | } | ||
642 | } | ||
643 | |||
644 | |||
645 | // Runs all pending HI and LOW tasklets for this klmirqd thread. | ||
646 | // Must be called from the klmirqd thread itself, in process context. | ||
647 | static void do_litirq(struct klmirqd_info* which) | ||
648 | { | ||
649 | u32 pending; | ||
650 | |||
651 | if(in_interrupt()) | ||
652 | { | ||
653 | TRACE("%s: exiting early: in interrupt context!\n", __FUNCTION__); | ||
654 | return; | ||
655 | } | ||
656 | |||
657 | if(which->klmirqd != current) | ||
658 | { | ||
659 | TRACE_CUR("%s: exiting early: thread/info mismatch! Running %s/%d but given %s/%d.\n", | ||
660 | __FUNCTION__, current->comm, current->pid, | ||
661 | which->klmirqd->comm, which->klmirqd->pid); | ||
662 | return; | ||
663 | } | ||
664 | |||
665 | if(!is_realtime(current)) | ||
666 | { | ||
667 | TRACE_CUR("%s: exiting early: klmirqd is not real-time. Sched Policy = %d\n", | ||
668 | __FUNCTION__, current->policy); | ||
669 | return; | ||
670 | } | ||
671 | |||
672 | |||
673 | /* We only handle tasklets & work objects, no need for RCU triggers? */ | ||
674 | |||
675 | pending = litirq_pending(which); | ||
676 | if(pending) { | ||
677 | /* extract the work to do and do it! */ | ||
678 | if(pending & LIT_TASKLET_HI) { | ||
679 | TRACE_CUR("%s: Invoking HI tasklets.\n", __FUNCTION__); | ||
680 | do_lit_tasklet(which, &which->pending_tasklets_hi); | ||
681 | } | ||
682 | |||
683 | if(pending & LIT_TASKLET_LOW) { | ||
684 | TRACE_CUR("%s: Invoking LOW tasklets.\n", __FUNCTION__); | ||
685 | do_lit_tasklet(which, &which->pending_tasklets); | ||
686 | } | ||
687 | } | ||
688 | } | ||
689 | |||
690 | |||
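/* Execute at most one pending work item per call so that queued
   tasklets are not starved by long-running work. */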
691 | static void do_work(struct klmirqd_info* which) | ||
692 | { | ||
693 | unsigned long flags; | ||
694 | struct work_struct* work; | ||
695 | work_func_t f; | ||
696 | |||
697 | // only execute one work-queue item to yield to tasklets. | ||
698 | // ...is this a good idea, or should we just batch them? | ||
699 | raw_spin_lock_irqsave(&which->lock, flags); | ||
700 | |||
701 | if(!litirq_pending_work_irqoff(which)) | ||
702 | { | ||
703 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
704 | goto no_work; | ||
705 | } | ||
706 | |||
707 | work = list_first_entry(&which->worklist, struct work_struct, entry); | ||
708 | list_del_init(&work->entry); | ||
709 | |||
710 | if(list_empty(&which->worklist)) | ||
711 | { | ||
712 | which->pending &= ~LIT_WORK; | ||
713 | } | ||
714 | |||
715 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
716 | |||
717 | |||
718 | TRACE_CUR("%s: Invoking work object.\n", __FUNCTION__); | ||
719 | // do the work! | ||
720 | work_clear_pending(work); | ||
721 | f = work->func; | ||
722 | f(work); /* can't touch 'work' after this point, | ||
723 | the user may have freed it. */ | ||
724 | |||
725 | atomic_dec(&which->num_work_pending); | ||
726 | |||
727 | no_work: | ||
728 | return; | ||
729 | } | ||
730 | |||
731 | |||
732 | |||
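/* Lifecycle of a klmirqd thread: become a SCHED_LITMUS daemon, register
   with the klmirqd registry, run the optional launch callback, then
   sleep/process pending items until kthread_stop() is called, and
   finally unregister and fall back to SCHED_NORMAL. */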
733 | /* main loop for klmirqd threads */ | ||
734 | static int run_klmirqd(void* callback) | ||
735 | { | ||
736 | int retval = 0; | ||
737 | struct klmirqd_info* info = NULL; | ||
738 | klmirqd_callback_t* cb = (klmirqd_callback_t*)(callback); | ||
739 | |||
740 | retval = become_litmus_daemon(current); | ||
741 | if (retval != 0) { | ||
742 | TRACE_CUR("%s: Failed to transition to rt-task.\n", __FUNCTION__); | ||
743 | goto failed; | ||
744 | } | ||
745 | |||
746 | retval = register_klmirqd(current); | ||
747 | if (retval != 0) { | ||
748 | TRACE_CUR("%s: Failed to become a klmirqd thread.\n", __FUNCTION__); | ||
749 | goto failed_sched_normal; | ||
750 | } | ||
751 | |||
752 | if (cb && cb->func) { | ||
753 | retval = cb->func(cb->arg); | ||
754 | if (retval != 0) { | ||
755 | TRACE_CUR("%s: klmirqd callback reported failure. retval = %d\n", __FUNCTION__, retval); | ||
756 | goto failed_unregister; | ||
757 | } | ||
758 | } | ||
759 | |||
760 | /* enter the interrupt handling workloop */ | ||
761 | |||
762 | info = tsk_rt(current)->klmirqd_info; | ||
763 | |||
764 | set_current_state(TASK_INTERRUPTIBLE); | ||
765 | |||
766 | while (!kthread_should_stop()) | ||
767 | { | ||
768 | preempt_disable(); | ||
769 | if (!litirq_pending(info)) | ||
770 | { | ||
771 | /* sleep for work */ | ||
772 | TRACE_CUR("%s: No more tasklets or work objects. Going to sleep.\n", | ||
773 | __FUNCTION__); | ||
774 | preempt_enable_no_resched(); | ||
775 | schedule(); | ||
776 | |||
777 | if(kthread_should_stop()) /* bail out */ | ||
778 | { | ||
779 | TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__); | ||
780 | continue; | ||
781 | } | ||
782 | |||
783 | preempt_disable(); | ||
784 | } | ||
785 | |||
786 | __set_current_state(TASK_RUNNING); | ||
787 | |||
788 | while (litirq_pending(info)) | ||
789 | { | ||
790 | preempt_enable_no_resched(); | ||
791 | |||
792 | if(kthread_should_stop()) | ||
793 | { | ||
794 | TRACE_CUR("%s:%d: Signaled to terminate.\n", __FUNCTION__, __LINE__); | ||
795 | break; | ||
796 | } | ||
797 | |||
798 | preempt_disable(); | ||
799 | |||
800 | /* Double check that there's still pending work and the owner hasn't | ||
801 | * changed. Pending items may have been flushed while we were sleeping. | ||
802 | */ | ||
803 | if(litirq_pending(info)) | ||
804 | { | ||
805 | TRACE_CUR("%s: Executing tasklets and/or work objects.\n", | ||
806 | __FUNCTION__); | ||
807 | |||
808 | do_litirq(info); | ||
809 | |||
810 | preempt_enable_no_resched(); | ||
811 | |||
812 | // work objects are preemptible. | ||
813 | do_work(info); | ||
814 | } | ||
815 | else | ||
816 | { | ||
817 | TRACE_CUR("%s: Pending work was flushed!\n", __FUNCTION__); | ||
818 | |||
819 | preempt_enable_no_resched(); | ||
820 | } | ||
821 | |||
822 | cond_resched(); | ||
823 | preempt_disable(); | ||
824 | } | ||
825 | preempt_enable(); | ||
826 | set_current_state(TASK_INTERRUPTIBLE); | ||
827 | } | ||
828 | __set_current_state(TASK_RUNNING); | ||
829 | |||
830 | failed_unregister: | ||
831 | /* remove our registration from klmirqd */ | ||
832 | unregister_klmirqd(current); | ||
833 | |||
834 | failed_sched_normal: | ||
835 | become_normal_daemon(current); | ||
836 | |||
837 | failed: | ||
838 | return retval; | ||
839 | } | ||
840 | |||
841 | |||
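/* Hand all of a klmirqd thread's pending tasklets and work items back
   to Linux's regular softirq and workqueue paths. Used while tearing a
   thread down. */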
842 | void flush_pending(struct task_struct* tsk) | ||
843 | { | ||
844 | unsigned long flags; | ||
845 | struct tasklet_struct *list; | ||
846 | u32 work_flushed = 0; | ||
847 | |||
848 | struct klmirqd_info *which; | ||
849 | |||
850 | if (!tsk_rt(tsk)->is_interrupt_thread) { | ||
851 | TRACE("%s/%d is not a proxy thread\n", tsk->comm, tsk->pid); | ||
852 | WARN_ON(1); | ||
853 | return; | ||
854 | } | ||
855 | |||
856 | which = tsk_rt(tsk)->klmirqd_info; | ||
857 | if (!which) { | ||
858 | TRACE("%s/%d is not a klmirqd thread!\n", tsk->comm, tsk->pid); | ||
859 | WARN_ON(1); | ||
860 | return; | ||
861 | } | ||
862 | |||
863 | |||
864 | raw_spin_lock_irqsave(&which->lock, flags); | ||
865 | |||
866 | //__dump_state(which, "flush_pending: before"); | ||
867 | |||
868 | // flush hi tasklets. | ||
869 | if(litirq_pending_hi_irqoff(which)) | ||
870 | { | ||
871 | which->pending &= ~LIT_TASKLET_HI; | ||
872 | |||
873 | list = which->pending_tasklets_hi.head; | ||
874 | which->pending_tasklets_hi.head = NULL; | ||
875 | which->pending_tasklets_hi.tail = &which->pending_tasklets_hi.head; | ||
876 | |||
877 | TRACE("%s: Handing HI tasklets back to Linux.\n", __FUNCTION__); | ||
878 | |||
879 | while(list) | ||
880 | { | ||
881 | struct tasklet_struct *t = list; | ||
882 | list = list->next; | ||
883 | |||
884 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) | ||
885 | { | ||
886 | BUG(); | ||
887 | } | ||
888 | |||
889 | work_flushed |= LIT_TASKLET_HI; | ||
890 | |||
891 | t->owner = NULL; | ||
892 | |||
893 | // re-mark as scheduled and hand the tasklet to Linux's HI softirq | ||
894 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | ||
895 | { | ||
896 | atomic_dec(&which->num_hi_pending); | ||
897 | ___tasklet_hi_schedule(t); | ||
898 | } | ||
899 | else | ||
900 | { | ||
901 | TRACE("%s: dropped hi tasklet??\n", __FUNCTION__); | ||
902 | BUG(); | ||
903 | } | ||
904 | |||
905 | } | ||
906 | } | ||
907 | |||
908 | // flush low tasklets. | ||
909 | if(litirq_pending_low_irqoff(which)) | ||
910 | { | ||
911 | which->pending &= ~LIT_TASKLET_LOW; | ||
912 | |||
913 | list = which->pending_tasklets.head; | ||
914 | which->pending_tasklets.head = NULL; | ||
915 | which->pending_tasklets.tail = &which->pending_tasklets.head; | ||
916 | |||
917 | TRACE("%s: Handing LOW tasklets back to Linux.\n", __FUNCTION__); | ||
918 | |||
919 | while(list) | ||
920 | { | ||
921 | struct tasklet_struct *t = list; | ||
922 | list = list->next; | ||
923 | |||
924 | if(unlikely(!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))) | ||
925 | { | ||
926 | BUG(); | ||
927 | } | ||
928 | |||
929 | work_flushed |= LIT_TASKLET_LOW; | ||
930 | |||
931 | t->owner = NULL; | ||
932 | // sched_trace_tasklet_end(owner, 1ul); | ||
933 | |||
934 | if(!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) | ||
935 | { | ||
936 | atomic_dec(&which->num_low_pending); | ||
937 | ___tasklet_schedule(t); | ||
938 | } | ||
939 | else | ||
940 | { | ||
941 | TRACE("%s: dropped tasklet??\n", __FUNCTION__); | ||
942 | BUG(); | ||
943 | } | ||
944 | } | ||
945 | } | ||
946 | |||
947 | // flush work objects | ||
948 | if(litirq_pending_work_irqoff(which)) | ||
949 | { | ||
950 | which->pending &= ~LIT_WORK; | ||
951 | |||
952 | TRACE("%s: Handing work objects back to Linux.\n", __FUNCTION__); | ||
953 | |||
954 | while(!list_empty(&which->worklist)) | ||
955 | { | ||
956 | struct work_struct* work = | ||
957 | list_first_entry(&which->worklist, struct work_struct, entry); | ||
958 | list_del_init(&work->entry); | ||
959 | |||
960 | work_flushed |= LIT_WORK; | ||
961 | atomic_dec(&which->num_work_pending); | ||
962 | |||
963 | work->owner = NULL; | ||
964 | // sched_trace_work_end(owner, current, 1ul); | ||
965 | __schedule_work(work); | ||
966 | } | ||
967 | } | ||
968 | |||
969 | //__dump_state(which, "flush_pending: after (before reeval prio)"); | ||
970 | |||
971 | |||
972 | mb(); /* commit changes to pending flags */ | ||
973 | |||
974 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
975 | } | ||
976 | |||
977 | |||
978 | |||
979 | |||
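/* Append a tasklet to the LOW pending list and, if the thread had
   nothing pending before, wake it up. Callers suppress the wakeup
   (wakeup == 0) when requeuing from do_lit_tasklet(). */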
980 | static void ___litmus_tasklet_schedule(struct tasklet_struct *t, | ||
981 | struct klmirqd_info *which, | ||
982 | int wakeup) | ||
983 | { | ||
984 | unsigned long flags; | ||
985 | u32 old_pending; | ||
986 | |||
987 | t->next = NULL; | ||
988 | |||
989 | raw_spin_lock_irqsave(&which->lock, flags); | ||
990 | |||
991 | //__dump_state(which, "___litmus_tasklet_schedule: before queuing"); | ||
992 | |||
993 | *(which->pending_tasklets.tail) = t; | ||
994 | which->pending_tasklets.tail = &t->next; | ||
995 | |||
996 | old_pending = which->pending; | ||
997 | which->pending |= LIT_TASKLET_LOW; | ||
998 | |||
999 | atomic_inc(&which->num_low_pending); | ||
1000 | |||
1001 | mb(); | ||
1002 | |||
1003 | if(!old_pending && wakeup) | ||
1004 | { | ||
1005 | wakeup_litirqd_locked(which); /* wake up the klmirqd */ | ||
1006 | } | ||
1007 | |||
1008 | //__dump_state(which, "___litmus_tasklet_schedule: after queuing"); | ||
1009 | |||
1010 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
1011 | } | ||
1012 | |||
1013 | |||
1014 | int __litmus_tasklet_schedule(struct tasklet_struct *t, struct task_struct* klmirqd_thread) | ||
1015 | { | ||
1016 | int ret = 0; /* assume failure */ | ||
1017 | struct klmirqd_info* info; | ||
1018 | |||
1019 | if (unlikely(!is_realtime(klmirqd_thread) || | ||
1020 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || | ||
1021 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { | ||
1022 | TRACE("%s: %s/%d can't handle tasklets\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); | ||
1023 | return ret; | ||
1024 | } | ||
1025 | |||
1026 | info = tsk_rt(klmirqd_thread)->klmirqd_info; | ||
1027 | |||
1028 | if (likely(!info->terminating)) { | ||
1029 | ret = 1; | ||
1030 | ___litmus_tasklet_schedule(t, info, 1); | ||
1031 | } | ||
1032 | else { | ||
1033 | TRACE("%s: Tasklet rejected because %s/%d is terminating\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); | ||
1034 | } | ||
1035 | return(ret); | ||
1036 | } | ||
1037 | |||
1038 | EXPORT_SYMBOL(__litmus_tasklet_schedule); | ||
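/*
 * A minimal usage sketch (not part of the original file): redirecting a
 * driver tasklet to a klmirqd thread instead of the normal softirq path.
 * 'my_isr', 'my_tasklet_func', 'my_tasklet', 'my_klmirqd_thread', and
 * 'my_rt_owner' are hypothetical; the owner assignment assumes the
 * tasklet_struct::owner field that this file already reads for tracing
 * and in flush_pending().
 */
#if 0
static struct task_struct* my_klmirqd_thread;  /* saved at launch time */
static struct task_struct* my_rt_owner;        /* task being serviced */

static void my_tasklet_func(unsigned long data) { /* bottom half */ }
static DECLARE_TASKLET(my_tasklet, my_tasklet_func, 0);

static irqreturn_t my_isr(int irq, void* dev)
{
	my_tasklet.owner = my_rt_owner;
	if (!__litmus_tasklet_schedule(&my_tasklet, my_klmirqd_thread)) {
		/* rejected (thread missing or terminating):
		   fall back to Linux's softirq path */
		tasklet_schedule(&my_tasklet);
	}
	return IRQ_HANDLED;
}
#endif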
1039 | |||
1040 | |||
1041 | static void ___litmus_tasklet_hi_schedule(struct tasklet_struct *t, | ||
1042 | struct klmirqd_info *which, | ||
1043 | int wakeup) | ||
1044 | { | ||
1045 | unsigned long flags; | ||
1046 | u32 old_pending; | ||
1047 | |||
1048 | t->next = NULL; | ||
1049 | |||
1050 | raw_spin_lock_irqsave(&which->lock, flags); | ||
1051 | |||
1052 | *(which->pending_tasklets_hi.tail) = t; | ||
1053 | which->pending_tasklets_hi.tail = &t->next; | ||
1054 | |||
1055 | old_pending = which->pending; | ||
1056 | which->pending |= LIT_TASKLET_HI; | ||
1057 | |||
1058 | atomic_inc(&which->num_hi_pending); | ||
1059 | |||
1060 | mb(); | ||
1061 | |||
1062 | if(!old_pending && wakeup) | ||
1063 | { | ||
1064 | wakeup_litirqd_locked(which); /* wake up the klmirqd */ | ||
1065 | } | ||
1066 | |||
1067 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
1068 | } | ||
1069 | |||
1070 | int __litmus_tasklet_hi_schedule(struct tasklet_struct *t, struct task_struct* klmirqd_thread) | ||
1071 | { | ||
1072 | int ret = 0; /* assume failure */ | ||
1073 | struct klmirqd_info* info; | ||
1074 | |||
1075 | if (unlikely(!is_realtime(klmirqd_thread) || | ||
1076 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || | ||
1077 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { | ||
1078 | TRACE("%s: %s/%d can't handle tasklets\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); | ||
1079 | return ret; | ||
1080 | } | ||
1081 | |||
1082 | info = tsk_rt(klmirqd_thread)->klmirqd_info; | ||
1083 | |||
1084 | if (likely(!info->terminating)) { | ||
1085 | ret = 1; | ||
1086 | ___litmus_tasklet_hi_schedule(t, info, 1); | ||
1087 | } | ||
1088 | else { | ||
1089 | TRACE("%s: Tasklet rejected because %s/%d is terminating\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); | ||
1090 | } | ||
1091 | |||
1092 | return(ret); | ||
1093 | } | ||
1094 | |||
1095 | EXPORT_SYMBOL(__litmus_tasklet_hi_schedule); | ||
1096 | |||
1097 | |||
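/* Like __litmus_tasklet_hi_schedule(), but inserts at the head of the
   HI list. Must be called with interrupts disabled (see the BUG_ON). */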
1098 | int __litmus_tasklet_hi_schedule_first(struct tasklet_struct *t, struct task_struct* klmirqd_thread) | ||
1099 | { | ||
1100 | int ret = 0; /* assume failure */ | ||
1101 | u32 old_pending; | ||
1102 | struct klmirqd_info* info; | ||
1103 | |||
1104 | BUG_ON(!irqs_disabled()); | ||
1105 | |||
1106 | if (unlikely(!is_realtime(klmirqd_thread) || | ||
1107 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || | ||
1108 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { | ||
1109 | TRACE("%s: %s/%d can't handle tasklets\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); | ||
1110 | return ret; | ||
1111 | } | ||
1112 | |||
1113 | info = tsk_rt(klmirqd_thread)->klmirqd_info; | ||
1114 | |||
1115 | if (likely(!info->terminating)) { | ||
1116 | |||
1117 | raw_spin_lock(&info->lock); | ||
1118 | |||
1119 | ret = 1; // success! | ||
1120 | |||
1121 | t->next = info->pending_tasklets_hi.head; | ||
1122 | info->pending_tasklets_hi.head = t; | ||
1123 | |||
1124 | old_pending = info->pending; | ||
1125 | info->pending |= LIT_TASKLET_HI; | ||
1126 | |||
1127 | atomic_inc(&info->num_hi_pending); | ||
1128 | |||
1129 | mb(); | ||
1130 | |||
1131 | if(!old_pending) { | ||
1132 | wakeup_litirqd_locked(info); /* wake up the klmirqd */ | ||
1133 | } | ||
1134 | |||
1135 | raw_spin_unlock(&info->lock); | ||
1136 | } | ||
1137 | else { | ||
1138 | TRACE("%s: Tasklet rejected because %s/%d is terminating\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); | ||
1139 | } | ||
1140 | |||
1141 | return(ret); | ||
1142 | } | ||
1143 | |||
1144 | EXPORT_SYMBOL(__litmus_tasklet_hi_schedule_first); | ||
1145 | |||
1146 | |||
1147 | |||
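/* Append a work item to the worklist and wake the thread if it was
   idle, mirroring the tasklet queuing paths above. */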
1148 | static void ___litmus_schedule_work(struct work_struct *w, | ||
1149 | struct klmirqd_info *which, | ||
1150 | int wakeup) | ||
1151 | { | ||
1152 | unsigned long flags; | ||
1153 | u32 old_pending; | ||
1154 | |||
1155 | raw_spin_lock_irqsave(&which->lock, flags); | ||
1156 | |||
1157 | work_pending(w); | ||
1158 | list_add_tail(&w->entry, &which->worklist); | ||
1159 | |||
1160 | old_pending = which->pending; | ||
1161 | which->pending |= LIT_WORK; | ||
1162 | |||
1163 | atomic_inc(&which->num_work_pending); | ||
1164 | |||
1165 | mb(); | ||
1166 | |||
1167 | if(!old_pending && wakeup) | ||
1168 | { | ||
1169 | wakeup_litirqd_locked(which); /* wakeup the klmirqd */ | ||
1170 | } | ||
1171 | |||
1172 | raw_spin_unlock_irqrestore(&which->lock, flags); | ||
1173 | } | ||
1174 | |||
1175 | int __litmus_schedule_work(struct work_struct *w, struct task_struct* klmirqd_thread) | ||
1176 | { | ||
1177 | int ret = 1; /* assume success */ | ||
1178 | struct klmirqd_info* info; | ||
1179 | |||
1180 | if (unlikely(!is_realtime(klmirqd_thread) || | ||
1181 | !tsk_rt(klmirqd_thread)->is_interrupt_thread || | ||
1182 | !tsk_rt(klmirqd_thread)->klmirqd_info)) { | ||
1183 | TRACE("%s: %s/%d can't handle work items\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); | ||
1184 | return ret; | ||
1185 | } | ||
1186 | |||
1187 | info = tsk_rt(klmirqd_thread)->klmirqd_info; | ||
1188 | |||
1189 | |||
1190 | if (likely(!info->terminating)) { | ||
1191 | ___litmus_schedule_work(w, info, 1); | ||
1192 | } | ||
1193 | else { | ||
1194 | TRACE("%s: Work rejected because %s/%d is terminating\n", __FUNCTION__, klmirqd_thread->comm, klmirqd_thread->pid); | ||
1195 | ret = 0; | ||
1196 | } | ||
1197 | |||
1198 | return(ret); | ||
1199 | } | ||
1200 | EXPORT_SYMBOL(__litmus_schedule_work); | ||
1201 | |||
1202 | |||
1203 | |||
1204 | |||
1205 | |||