author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2009-12-07 15:01:58 -0500
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>  2009-12-07 15:01:58 -0500
commit    d0f2d3babee1dbb2f1f6d7dadf7ff287b01de375 (patch)
tree      91accf9a2205296bb73f2707740767652ed51bd2
parent    4a36db417c95a0ce3e70d2896d0d81b98d478b53 (diff)
remove scheduler_signal() support
-rw-r--r--  include/litmus/litmus.h  11
-rw-r--r--  litmus/litmus.c          95
2 files changed, 0 insertions, 106 deletions
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index c56e004b9a..96ac99b70a 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -52,17 +52,6 @@ static inline int in_list(struct list_head* list)
 
 #define NO_CPU 0xffffffff
 
-#define RT_PREEMPTIVE		0x2050 /* = NP */
-#define RT_NON_PREEMPTIVE	0x4e50 /* = P */
-#define RT_EXIT_NP_REQUESTED	0x5251 /* = RQ */
-
-
-/* kill naughty tasks
- */
-void scheduler_signal(struct task_struct *t, unsigned int signal);
-void send_scheduler_signals(void);
-void np_mem_kill(struct task_struct *t);
-
 void litmus_fork(struct task_struct *tsk);
 void litmus_exec(void);
 /* clean up real-time state of a task */
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 1afd41e394..3e84fe5ca7 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -29,12 +29,6 @@ atomic_t __log_seq_no = ATOMIC_INIT(0);
 /* current master CPU for handling timer IRQs */
 atomic_t release_master_cpu = ATOMIC_INIT(NO_CPU);
 
-/* To send signals from the scheduler
- * Must drop locks first.
- */
-static LIST_HEAD(sched_sig_list);
-static DEFINE_SPINLOCK(sched_sig_list_lock);
-
 static struct kmem_cache * heap_node_cache;
 extern struct kmem_cache * release_heap_cache;
 
@@ -244,95 +238,6 @@ asmlinkage long sys_query_job_no(unsigned int __user *job)
 	return retval;
 }
 
-struct sched_sig {
-	struct list_head	list;
-	struct task_struct*	task;
-	unsigned int		signal:31;
-	int			force:1;
-};
-
-static void __scheduler_signal(struct task_struct *t, unsigned int signo,
-			       int force)
-{
-	struct sched_sig* sig;
-
-	sig = kmalloc(GFP_ATOMIC, sizeof(*sig));
-	if (!sig) {
-		TRACE_TASK(t, "dropping signal: %u\n", t);
-		return;
-	}
-
-	spin_lock(&sched_sig_list_lock);
-
-	sig->signal = signo;
-	sig->force  = force;
-	sig->task   = t;
-	get_task_struct(t);
-	list_add(&sig->list, &sched_sig_list);
-
-	spin_unlock(&sched_sig_list_lock);
-}
-
-void scheduler_signal(struct task_struct *t, unsigned int signo)
-{
-	__scheduler_signal(t, signo, 0);
-}
-
-void force_scheduler_signal(struct task_struct *t, unsigned int signo)
-{
-	__scheduler_signal(t, signo, 1);
-}
-
-/* FIXME: get rid of the locking and do this on a per-processor basis */
-void send_scheduler_signals(void)
-{
-	unsigned long flags;
-	struct list_head *p, *extra;
-	struct siginfo info;
-	struct sched_sig* sig;
-	struct task_struct* t;
-	struct list_head claimed;
-
-	if (spin_trylock_irqsave(&sched_sig_list_lock, flags)) {
-		if (list_empty(&sched_sig_list))
-			p = NULL;
-		else {
-			p = sched_sig_list.next;
-			list_del(&sched_sig_list);
-			INIT_LIST_HEAD(&sched_sig_list);
-		}
-		spin_unlock_irqrestore(&sched_sig_list_lock, flags);
-
-		/* abort if there are no signals */
-		if (!p)
-			return;
-
-		/* take signal list we just obtained */
-		list_add(&claimed, p);
-
-		list_for_each_safe(p, extra, &claimed) {
-			list_del(p);
-			sig = list_entry(p, struct sched_sig, list);
-			t = sig->task;
-			info.si_signo = sig->signal;
-			info.si_errno = 0;
-			info.si_code  = SI_KERNEL;
-			info.si_pid   = 1;
-			info.si_uid   = 0;
-			TRACE("sending signal %d to %d\n", info.si_signo,
-			      t->pid);
-			if (sig->force)
-				force_sig_info(sig->signal, &info, t);
-			else
-				send_sig_info(sig->signal, &info, t);
-			put_task_struct(t);
-			kfree(sig);
-		}
-	}
-
-}
-
-
 /* sys_null_call() is only used for determining raw system call
  * overheads (kernel entry, kernel exit). It has no useful side effects.
  * If ts is non-NULL, then the current Feather-Trace time is recorded.
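
For context, the code removed by this patch follows a common deferred-work pattern: as its own comment notes ("To send signals from the scheduler / Must drop locks first."), the scheduler cannot safely deliver a signal while holding scheduler locks, so __scheduler_signal() only records the request on a spinlock-protected list, and send_scheduler_signals() later claims the entire list in one short trylock-guarded critical section, then delivers the signals with no locks held. The following is a minimal user-space C sketch of that pattern, not the kernel code itself; the names (pending_sig, queue_signal, drain_signals) are hypothetical, and a pthread mutex stands in for the kernel spinlock.

/* Sketch of the deferred-delivery pattern used by the removed
 * scheduler_signal() code. All names here are illustrative. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pending_sig {
	struct pending_sig *next;
	int pid;	/* stand-in for the target task */
	int signo;	/* signal number to deliver */
};

static struct pending_sig *sig_list;	/* shared queue of pending requests */
static pthread_mutex_t sig_lock = PTHREAD_MUTEX_INITIALIZER;

/* Producer side: may run in a context where delivery is unsafe, so it
 * only records the request (cf. __scheduler_signal()). */
static void queue_signal(int pid, int signo)
{
	struct pending_sig *s = malloc(sizeof(*s));
	if (!s)
		return;	/* drop the request on allocation failure */
	s->pid = pid;
	s->signo = signo;
	pthread_mutex_lock(&sig_lock);
	s->next = sig_list;	/* push onto the shared list */
	sig_list = s;
	pthread_mutex_unlock(&sig_lock);
}

/* Consumer side: claim the whole list in one short critical section,
 * then deliver with no locks held (cf. send_scheduler_signals()). */
static void drain_signals(void)
{
	struct pending_sig *claimed, *s;

	if (pthread_mutex_trylock(&sig_lock) != 0)
		return;	/* contended; another drain pass will run */
	claimed = sig_list;
	sig_list = NULL;
	pthread_mutex_unlock(&sig_lock);

	while (claimed) {	/* deliver outside the lock */
		s = claimed;
		claimed = s->next;
		printf("deliver signal %d to pid %d\n", s->signo, s->pid);
		free(s);
	}
}

int main(void)
{
	queue_signal(1234, 9);
	queue_signal(5678, 15);
	drain_signals();
	return 0;
}

The key design point, which the kernel version shares, is that the lock protects only list manipulation, never the delivery itself; taking ownership of the whole list at once keeps the critical section constant-length regardless of how many requests are queued.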