author    Bjoern B. Brandenburg <bbb@cs.unc.edu>  2009-12-07 14:55:50 -0500
committer Bjoern B. Brandenburg <bbb@cs.unc.edu>  2009-12-07 14:55:50 -0500
commit    51c41c7b109e6da035c42ca85d6da44586a18359 (patch)
tree      6dd8c2e830de38aa255ab07c5d8b02a57f5a451b
parent    4ad2bcf6f6fff31ace097f7616b002df445110ec (diff)
Remove non-preemptive section hack.
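This drops the userspace-registered non-preemptive-section flag (the sys_register_np_flag()/sys_exit_np() system calls and the CONFIG_NP_SECTION option); only the in-kernel flag remains. For reference, as the include/litmus/litmus.h hunk below shows, is_np() now reduces to the kernel-side check (snippet copied from the remaining inline helper; the comment is added here for illustration):

    static inline int is_np(struct task_struct *t)
    {
            /* only the kernel-maintained flag is consulted now;
             * the userspace np flag and its syscalls are gone */
            return tsk_rt(t)->kernel_np;
    }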
-rw-r--r--  arch/x86/kernel/syscall_table_32.S |   8
-rw-r--r--  include/litmus/litmus.h            |  14
-rw-r--r--  include/litmus/unistd.h            |  26
-rw-r--r--  litmus/Kconfig                     |  15
-rw-r--r--  litmus/litmus.c                    | 155
5 files changed, 16 insertions, 202 deletions
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
index 65496c26da..27868b3742 100644
--- a/arch/x86/kernel/syscall_table_32.S
+++ b/arch/x86/kernel/syscall_table_32.S
@@ -328,16 +328,14 @@ ENTRY(sys_call_table)
 	.long sys_set_rt_task_param	/* 325 */
 	.long sys_get_rt_task_param
 	.long sys_complete_job
-	.long sys_register_np_flag
-	.long sys_exit_np
-	.long sys_od_open	/* 330 */
+	.long sys_od_open
 	.long sys_od_close
 	.long sys_fmlp_down
 	.long sys_fmlp_up
 	.long sys_srp_down
-	.long sys_srp_up	/* 335 */
+	.long sys_srp_up
 	.long sys_query_job_no
 	.long sys_wait_for_job_release
 	.long sys_wait_for_ts_release
 	.long sys_release_ts
-	.long sys_null_call	/* 340 */
+	.long sys_null_call
diff --git a/include/litmus/litmus.h b/include/litmus/litmus.h
index 6c7a4c5234..c56e004b9a 100644
--- a/include/litmus/litmus.h
+++ b/include/litmus/litmus.h
@@ -146,18 +146,6 @@ void srp_ceiling_block(void);
 
 #define heap2task(hn) ((struct task_struct*) hn->value)
 
-
-#ifdef CONFIG_NP_SECTION
-/* returns 1 if task t has registered np flag and set it to RT_NON_PREEMPTIVE
- */
-int is_np(struct task_struct *t);
-
-/* request that the task should call sys_exit_np()
- */
-void request_exit_np(struct task_struct *t);
-
-#else
-
 static inline int is_np(struct task_struct *t)
 {
 	return tsk_rt(t)->kernel_np;
@@ -165,8 +153,6 @@ static inline int is_np(struct task_struct *t)
 
 #define request_exit_np(t)
 
-#endif
-
 static inline int is_present(struct task_struct* t)
 {
 	return t && tsk_rt(t)->present;
diff --git a/include/litmus/unistd.h b/include/litmus/unistd.h
index 5ef367f491..d83842d3e5 100644
--- a/include/litmus/unistd.h
+++ b/include/litmus/unistd.h
@@ -4,18 +4,16 @@
 #define __NR_set_rt_task_param	__LSC(0)
 #define __NR_get_rt_task_param	__LSC(1)
 #define __NR_sleep_next_period	__LSC(2)
-#define __NR_register_np_flag	__LSC(3)
-#define __NR_exit_np		__LSC(4)
-#define __NR_od_open		__LSC(5)
-#define __NR_od_close		__LSC(6)
-#define __NR_fmlp_down		__LSC(7)
-#define __NR_fmlp_up		__LSC(8)
-#define __NR_srp_down		__LSC(9)
-#define __NR_srp_up		__LSC(10)
-#define __NR_query_job_no	__LSC(11)
-#define __NR_wait_for_job_release __LSC(12)
-#define __NR_wait_for_ts_release __LSC(13)
-#define __NR_release_ts		__LSC(14)
-#define __NR_null_call		__LSC(15)
+#define __NR_od_open		__LSC(3)
+#define __NR_od_close		__LSC(4)
+#define __NR_fmlp_down		__LSC(5)
+#define __NR_fmlp_up		__LSC(6)
+#define __NR_srp_down		__LSC(7)
+#define __NR_srp_up		__LSC(8)
+#define __NR_query_job_no	__LSC(9)
+#define __NR_wait_for_job_release __LSC(10)
+#define __NR_wait_for_ts_release __LSC(11)
+#define __NR_release_ts		__LSC(12)
+#define __NR_null_call		__LSC(13)
 
-#define NR_litmus_syscalls 16
+#define NR_litmus_syscalls 14
diff --git a/litmus/Kconfig b/litmus/Kconfig
index f73a454c4f..90c9717cc5 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -2,19 +2,6 @@ menu "LITMUS^RT"
 
 menu "Real-Time Synchronization"
 
-config NP_SECTION
-	bool "Non-preemptive section support"
-	depends on !SPARC64
-	default n
-	help
-	  Include support for flag-based non-preemptive section signaling
-	  from userspace.
-
-	  (currently broken on SPARC64)
-
-	  Say Yes if you want FMLP short critical section synchronization support.
-
-
 config SRP
 	bool "Stack Resource Policy (SRP)"
 	default n
@@ -25,7 +12,7 @@ config SRP
 
 config FMLP
 	bool "FMLP support"
-	depends on NP_SECTION
+#	depends on NP_SECTION
 	default n
 	help
 	  Include support for deterministic multiprocessor real-time
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 3562322827..1afd41e394 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -332,161 +332,6 @@ void send_scheduler_signals(void)
 
 }
 
-#ifdef CONFIG_NP_SECTION
-
-static inline void np_mem_error(struct task_struct* t, const char* reason)
-{
-	if (t->state != TASK_DEAD && !(t->flags & PF_EXITING)) {
-		TRACE("np section: %s => %s/%d killed\n",
-		      reason, t->comm, t->pid);
-		force_scheduler_signal(t, SIGKILL);
-	}
-}
-
-/* sys_register_np_flag() allows real-time tasks to register an
- * np section indicator.
- * returns 0 if the flag was successfully registered
- * returns EINVAL if current task is not a real-time task
- * returns EFAULT if *flag couldn't be written
- */
-asmlinkage long sys_register_np_flag(short __user *flag)
-{
-	int retval = -EINVAL;
-	short test_val = RT_PREEMPTIVE;
-
-	/* avoid races with the scheduler */
-	preempt_disable();
-	TRACE("reg_np_flag(%p) for %s/%d\n", flag,
-	      current->comm, current->pid);
-
-	/* Let's first try to write to the address.
-	 * That way it is initialized and any bugs
-	 * involving dangling pointers will caught
-	 * early.
-	 * NULL indicates disabling np section support
-	 * and should not be tested.
-	 */
-	if (flag)
-		retval = poke_kernel_address(test_val, flag);
-	else
-		retval = 0;
-	TRACE("reg_np_flag: retval=%d\n", retval);
-	if (unlikely(0 != retval))
-		np_mem_error(current, "np flag: not writable");
-	else
-		/* the pointer is ok */
-		current->rt_param.np_flag = flag;
-
-	preempt_enable();
-	return retval;
-}
-
-
-void request_exit_np(struct task_struct *t)
-{
-	int ret;
-	short flag;
-
-	/* We can only do this if t is actually currently scheduled on this CPU
-	 * because otherwise we are in the wrong address space. Thus make sure
-	 * to check.
-	 */
-	BUG_ON(t != current);
-
-	if (unlikely(!is_realtime(t) || !t->rt_param.np_flag)) {
-		TRACE_TASK(t, "request_exit_np(): BAD TASK!\n");
-		return;
-	}
-
-	flag = RT_EXIT_NP_REQUESTED;
-	ret = poke_kernel_address(flag, t->rt_param.np_flag + 1);
-	TRACE("request_exit_np(%s/%d)\n", t->comm, t->pid);
-	if (unlikely(0 != ret))
-		np_mem_error(current, "request_exit_np(): flag not writable");
-
-}
-
-
-int is_np(struct task_struct* t)
-{
-	int ret;
-	unsigned short flag = 0x5858; /* = XX, looks nicer in debug*/
-
-	BUG_ON(t != current);
-
-	if (unlikely(t->rt_param.kernel_np))
-		return 1;
-	else if (unlikely(t->rt_param.np_flag == NULL) ||
-		 t->flags & PF_EXITING ||
-		 t->state == TASK_DEAD)
-		return 0;
-	else {
-		/* This is the tricky part. The process has registered a
-		 * non-preemptive section marker. We now need to check whether
-		 * it is set to to NON_PREEMPTIVE. Along the way we could
-		 * discover that the pointer points to an unmapped region (=>
-		 * kill the task) or that the location contains some garbage
-		 * value (=> also kill the task). Killing the task in any case
-		 * forces userspace to play nicely. Any bugs will be discovered
-		 * immediately.
-		 */
-		ret = probe_kernel_address(t->rt_param.np_flag, flag);
-		if (0 == ret && (flag == RT_NON_PREEMPTIVE ||
-				 flag == RT_PREEMPTIVE))
-			return flag != RT_PREEMPTIVE;
-		else {
-			/* either we could not read from the address or
-			 * it contained garbage => kill the process
-			 * FIXME: Should we cause a SEGFAULT instead?
-			 */
-			TRACE("is_np: ret=%d flag=%c%c (%x)\n", ret,
-			      flag & 0xff, (flag >> 8) & 0xff, flag);
-			np_mem_error(t, "is_np() could not read");
-			return 0;
-		}
-	}
-}
-
-/*
- * sys_exit_np() allows real-time tasks to signal that it left a
- * non-preemptable section. It will be called after the kernel requested a
- * callback in the preemption indicator flag.
- * returns 0 if the signal was valid and processed.
- * returns EINVAL if current task is not a real-time task
- */
-asmlinkage long sys_exit_np(void)
-{
-	int retval = -EINVAL;
-
-	TS_EXIT_NP_START;
-
-	if (!is_realtime(current))
-		goto out;
-
-	TRACE("sys_exit_np(%s/%d)\n", current->comm, current->pid);
-	/* force rescheduling so that we can be preempted */
-	set_tsk_need_resched(current);
-	retval = 0;
- out:
-
-	TS_EXIT_NP_END;
-	return retval;
-}
-
-#else /* !CONFIG_NP_SECTION */
-
-asmlinkage long sys_register_np_flag(short __user *flag)
-{
-	return -ENOSYS;
-}
-
-asmlinkage long sys_exit_np(void)
-{
-	return -ENOSYS;
-}
-
-#endif /* CONFIG_NP_SECTION */
-
 
 /* sys_null_call() is only used for determining raw system call
  * overheads (kernel entry, kernel exit). It has no useful side effects.