aboutsummaryrefslogtreecommitdiffstats
path: root/litmus/litmus.c
diff options
context:
space:
mode:
Diffstat (limited to 'litmus/litmus.c')
-rw-r--r--litmus/litmus.c102
1 files changed, 42 insertions, 60 deletions
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 6a1095aa7725..2911e7ec7029 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -10,6 +10,7 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/reboot.h> 12#include <linux/reboot.h>
13#include <linux/stop_machine.h>
13 14
14#include <litmus/litmus.h> 15#include <litmus/litmus.h>
15#include <litmus/bheap.h> 16#include <litmus/bheap.h>
@@ -32,9 +33,6 @@
32 33
33/* Number of RT tasks that exist in the system */ 34/* Number of RT tasks that exist in the system */
34atomic_t rt_task_count = ATOMIC_INIT(0); 35atomic_t rt_task_count = ATOMIC_INIT(0);
35static DEFINE_RAW_SPINLOCK(task_transition_lock);
36/* synchronize plugin switching */
37atomic_t cannot_use_plugin = ATOMIC_INIT(0);
38 36
39/* Give log messages sequential IDs. */ 37/* Give log messages sequential IDs. */
40atomic_t __log_seq_no = ATOMIC_INIT(0); 38atomic_t __log_seq_no = ATOMIC_INIT(0);
@@ -479,13 +477,9 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
479long __litmus_admit_task(struct task_struct* tsk) 477long __litmus_admit_task(struct task_struct* tsk)
480{ 478{
481 long retval = 0; 479 long retval = 0;
482 unsigned long flags;
483 480
484 INIT_LIST_HEAD(&tsk_rt(tsk)->list); 481 INIT_LIST_HEAD(&tsk_rt(tsk)->list);
485 482
486 /* avoid scheduler plugin changing underneath us */
487 raw_spin_lock_irqsave(&task_transition_lock, flags);
488
489 /* allocate heap node for this task */ 483 /* allocate heap node for this task */
490 tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC); 484 tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
491 tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC); 485 tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
@@ -493,11 +487,8 @@ long __litmus_admit_task(struct task_struct* tsk)
493 if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) { 487 if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
494 printk(KERN_WARNING "litmus: no more heap node memory!?\n"); 488 printk(KERN_WARNING "litmus: no more heap node memory!?\n");
495 489
496 bheap_node_free(tsk_rt(tsk)->heap_node);
497 release_heap_free(tsk_rt(tsk)->rel_heap);
498
499 retval = -ENOMEM; 490 retval = -ENOMEM;
500 goto out_unlock; 491 goto out;
501 } else { 492 } else {
502 bheap_node_init(&tsk_rt(tsk)->heap_node, tsk); 493 bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
503 } 494 }
@@ -511,14 +502,10 @@ long __litmus_admit_task(struct task_struct* tsk)
511#ifdef CONFIG_LITMUS_NESTED_LOCKING 502#ifdef CONFIG_LITMUS_NESTED_LOCKING
512 tsk_rt(tsk)->blocked_lock = NULL; 503 tsk_rt(tsk)->blocked_lock = NULL;
513 raw_spin_lock_init(&tsk_rt(tsk)->hp_blocked_tasks_lock); 504 raw_spin_lock_init(&tsk_rt(tsk)->hp_blocked_tasks_lock);
514 //INIT_BINHEAP_HANDLE(&tsk_rt(tsk)->hp_blocked_tasks, prio_order); // done by scheduler
515#endif
516#ifdef CONFIG_LITMUS_SOFTIRQD
517 /* not an interrupt thread by default */
518 //tsk_rt(tsk)->is_interrupt_thread = 0;
519 //tsk_rt(tsk)->klmirqd_info = NULL;
520#endif 505#endif
521 506
507 preempt_disable();
508
522 retval = litmus->admit_task(tsk); 509 retval = litmus->admit_task(tsk);
523 510
524 if (!retval) { 511 if (!retval) {
@@ -527,9 +514,14 @@ long __litmus_admit_task(struct task_struct* tsk)
527 atomic_inc(&rt_task_count); 514 atomic_inc(&rt_task_count);
528 } 515 }
529 516
530out_unlock: 517 preempt_enable();
531 raw_spin_unlock_irqrestore(&task_transition_lock, flags); 518
519 if (retval) {
520 bheap_node_free(tsk_rt(tsk)->heap_node);
521 release_heap_free(tsk_rt(tsk)->rel_heap);
522 }
532 523
524out:
533 return retval; 525 return retval;
534} 526}
535 527
@@ -590,39 +582,10 @@ void litmus_exit_task(struct task_struct* tsk)
590 } 582 }
591} 583}
592 584
593/* IPI callback to synchronize plugin switching */ 585static int do_plugin_switch(void *_plugin)
594static void synch_on_plugin_switch(void* info)
595{
596 atomic_inc(&cannot_use_plugin);
597 while (atomic_read(&cannot_use_plugin) > 0)
598 cpu_relax();
599}
600
601int switch_sched_plugin(struct sched_plugin* plugin)
602{ 586{
603 //unsigned long flags; 587 int ret;
604 int ret = 0; 588 struct sched_plugin* plugin = _plugin;
605
606 BUG_ON(!plugin);
607
608#ifdef CONFIG_LITMUS_SOFTIRQD
609 if (!klmirqd_is_dead()) {
610 kill_klmirqd();
611 }
612#endif
613
614 /* forbid other cpus to use the plugin */
615 atomic_set(&cannot_use_plugin, 1);
616 /* send IPI to force other CPUs to synch with us */
617 smp_call_function(synch_on_plugin_switch, NULL, 0);
618
619 /* wait until all other CPUs have started synch */
620 while (atomic_read(&cannot_use_plugin) < num_online_cpus()) {
621 cpu_relax();
622 }
623
624 /* stop task transitions */
625 //raw_spin_lock_irqsave(&task_transition_lock, flags);
626 589
627 /* don't switch if there are active real-time tasks */ 590 /* don't switch if there are active real-time tasks */
628 if (atomic_read(&rt_task_count) == 0) { 591 if (atomic_read(&rt_task_count) == 0) {
@@ -630,24 +593,45 @@ int switch_sched_plugin(struct sched_plugin* plugin)
630 if (0 != ret) 593 if (0 != ret)
631 goto out; 594 goto out;
632 595
633 litmus = plugin; // switch 596 litmus = plugin; /* optimistic switch */
634 mb(); // make sure it's seen everywhere. 597 mb();
598
635 ret = litmus->activate_plugin(); 599 ret = litmus->activate_plugin();
636 if (0 != ret) { 600 if (0 != ret) {
637 printk(KERN_INFO "Can't activate %s (%d).\n", 601 printk(KERN_INFO "Can't activate %s (%d).\n",
638 litmus->plugin_name, ret); 602 litmus->plugin_name, ret);
639 litmus = &linux_sched_plugin; 603 litmus = &linux_sched_plugin; /* fail to Linux */
604 ret = litmus->activate_plugin();
605 BUG_ON(ret);
640 } 606 }
641 printk(KERN_INFO "Switching to LITMUS^RT plugin %s.\n", litmus->plugin_name); 607 printk(KERN_INFO "Switched to LITMUS^RT plugin %s.\n", litmus->plugin_name);
642 } else 608 } else
643 ret = -EBUSY; 609 ret = -EBUSY;
644out: 610out:
645 //raw_spin_unlock_irqrestore(&task_transition_lock, flags);
646 atomic_set(&cannot_use_plugin, 0);
647
648 return ret; 611 return ret;
649} 612}
650 613
614/* Switching a plugin in use is tricky.
 615 * We must watch out that no real-time tasks exist
616 * (and that none is created in parallel) and that the plugin is not
617 * currently in use on any processor (in theory).
618 */
619int switch_sched_plugin(struct sched_plugin* plugin)
620{
621 BUG_ON(!plugin);
622
623#ifdef CONFIG_LITMUS_SOFTIRQD
624 if (!klmirqd_is_dead()) {
625 kill_klmirqd();
626 }
627#endif
628
629 if (atomic_read(&rt_task_count) == 0)
630 return stop_machine(do_plugin_switch, plugin, NULL);
631 else
632 return -EBUSY;
633}
634
651/* Called upon fork. 635/* Called upon fork.
652 * p is the newly forked task. 636 * p is the newly forked task.
653 */ 637 */
@@ -772,8 +756,6 @@ static int __init _init_litmus(void)
772 */ 756 */
773 printk("Starting LITMUS^RT kernel\n"); 757 printk("Starting LITMUS^RT kernel\n");
774 758
775 BUILD_BUG_ON(sizeof(union np_flag) != sizeof(uint32_t));
776
777 register_sched_plugin(&linux_sched_plugin); 759 register_sched_plugin(&linux_sched_plugin);
778 760
779 bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC); 761 bheap_node_cache = KMEM_CACHE(bheap_node, SLAB_PANIC);