author    Glenn Elliott <gelliott@cs.unc.edu>  2013-01-10 14:43:43 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2013-01-10 14:43:43 -0500
commit    fdf0a6b73001976c5d02d631ebdd0927819d7c91 (patch)
tree      4e0f6c9504fbd463085f53ba778f25ffb3c0965f
parent    29496770fea3c801553b0dc5a2a8e295e490a915 (diff)
parent    a9a4887bc8dc24b0cf9665f59dc363c126628b25 (diff)
Merge branch 'prop/misc-fixes' into wip-2012.3-gpu
-rw-r--r--  litmus/litmus.c  76
-rw-r--r--  litmus/sync.c     4
2 files changed, 33 insertions, 47 deletions
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 9c6b738ab99d..dc94be71bfb6 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/reboot.h>
+#include <linux/stop_machine.h>
 
 #include <litmus/litmus.h>
 #include <litmus/bheap.h>
@@ -24,9 +25,6 @@
 
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
-static DEFINE_RAW_SPINLOCK(task_transition_lock);
-/* synchronize plugin switching */
-atomic_t cannot_use_plugin = ATOMIC_INIT(0);
 
 /* Give log messages sequential IDs. */
 atomic_t __log_seq_no = ATOMIC_INIT(0);
@@ -322,10 +320,12 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 long litmus_admit_task(struct task_struct* tsk)
 {
         long retval = 0;
-        unsigned long flags;
 
         BUG_ON(is_realtime(tsk));
 
+        tsk_rt(tsk)->heap_node = NULL;
+        tsk_rt(tsk)->rel_heap = NULL;
+
         if (get_rt_relative_deadline(tsk) == 0 ||
                 get_exec_cost(tsk) >
                         min(get_rt_relative_deadline(tsk), get_rt_period(tsk)) ) {
@@ -347,9 +347,6 @@ long litmus_admit_task(struct task_struct* tsk)
 
         INIT_LIST_HEAD(&tsk_rt(tsk)->list);
 
-        /* avoid scheduler plugin changing underneath us */
-        raw_spin_lock_irqsave(&task_transition_lock, flags);
-
         /* allocate heap node for this task */
         tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
         tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
@@ -357,15 +354,14 @@
         if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
                 printk(KERN_WARNING "litmus: no more heap node memory!?\n");
 
-                bheap_node_free(tsk_rt(tsk)->heap_node);
-                release_heap_free(tsk_rt(tsk)->rel_heap);
-
                 retval = -ENOMEM;
-                goto out_unlock;
+                goto out;
         } else {
                 bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
         }
 
+        preempt_disable();
+
         retval = litmus->admit_task(tsk);
 
         if (!retval) {
@@ -374,9 +370,13 @@ long litmus_admit_task(struct task_struct* tsk)
                 atomic_inc(&rt_task_count);
         }
 
-out_unlock:
-        raw_spin_unlock_irqrestore(&task_transition_lock, flags);
+        preempt_enable();
+
 out:
+        if (retval) {
+                bheap_node_free(tsk_rt(tsk)->heap_node);
+                release_heap_free(tsk_rt(tsk)->rel_heap);
+        }
         return retval;
 }
 
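The admit-path changes above reshape the error handling: tsk_rt(tsk)->heap_node and tsk_rt(tsk)->rel_heap are now NULL-initialized at entry, every failure jumps to the single out: label, and the frees run only when retval is nonzero. A minimal userspace sketch of that centralized-cleanup pattern (admit() and struct task_state are illustrative names, not from the patch; libc free() is defined to ignore NULL):

#include <stdlib.h>
#include <errno.h>

struct task_state {
        void *heap_node;
        void *rel_heap;
};

/* admit() mirrors the reshaped litmus_admit_task() control flow. */
static int admit(struct task_state *ts, int plugin_rejects)
{
        int retval = 0;

        /* NULL-init first: the cleanup at "out" is then safe no
         * matter how early the function bails out. */
        ts->heap_node = NULL;
        ts->rel_heap = NULL;

        ts->heap_node = malloc(64);
        ts->rel_heap = malloc(64);
        if (!ts->heap_node || !ts->rel_heap) {
                retval = -ENOMEM;
                goto out;
        }

        if (plugin_rejects) {
                retval = -EINVAL;       /* admission refused */
                goto out;
        }

out:
        if (retval) {
                /* one cleanup site covers every failure point */
                free(ts->heap_node);
                free(ts->rel_heap);
                ts->heap_node = NULL;
                ts->rel_heap = NULL;
        }
        return retval;
}

int main(void)
{
        struct task_state ts;
        int err = admit(&ts, 0);

        free(ts.heap_node);
        free(ts.rel_heap);
        return err ? 1 : 0;
}

The benefit over the old code is that an allocation failure and a rejection from litmus->admit_task() now share one cleanup site instead of each path freeing by hand.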
@@ -396,37 +396,10 @@ void litmus_exit_task(struct task_struct* tsk)
         }
 }
 
-/* IPI callback to synchronize plugin switching */
-static void synch_on_plugin_switch(void* info)
-{
-        atomic_inc(&cannot_use_plugin);
-        while (atomic_read(&cannot_use_plugin) > 0)
-                cpu_relax();
-}
-
-/* Switching a plugin in use is tricky.
- * We must watch out that no real-time tasks exists
- * (and that none is created in parallel) and that the plugin is not
- * currently in use on any processor (in theory).
- */
-int switch_sched_plugin(struct sched_plugin* plugin)
+static int do_plugin_switch(void *_plugin)
 {
-        unsigned long flags;
-        int ret = 0;
-
-        BUG_ON(!plugin);
-
-        /* forbid other cpus to use the plugin */
-        atomic_set(&cannot_use_plugin, 1);
-        /* send IPI to force other CPUs to synch with us */
-        smp_call_function(synch_on_plugin_switch, NULL, 0);
-
-        /* wait until all other CPUs have started synch */
-        while (atomic_read(&cannot_use_plugin) < num_online_cpus())
-                cpu_relax();
-
-        /* stop task transitions */
-        raw_spin_lock_irqsave(&task_transition_lock, flags);
+        int ret;
+        struct sched_plugin* plugin = _plugin;
 
         /* don't switch if there are active real-time tasks */
         if (atomic_read(&rt_task_count) == 0) {
@@ -444,11 +417,24 @@ int switch_sched_plugin(struct sched_plugin* plugin)
         } else
                 ret = -EBUSY;
 out:
-        raw_spin_unlock_irqrestore(&task_transition_lock, flags);
-        atomic_set(&cannot_use_plugin, 0);
         return ret;
 }
 
+/* Switching a plugin in use is tricky.
+ * We must watch out that no real-time tasks exists
+ * (and that none is created in parallel) and that the plugin is not
+ * currently in use on any processor (in theory).
+ */
+int switch_sched_plugin(struct sched_plugin* plugin)
+{
+        BUG_ON(!plugin);
+
+        if (atomic_read(&rt_task_count) == 0)
+                return stop_machine(do_plugin_switch, plugin, NULL);
+        else
+                return -EBUSY;
+}
+
 /* Called upon fork.
  * p is the newly forked task.
  */
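The litmus.c rewrite above trades the hand-rolled quiescence protocol (task_transition_lock, cannot_use_plugin, and the synch_on_plugin_switch IPI spin) for stop_machine(), which runs do_plugin_switch() on one CPU while every other online CPU is parked with interrupts disabled. A minimal sketch of that pattern as a hypothetical module (stopm_demo_* and quiescent_update are illustrative names, not from the patch):

#include <linux/module.h>
#include <linux/stop_machine.h>

static int demo_value;

/* Runs with every other online CPU halted and IRQs off, so plain,
 * unlocked accesses to shared state are safe for its duration. */
static int quiescent_update(void *data)
{
        int *value = data;

        *value = 42;
        return 0;       /* becomes the return value of stop_machine() */
}

static int __init stopm_demo_init(void)
{
        /* NULL cpumask: let the callback run on any one online CPU,
         * as switch_sched_plugin() does with do_plugin_switch(). */
        return stop_machine(quiescent_update, &demo_value, NULL);
}

static void __exit stopm_demo_exit(void)
{
}

module_init(stopm_demo_init);
module_exit(stopm_demo_exit);
MODULE_LICENSE("GPL");

Plugin switching is a rare administrative operation, so paying stop_machine()'s brief full-system pause for a much simpler correctness argument is a sensible trade.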
diff --git a/litmus/sync.c b/litmus/sync.c
index a796c20f0e9f..908bf21dc210 100644
--- a/litmus/sync.c
+++ b/litmus/sync.c
@@ -91,7 +91,7 @@ static long do_release_ts(lt_t start)
 {
         long task_count = 0;
 
-        struct list_head *pos;
+        struct list_head *pos, *safe;
         struct ts_release_wait *wait;
 
         if (mutex_lock_interruptible(&task_release_lock)) {
@@ -103,7 +103,7 @@ static long do_release_ts(lt_t start)
         sched_trace_sys_release(&start);
 
         task_count = 0;
-        list_for_each(pos, &task_release_list) {
+        list_for_each_safe(pos, safe, &task_release_list) {
                 wait = (struct ts_release_wait*)
                         list_entry(pos, struct ts_release_wait, list);
 
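The sync.c fix switches do_release_ts() to the deletion-safe iterator: the loop signals each waiter, and a woken waiter may unlink and free its own ts_release_wait entry before the loop advances, so plain list_for_each could chase a freed next pointer. A standalone sketch of the difference (the kernel list macros are re-implemented here just enough to compile in userspace; struct waiter is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head {
        struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *item, struct list_head *head)
{
        item->prev = head->prev;
        item->next = head;
        head->prev->next = item;
        head->prev = item;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Unsafe under deletion: re-reads pos->next after the body ran. */
#define list_for_each(pos, head) \
        for (pos = (head)->next; pos != (head); pos = pos->next)

/* Safe under deletion: caches the next pointer before the body. */
#define list_for_each_safe(pos, n, head) \
        for (pos = (head)->next, n = pos->next; pos != (head); \
             pos = n, n = pos->next)

struct waiter {
        int id;
        struct list_head list;
};

int main(void)
{
        struct list_head queue = LIST_HEAD_INIT(queue);
        struct list_head *pos, *safe;
        int i;

        for (i = 0; i < 3; i++) {
                struct waiter *w = malloc(sizeof(*w));
                w->id = i;
                list_add_tail(&w->list, &queue);
        }

        /* Mirrors do_release_ts(): each iteration "releases" a waiter
         * whose entry then disappears from the list.  With the plain
         * list_for_each, the iterator would follow a freed pointer. */
        list_for_each_safe(pos, safe, &queue) {
                struct waiter *w = list_entry(pos, struct waiter, list);
                printf("releasing waiter %d\n", w->id);
                list_del(&w->list);
                free(w);
        }
        return 0;
}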