author		Bjoern Brandenburg <bbb@mpi-sws.org>	2013-01-10 10:41:34 -0500
committer	Bjoern Brandenburg <bbb@mpi-sws.org>	2013-01-10 10:43:23 -0500
commit		a9a4887bc8dc24b0cf9665f59dc363c126628b25 (patch)
tree		366e651e6a99654e4a9b7f03f22d52f847160404
parent		b60b41eb47d4746d16c2c1365521bbcb94d6cd2e (diff)
Reimplement plugin switching using stop_machine()

stop_machine() does exactly what we want (it avoids all concurrent
scheduling activity) and is much simpler than rolling our own (buggy)
implementation.
-rw-r--r--	litmus/litmus.c	76
1 file changed, 31 insertions(+), 45 deletions(-)
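
For context, a minimal sketch of the stop_machine() pattern this patch adopts.
The do_plugin_switch/switch_sched_plugin names mirror the diff below; the
callback body is abbreviated and only illustrative, not the exact code being
merged.

	#include <linux/stop_machine.h>

	/* stop_machine() runs the callback on one CPU while every other
	 * online CPU spins in stopper context with interrupts disabled,
	 * so no scheduling activity can race with the plugin switch. */
	static int do_plugin_switch(void *_plugin)
	{
		struct sched_plugin *plugin = _plugin;

		if (atomic_read(&rt_task_count) != 0)
			return -EBUSY;	/* real-time tasks still exist */

		/* ... deactivate the old plugin, activate the new one ... */
		return 0;
	}

	int switch_sched_plugin(struct sched_plugin *plugin)
	{
		BUG_ON(!plugin);
		/* stop_machine() returns whatever the callback returned */
		return stop_machine(do_plugin_switch, plugin, NULL);
	}

The third argument (NULL) lets stop_machine() pick the CPU that runs the
callback; all other online CPUs are simply held off for the duration.
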
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 9c6b738ab99d..dc94be71bfb6 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/reboot.h>
+#include <linux/stop_machine.h>
 
 #include <litmus/litmus.h>
 #include <litmus/bheap.h>
@@ -24,9 +25,6 @@
 
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
-static DEFINE_RAW_SPINLOCK(task_transition_lock);
-/* synchronize plugin switching */
-atomic_t cannot_use_plugin = ATOMIC_INIT(0);
 
 /* Give log messages sequential IDs. */
 atomic_t __log_seq_no = ATOMIC_INIT(0);
@@ -322,10 +320,12 @@ static void reinit_litmus_state(struct task_struct* p, int restore)
 long litmus_admit_task(struct task_struct* tsk)
 {
 	long retval = 0;
-	unsigned long flags;
 
 	BUG_ON(is_realtime(tsk));
 
+	tsk_rt(tsk)->heap_node = NULL;
+	tsk_rt(tsk)->rel_heap = NULL;
+
 	if (get_rt_relative_deadline(tsk) == 0 ||
 	    get_exec_cost(tsk) >
			min(get_rt_relative_deadline(tsk), get_rt_period(tsk)) ) {
@@ -347,9 +347,6 @@ long litmus_admit_task(struct task_struct* tsk)
 
 	INIT_LIST_HEAD(&tsk_rt(tsk)->list);
 
-	/* avoid scheduler plugin changing underneath us */
-	raw_spin_lock_irqsave(&task_transition_lock, flags);
-
 	/* allocate heap node for this task */
 	tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC);
 	tsk_rt(tsk)->rel_heap = release_heap_alloc(GFP_ATOMIC);
@@ -357,15 +354,14 @@ long litmus_admit_task(struct task_struct* tsk)
 	if (!tsk_rt(tsk)->heap_node || !tsk_rt(tsk)->rel_heap) {
 		printk(KERN_WARNING "litmus: no more heap node memory!?\n");
 
-		bheap_node_free(tsk_rt(tsk)->heap_node);
-		release_heap_free(tsk_rt(tsk)->rel_heap);
-
 		retval = -ENOMEM;
-		goto out_unlock;
+		goto out;
 	} else {
 		bheap_node_init(&tsk_rt(tsk)->heap_node, tsk);
 	}
 
+	preempt_disable();
+
 	retval = litmus->admit_task(tsk);
 
 	if (!retval) {
@@ -374,9 +370,13 @@ long litmus_admit_task(struct task_struct* tsk)
 		atomic_inc(&rt_task_count);
 	}
 
-out_unlock:
-	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
+	preempt_enable();
+
 out:
+	if (retval) {
+		bheap_node_free(tsk_rt(tsk)->heap_node);
+		release_heap_free(tsk_rt(tsk)->rel_heap);
+	}
 	return retval;
 }
 
@@ -396,37 +396,10 @@ void litmus_exit_task(struct task_struct* tsk)
 	}
 }
 
-/* IPI callback to synchronize plugin switching */
-static void synch_on_plugin_switch(void* info)
-{
-	atomic_inc(&cannot_use_plugin);
-	while (atomic_read(&cannot_use_plugin) > 0)
-		cpu_relax();
-}
-
-/* Switching a plugin in use is tricky.
- * We must watch out that no real-time tasks exists
- * (and that none is created in parallel) and that the plugin is not
- * currently in use on any processor (in theory).
- */
-int switch_sched_plugin(struct sched_plugin* plugin)
+static int do_plugin_switch(void *_plugin)
 {
-	unsigned long flags;
-	int ret = 0;
-
-	BUG_ON(!plugin);
-
-	/* forbid other cpus to use the plugin */
-	atomic_set(&cannot_use_plugin, 1);
-	/* send IPI to force other CPUs to synch with us */
-	smp_call_function(synch_on_plugin_switch, NULL, 0);
-
-	/* wait until all other CPUs have started synch */
-	while (atomic_read(&cannot_use_plugin) < num_online_cpus())
-		cpu_relax();
-
-	/* stop task transitions */
-	raw_spin_lock_irqsave(&task_transition_lock, flags);
+	int ret;
+	struct sched_plugin* plugin = _plugin;
 
 	/* don't switch if there are active real-time tasks */
 	if (atomic_read(&rt_task_count) == 0) {
@@ -444,11 +417,24 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 	} else
 		ret = -EBUSY;
 out:
-	raw_spin_unlock_irqrestore(&task_transition_lock, flags);
-	atomic_set(&cannot_use_plugin, 0);
 	return ret;
 }
 
+/* Switching a plugin in use is tricky.
+ * We must watch out that no real-time tasks exists
+ * (and that none is created in parallel) and that the plugin is not
+ * currently in use on any processor (in theory).
+ */
+int switch_sched_plugin(struct sched_plugin* plugin)
+{
+	BUG_ON(!plugin);
+
+	if (atomic_read(&rt_task_count) == 0)
+		return stop_machine(do_plugin_switch, plugin, NULL);
+	else
+		return -EBUSY;
+}
+
 /* Called upon fork.
  * p is the newly forked task.
  */