author		Andrea Bastoni <bastoni@cs.unc.edu>	2010-04-27 11:00:19 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-29 17:37:07 -0400
commit		f85625ccf28d1bffd4dac916babb76b910ebef31 (patch)
tree		cc3eee0b83d0295034b5cb24737a637c8a662e77 /litmus
parent		8fe2fb8bb1c1cd0194608bc783d0ce7029e8d869 (diff)
Synchronize plugin switching
Make sure the plugin is not in use on any CPU while it is being switched. The CPU performing the switch sends an IPI to all other CPUs, forcing them to spin on an atomic variable until the switch is complete.
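The mechanism introduced by the patch is small: a global flag plus an IPI callback that parks every other CPU while the switch runs. Below is a condensed sketch of that pattern as read from the diff; the helper names quiesce_other_cpus() and release_other_cpus() are illustrative only (the patch inlines these steps in switch_sched_plugin()), and the header choices assume a 2.6.3x-era kernel.

#include <linux/smp.h>		/* smp_call_function() */
#include <asm/atomic.h>		/* atomic_t, atomic_set(), atomic_read() */
#include <asm/processor.h>	/* cpu_relax() */

/* Nonzero while a plugin switch is in progress. */
static atomic_t cannot_use_plugin = ATOMIC_INIT(0);

/* IPI callback: runs on every other CPU and spins until the
 * switching CPU clears the flag again. */
static void synch_on_plugin_switch(void *info)
{
	while (atomic_read(&cannot_use_plugin))
		cpu_relax();
}

/* Illustrative helper: run on the switching CPU before the switch. */
static void quiesce_other_cpus(void)
{
	/* Raise the flag first, then interrupt the other CPUs so that
	 * none of them can be executing plugin code during the switch. */
	atomic_set(&cannot_use_plugin, 1);
	/* wait == 0: return once the IPIs are sent; the remote CPUs keep
	 * spinning in the callback until the flag is cleared. */
	smp_call_function(synch_on_plugin_switch, NULL, 0);
}

/* Illustrative helper: run on the switching CPU after the switch. */
static void release_other_cpus(void)
{
	atomic_set(&cannot_use_plugin, 0);
}

In the actual patch the two atomic_set() calls bracket the existing task_transition_lock critical section in switch_sched_plugin(), so the other CPUs stay parked in the IPI callback for the entire switch.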
Diffstat (limited to 'litmus')
-rw-r--r--	litmus/litmus.c	18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 3cf7cb9e8a9f..3ef2df8ffb50 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -24,6 +24,8 @@
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(task_transition_lock);
+/* synchronize plugin switching */
+atomic_t cannot_use_plugin = ATOMIC_INIT(0);
 
 /* Give log messages sequential IDs. */
 atomic_t __log_seq_no = ATOMIC_INIT(0);
@@ -369,13 +371,17 @@ void litmus_exit_task(struct task_struct* tsk)
 	}
 }
 
+/* IPI callback to synchronize plugin switching */
+static void synch_on_plugin_switch(void* info)
+{
+	while (atomic_read(&cannot_use_plugin))
+		cpu_relax();
+}
+
 /* Switching a plugin in use is tricky.
  * We must watch out that no real-time tasks exists
  * (and that none is created in parallel) and that the plugin is not
  * currently in use on any processor (in theory).
- *
- * For now, we don't enforce the second part since it is unlikely to cause
- * any trouble by itself as long as we don't unload modules.
  */
 int switch_sched_plugin(struct sched_plugin* plugin)
 {
@@ -384,6 +390,11 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 
 	BUG_ON(!plugin);
 
+	/* forbid other cpus to use the plugin */
+	atomic_set(&cannot_use_plugin, 1);
+	/* send IPI to force other CPUs to synch with us */
+	smp_call_function(synch_on_plugin_switch, NULL, 0);
+
 	/* stop task transitions */
 	spin_lock_irqsave(&task_transition_lock, flags);
 
@@ -404,6 +415,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 		ret = -EBUSY;
 out:
 	spin_unlock_irqrestore(&task_transition_lock, flags);
+	atomic_set(&cannot_use_plugin, 0);
 	return ret;
 }
 