 litmus/litmus.c | 22 +++++++++++++++-------
 1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/litmus/litmus.c b/litmus/litmus.c
index 70a7f628455b..e43596a5104c 100644
--- a/litmus/litmus.c
+++ b/litmus/litmus.c
@@ -24,6 +24,8 @@
 /* Number of RT tasks that exist in the system */
 atomic_t rt_task_count = ATOMIC_INIT(0);
 static DEFINE_SPINLOCK(task_transition_lock);
+/* synchronize plugin switching */
+atomic_t cannot_use_plugin = ATOMIC_INIT(0);
 
 /* Give log messages sequential IDs. */
 atomic_t __log_seq_no = ATOMIC_INIT(0);
@@ -369,17 +371,17 @@ void litmus_exit_task(struct task_struct* tsk)
         }
 }
 
+/* IPI callback to synchronize plugin switching */
+static void synch_on_plugin_switch(void* info)
+{
+        while (atomic_read(&cannot_use_plugin))
+                cpu_relax();
+}
+
 /* Switching a plugin in use is tricky.
  * We must watch out that no real-time tasks exists
  * (and that none is created in parallel) and that the plugin is not
  * currently in use on any processor (in theory).
- *
- * FIXME we don't enforce the second part since it is unlikely to cause
- * any trouble by itself as long as we don't unload modules.
- * - This is causing problems with C-EDF when reloading clusters configuration
- *   on big SMP machines (e.g., 24 cores). The plugin is in use on some
- *   processor when we unload is, and this cause troubles as
- *   we free the clusters while these are used :(
  */
 int switch_sched_plugin(struct sched_plugin* plugin)
 {
@@ -388,6 +390,11 @@ int switch_sched_plugin(struct sched_plugin* plugin)
 
         BUG_ON(!plugin);
 
+        /* forbid other cpus to use the plugin */
+        atomic_set(&cannot_use_plugin, 1);
+        /* send IPI to force other CPUs to synch with us */
+        smp_call_function(synch_on_plugin_switch, NULL, 0);
+
         /* stop task transitions */
         spin_lock_irqsave(&task_transition_lock, flags);
 
@@ -408,6 +415,7 @@ int switch_sched_plugin(struct sched_plugin* plugin)
                 ret = -EBUSY;
 out:
         spin_unlock_irqrestore(&task_transition_lock, flags);
+        atomic_set(&cannot_use_plugin, 0);
         return ret;
 }
 
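
Taken together, the hunks implement a simple spin gate: the CPU performing the switch raises cannot_use_plugin, IPIs every other CPU, and each remote CPU parks in the IPI handler (a cpu_relax() loop) until the flag is cleared once the switch has completed. The sketch below assembles that pattern in one place for reference; the include lines, the struct sched_plugin forward declaration, and the elided body of switch_sched_plugin() are simplifications for illustration, not the exact LITMUS^RT sources.

/* Sketch of the plugin-switch spin gate added by this patch
 * (assumed kernel context; unrelated parts of the switch are elided). */
#include <linux/atomic.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

struct sched_plugin;    /* provided by the LITMUS^RT plugin headers */

static atomic_t cannot_use_plugin = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(task_transition_lock);

/* IPI callback: remote CPUs spin here while the switch is in progress. */
static void synch_on_plugin_switch(void *info)
{
        while (atomic_read(&cannot_use_plugin))
                cpu_relax();
}

int switch_sched_plugin(struct sched_plugin *plugin)
{
        unsigned long flags;
        int ret = 0;

        /* 1) Close the gate so other CPUs cannot enter plugin code. */
        atomic_set(&cannot_use_plugin, 1);

        /* 2) IPI all other CPUs; wait == 0, so we do not block until the
         *    callbacks return -- the remote CPUs stay parked in
         *    synch_on_plugin_switch() instead. */
        smp_call_function(synch_on_plugin_switch, NULL, 0);

        /* 3) Perform the switch with task transitions stopped
         *    (deactivate the old plugin, install the new one -- elided). */
        spin_lock_irqsave(&task_transition_lock, flags);
        /* ... */
        spin_unlock_irqrestore(&task_transition_lock, flags);

        /* 4) Open the gate again; the spinning CPUs resume. */
        atomic_set(&cannot_use_plugin, 0);
        return ret;
}

Because smp_call_function() is invoked with wait == 0, the switching CPU does not wait for the callbacks to return; the serialization comes entirely from the remote CPUs spinning on the flag until it is reset after the lock is released.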