diff options
author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-29 23:45:13 -0400 |
---|---|---|
committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-29 23:57:07 -0400 |
commit | a66246f9e973a68fb9955a2fa7663a2e02afbd30 (patch) | |
tree | ebdf77a3cf491c0d0b77af3d9622f33013af5856 /litmus/litmus.c | |
parent | 6ffc1fee98c4b995eb3a0285f4f8fb467cb0306e (diff) |
Change most LitmusRT spinlock_t into raw_spinlock_t
Adapt to new schema for spinlock:
(tglx 20091217)
spinlock - the weakest one, which might sleep in RT
raw_spinlock - spinlock which always spins even on RT
arch_spinlock - the hardware level architecture dependent implementation
----
Most probably, all the spinlocks changed by this commit will be true
spinning locks (raw_spinlock) in PreemptRT (so hopefully we'll need few
changes when porting Litmus to PreemptRT).
There are a couple of spinlocks that the kernel still defines as
spinlock_t (therefore no changes reported in this commit) that might cause
us trouble:
- wait_queue_t lock is defined as spinlock_t; it is used in:
* fmlp.c -- sem->wait.lock
* sync.c -- ts_release.wait.lock
- rwlock_t used in fifo implementation in sched_trace.c
* this probably needs to be changed to something always spinning in RT,
at the expense of increased locking time.
----
This commit also fixes warnings and errors due to the need to include
slab.h when using kmalloc() and friends.
----
This commit does not compile.
Diffstat (limited to 'litmus/litmus.c')
-rw-r--r-- | litmus/litmus.c | 10 |
1 files changed, 5 insertions, 5 deletions
diff --git a/litmus/litmus.c b/litmus/litmus.c index e43596a5104c..99714d06eed5 100644 --- a/litmus/litmus.c +++ b/litmus/litmus.c | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | /* Number of RT tasks that exist in the system */ | 24 | /* Number of RT tasks that exist in the system */ |
25 | atomic_t rt_task_count = ATOMIC_INIT(0); | 25 | atomic_t rt_task_count = ATOMIC_INIT(0); |
26 | static DEFINE_SPINLOCK(task_transition_lock); | 26 | static DEFINE_RAW_SPINLOCK(task_transition_lock); |
27 | /* synchronize plugin switching */ | 27 | /* synchronize plugin switching */ |
28 | atomic_t cannot_use_plugin = ATOMIC_INIT(0); | 28 | atomic_t cannot_use_plugin = ATOMIC_INIT(0); |
29 | 29 | ||
@@ -323,7 +323,7 @@ long litmus_admit_task(struct task_struct* tsk) | |||
323 | INIT_LIST_HEAD(&tsk_rt(tsk)->list); | 323 | INIT_LIST_HEAD(&tsk_rt(tsk)->list); |
324 | 324 | ||
325 | /* avoid scheduler plugin changing underneath us */ | 325 | /* avoid scheduler plugin changing underneath us */ |
326 | spin_lock_irqsave(&task_transition_lock, flags); | 326 | raw_spin_lock_irqsave(&task_transition_lock, flags); |
327 | 327 | ||
328 | /* allocate heap node for this task */ | 328 | /* allocate heap node for this task */ |
329 | tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC); | 329 | tsk_rt(tsk)->heap_node = bheap_node_alloc(GFP_ATOMIC); |
@@ -350,7 +350,7 @@ long litmus_admit_task(struct task_struct* tsk) | |||
350 | } | 350 | } |
351 | 351 | ||
352 | out_unlock: | 352 | out_unlock: |
353 | spin_unlock_irqrestore(&task_transition_lock, flags); | 353 | raw_spin_unlock_irqrestore(&task_transition_lock, flags); |
354 | out: | 354 | out: |
355 | return retval; | 355 | return retval; |
356 | } | 356 | } |
@@ -396,7 +396,7 @@ int switch_sched_plugin(struct sched_plugin* plugin) | |||
396 | smp_call_function(synch_on_plugin_switch, NULL, 0); | 396 | smp_call_function(synch_on_plugin_switch, NULL, 0); |
397 | 397 | ||
398 | /* stop task transitions */ | 398 | /* stop task transitions */ |
399 | spin_lock_irqsave(&task_transition_lock, flags); | 399 | raw_spin_lock_irqsave(&task_transition_lock, flags); |
400 | 400 | ||
401 | /* don't switch if there are active real-time tasks */ | 401 | /* don't switch if there are active real-time tasks */ |
402 | if (atomic_read(&rt_task_count) == 0) { | 402 | if (atomic_read(&rt_task_count) == 0) { |
@@ -414,7 +414,7 @@ int switch_sched_plugin(struct sched_plugin* plugin) | |||
414 | } else | 414 | } else |
415 | ret = -EBUSY; | 415 | ret = -EBUSY; |
416 | out: | 416 | out: |
417 | spin_unlock_irqrestore(&task_transition_lock, flags); | 417 | raw_spin_unlock_irqrestore(&task_transition_lock, flags); |
418 | atomic_set(&cannot_use_plugin, 0); | 418 | atomic_set(&cannot_use_plugin, 0); |
419 | return ret; | 419 | return ret; |
420 | } | 420 | } |