diff options
author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-29 23:45:13 -0400 |
---|---|---|
committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-29 23:57:07 -0400 |
commit | a66246f9e973a68fb9955a2fa7663a2e02afbd30 (patch) | |
tree | ebdf77a3cf491c0d0b77af3d9622f33013af5856 /litmus/rt_domain.c | |
parent | 6ffc1fee98c4b995eb3a0285f4f8fb467cb0306e (diff) |
Change most LitmusRT spinlock_t to raw_spinlock_t
Adapt to new schema for spinlock:
(tglx 20091217)
spinlock - the weakest one, which might sleep in RT
raw_spinlock - spinlock which always spins even on RT
arch_spinlock - the hardware level architecture dependent implementation
----
Most probably, all the spinlocks changed by this commit will be true
spinning locks (raw_spinlock) in PreemptRT (so hopefully we'll need few
changes when porting Litmus to PreemptRT).
There are a couple of spinlocks that the kernel still defines as
spinlock_t (therefore no changes reported in this commit) that might cause
us trouble:
- wait_queue_t lock is defined as spinlock_t; it is used in:
* fmlp.c -- sem->wait.lock
* sync.c -- ts_release.wait.lock
- rwlock_t used in fifo implementation in sched_trace.c
* this probably needs to be changed to something that always spins in RT,
at the expense of increased locking time.
----
This commit also fixes warnings and errors due to the need to include
slab.h when using kmalloc() and friends.
----
This commit does not compile.
Diffstat (limited to 'litmus/rt_domain.c')
-rw-r--r-- | litmus/rt_domain.c | 18 |
1 files changed, 9 insertions, 9 deletions
diff --git a/litmus/rt_domain.c b/litmus/rt_domain.c index 609ff0f82abb..8d5db6050723 100644 --- a/litmus/rt_domain.c +++ b/litmus/rt_domain.c | |||
@@ -53,11 +53,11 @@ static enum hrtimer_restart on_release_timer(struct hrtimer *timer) | |||
53 | 53 | ||
54 | rh = container_of(timer, struct release_heap, timer); | 54 | rh = container_of(timer, struct release_heap, timer); |
55 | 55 | ||
56 | spin_lock_irqsave(&rh->dom->release_lock, flags); | 56 | raw_spin_lock_irqsave(&rh->dom->release_lock, flags); |
57 | TRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock); | 57 | TRACE("CB has the release_lock 0x%p\n", &rh->dom->release_lock); |
58 | /* remove from release queue */ | 58 | /* remove from release queue */ |
59 | list_del(&rh->list); | 59 | list_del(&rh->list); |
60 | spin_unlock_irqrestore(&rh->dom->release_lock, flags); | 60 | raw_spin_unlock_irqrestore(&rh->dom->release_lock, flags); |
61 | TRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock); | 61 | TRACE("CB returned release_lock 0x%p\n", &rh->dom->release_lock); |
62 | 62 | ||
63 | /* call release callback */ | 63 | /* call release callback */ |
@@ -185,20 +185,20 @@ static void arm_release_timer(rt_domain_t *_rt) | |||
185 | list_del(pos); | 185 | list_del(pos); |
186 | 186 | ||
187 | /* put into release heap while holding release_lock */ | 187 | /* put into release heap while holding release_lock */ |
188 | spin_lock(&rt->release_lock); | 188 | raw_spin_lock(&rt->release_lock); |
189 | TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock); | 189 | TRACE_TASK(t, "I have the release_lock 0x%p\n", &rt->release_lock); |
190 | 190 | ||
191 | rh = get_release_heap(rt, t, 0); | 191 | rh = get_release_heap(rt, t, 0); |
192 | if (!rh) { | 192 | if (!rh) { |
193 | /* need to use our own, but drop lock first */ | 193 | /* need to use our own, but drop lock first */ |
194 | spin_unlock(&rt->release_lock); | 194 | raw_spin_unlock(&rt->release_lock); |
195 | TRACE_TASK(t, "Dropped release_lock 0x%p\n", | 195 | TRACE_TASK(t, "Dropped release_lock 0x%p\n", |
196 | &rt->release_lock); | 196 | &rt->release_lock); |
197 | 197 | ||
198 | reinit_release_heap(t); | 198 | reinit_release_heap(t); |
199 | TRACE_TASK(t, "release_heap ready\n"); | 199 | TRACE_TASK(t, "release_heap ready\n"); |
200 | 200 | ||
201 | spin_lock(&rt->release_lock); | 201 | raw_spin_lock(&rt->release_lock); |
202 | TRACE_TASK(t, "Re-acquired release_lock 0x%p\n", | 202 | TRACE_TASK(t, "Re-acquired release_lock 0x%p\n", |
203 | &rt->release_lock); | 203 | &rt->release_lock); |
204 | 204 | ||
@@ -207,7 +207,7 @@ static void arm_release_timer(rt_domain_t *_rt) | |||
207 | bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node); | 207 | bheap_insert(rt->order, &rh->heap, tsk_rt(t)->heap_node); |
208 | TRACE_TASK(t, "arm_release_timer(): added to release heap\n"); | 208 | TRACE_TASK(t, "arm_release_timer(): added to release heap\n"); |
209 | 209 | ||
210 | spin_unlock(&rt->release_lock); | 210 | raw_spin_unlock(&rt->release_lock); |
211 | TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock); | 211 | TRACE_TASK(t, "Returned the release_lock 0x%p\n", &rt->release_lock); |
212 | 212 | ||
213 | /* To avoid arming the timer multiple times, we only let the | 213 | /* To avoid arming the timer multiple times, we only let the |
@@ -258,9 +258,9 @@ void rt_domain_init(rt_domain_t *rt, | |||
258 | for (i = 0; i < RELEASE_QUEUE_SLOTS; i++) | 258 | for (i = 0; i < RELEASE_QUEUE_SLOTS; i++) |
259 | INIT_LIST_HEAD(&rt->release_queue.slot[i]); | 259 | INIT_LIST_HEAD(&rt->release_queue.slot[i]); |
260 | 260 | ||
261 | spin_lock_init(&rt->ready_lock); | 261 | raw_spin_lock_init(&rt->ready_lock); |
262 | spin_lock_init(&rt->release_lock); | 262 | raw_spin_lock_init(&rt->release_lock); |
263 | spin_lock_init(&rt->tobe_lock); | 263 | raw_spin_lock_init(&rt->tobe_lock); |
264 | 264 | ||
265 | rt->check_resched = check; | 265 | rt->check_resched = check; |
266 | rt->release_jobs = release; | 266 | rt->release_jobs = release; |