author | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-29 23:45:13 -0400
committer | Andrea Bastoni <bastoni@cs.unc.edu> | 2010-05-29 23:57:07 -0400
commit | a66246f9e973a68fb9955a2fa7663a2e02afbd30 (patch)
tree | ebdf77a3cf491c0d0b77af3d9622f33013af5856 /include/litmus/rt_domain.h
parent | 6ffc1fee98c4b995eb3a0285f4f8fb467cb0306e (diff)
Change most LitmusRT spinlock_t to raw_spinlock_t
Adapt to the new spinlock scheme (tglx 20091217); a minimal sketch
illustrating the distinction follows the list:
spinlock - the weakest one, which might sleep in RT
raw_spinlock - spinlock which always spins even on RT
arch_spinlock - the hardware level architecture dependent implementation
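
For illustration, a sketch contrasting the first two lock types under
the new scheme (not part of this commit; the lock and function names
here are hypothetical):

#include <linux/spinlock.h>

/* Always spins, even on PREEMPT_RT: suitable for short,
 * non-sleeping critical sections such as scheduler state. */
static DEFINE_RAW_SPINLOCK(state_lock);

/* May become a sleeping lock on PREEMPT_RT: fine for ordinary
 * data structures, wrong inside the scheduler itself. */
static DEFINE_SPINLOCK(data_lock);

static void touch_state(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&state_lock, flags);
	/* ... modify scheduler state with interrupts off ... */
	raw_spin_unlock_irqrestore(&state_lock, flags);
}

static void touch_data(void)
{
	spin_lock(&data_lock);
	/* ... ordinary critical section; may block on RT ... */
	spin_unlock(&data_lock);
}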
----
Most probably, all of the spinlocks changed by this commit will be true
spinning locks (raw_spinlock) in PreemptRT, so hopefully few changes
will be needed when porting Litmus to PreemptRT.
There are a couple of spinlocks that the kernel itself still defines as
spinlock_t (hence no changes appear in this commit) that might cause
us trouble:
- wait_queue_t lock is defined as spinlock_t; it is used in:
  * fmlp.c -- sem->wait.lock
  * sync.c -- ts_release.wait.lock
  (see the sketch after this list)
- rwlock_t used in the FIFO implementation in sched_trace.c
  * this probably needs to be changed to something that always spins
    in RT, at the expense of increased locking time.
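
The wait-queue case is the thornier one: the lock is embedded in the
kernel's wait_queue_head_t, so its type follows the upstream definition
rather than anything LitmusRT declares. A minimal sketch of the usage
pattern in question (the struct layout is assumed for illustration, not
taken from fmlp.c):

#include <linux/wait.h>
#include <linux/spinlock.h>

struct fmlp_sem_sketch {
	wait_queue_head_t wait;	/* ->lock is spinlock_t upstream */
};

static void poke_waiters(struct fmlp_sem_sketch *sem)
{
	unsigned long flags;

	/* This must remain spin_lock_irqsave(), not the raw_ variant:
	 * wait_queue_head_t::lock is declared spinlock_t in the kernel
	 * headers, outside LitmusRT's control. */
	spin_lock_irqsave(&sem->wait.lock, flags);
	/* ... inspect or wake up waiters ... */
	spin_unlock_irqrestore(&sem->wait.lock, flags);
}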
----
This commit also fixes warnings and errors caused by the need to
explicitly include slab.h when using kmalloc() and friends.
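
The pattern behind those fixes is simple: since the kernel header
cleanup (around 2.6.34), slab.h is no longer pulled in indirectly, so
every file that calls the allocator needs the explicit include. A
generic sketch, not a hunk from this commit:

#include <linux/slab.h>		/* kmalloc()/kfree(): now an explicit include */
#include <linux/errno.h>

static int alloc_sketch(void)
{
	int *buf = kmalloc(16 * sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;	/* allocation failed */
	/* ... use buf ... */
	kfree(buf);
	return 0;
}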
----
This commit does not compile.
Diffstat (limited to 'include/litmus/rt_domain.h')
-rw-r--r-- | include/litmus/rt_domain.h | 26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/include/litmus/rt_domain.h b/include/litmus/rt_domain.h
index b452be1d2256..9bf980713474 100644
--- a/include/litmus/rt_domain.h
+++ b/include/litmus/rt_domain.h
@@ -22,16 +22,16 @@ struct release_queue {
 
 typedef struct _rt_domain {
 	/* runnable rt tasks are in here */
-	spinlock_t			ready_lock;
+	raw_spinlock_t			ready_lock;
 	struct bheap			ready_queue;
 
 	/* real-time tasks waiting for release are in here */
-	spinlock_t			release_lock;
+	raw_spinlock_t			release_lock;
 	struct release_queue		release_queue;
 	int				release_master;
 
 	/* for moving tasks to the release queue */
-	spinlock_t			tobe_lock;
+	raw_spinlock_t			tobe_lock;
 	struct list_head		tobe_released;
 
 	/* how do we check if we need to kick another CPU? */
@@ -109,17 +109,17 @@ static inline void add_ready(rt_domain_t* rt, struct task_struct *new)
 {
 	unsigned long flags;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	__add_ready(rt, new);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
 static inline void merge_ready(rt_domain_t* rt, struct bheap* tasks)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	__merge_ready(rt, tasks);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 }
 
 static inline struct task_struct* take_ready(rt_domain_t* rt)
@@ -127,9 +127,9 @@ static inline struct task_struct* take_ready(rt_domain_t* rt)
 	unsigned long flags;
 	struct task_struct* ret;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	ret = __take_ready(rt);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }
 
@@ -138,9 +138,9 @@ static inline void add_release(rt_domain_t* rt, struct task_struct *task)
 {
 	unsigned long flags;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->tobe_lock, flags);
+	raw_spin_lock_irqsave(&rt->tobe_lock, flags);
 	__add_release(rt, task);
-	spin_unlock_irqrestore(&rt->tobe_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->tobe_lock, flags);
 }
 
 static inline int __jobs_pending(rt_domain_t* rt)
@@ -153,9 +153,9 @@ static inline int jobs_pending(rt_domain_t* rt)
 	unsigned long flags;
 	int ret;
 	/* first we need the write lock for rt_ready_queue */
-	spin_lock_irqsave(&rt->ready_lock, flags);
+	raw_spin_lock_irqsave(&rt->ready_lock, flags);
 	ret = !bheap_empty(&rt->ready_queue);
-	spin_unlock_irqrestore(&rt->ready_lock, flags);
+	raw_spin_unlock_irqrestore(&rt->ready_lock, flags);
 	return ret;
 }
 