author		Bjoern B. Brandenburg <bbb@cs.unc.edu>	2008-05-01 12:13:37 -0400
committer	Bjoern B. Brandenburg <bbb@cs.unc.edu>	2008-05-01 12:13:37 -0400
commit		7d9d731677836b9e8e71271f8dd01bb35ab62062 (patch)
tree		e58d46eba4d40a6cdcc8d4617292ea0b806a8b6a
parent		03201b0f6d2b3f5ba9e4fe48be6b995ddd58a2d5 (diff)
SRP+FMLP: reorganize code
- split SRP into its own file
- rename litmus_sem.c to fmlp.c, since that is what it is
-rw-r--r--	litmus/Makefile	  5
-rw-r--r--	litmus/fmlp.c	282
-rw-r--r--	litmus/srp.c (renamed from litmus/litmus_sem.c)	283
3 files changed, 288 insertions(+), 282 deletions(-)
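For orientation before the diff: the new litmus/fmlp.c exposes the FMLP semaphore to user space through the sys_pi_down()/sys_pi_up() system calls, which operate on fdso object descriptors. Below is a minimal user-space sketch of how a task might use them; it assumes a liblitmus-style wrapper library, and every name in it is hypothetical rather than part of this patch.

/* Illustrative sketch only; not part of this patch.  Assumes a
 * liblitmus-style library that wraps the new system calls
 * (sys_pi_down/sys_pi_up) and the fdso open path; all names below are
 * placeholders for whatever that library actually provides.
 */
extern int  open_fmlp_sem(const char *name); /* attach to a named semaphore, return an object descriptor */
extern long pi_down(int sem_od);             /* wraps sys_pi_down(): may suspend; waiters queue FIFO */
extern long pi_up(int sem_od);               /* wraps sys_pi_up(): wakes at most one waiter per release */

void use_shared_resource(void)
{
	int od = open_fmlp_sem("/my_resource");

	pi_down(od);
	/* ... critical section protected by the FMLP semaphore ... */
	pi_up(od);
}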
diff --git a/litmus/Makefile b/litmus/Makefile
index a6a9f8797d..db8cc21355 100644
--- a/litmus/Makefile
+++ b/litmus/Makefile
@@ -4,5 +4,6 @@
 
 obj-y = sched_plugin.o litmus.o sched_trace.o \
 	edf_common.o jobs.o\
-	sched_gsn_edf.o sched_psn_edf.o litmus_sem.o \
-	trace.o ft_event.o rt_domain.o fdso.o sync.o
+	sched_gsn_edf.o sched_psn_edf.o \
+	trace.o ft_event.o rt_domain.o fdso.o sync.o \
+	fmlp.o srp.o
diff --git a/litmus/fmlp.c b/litmus/fmlp.c
new file mode 100644
index 0000000000..b40e42e9e0
--- /dev/null
+++ b/litmus/fmlp.c
@@ -0,0 +1,282 @@
+/*
+ * FMLP implementation.
+ * Much of the code here is borrowed from include/asm-i386/semaphore.h.
+ */
+
+#include <asm/atomic.h>
+#include <asm/semaphore.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <litmus/litmus.h>
+#include <litmus/sched_plugin.h>
+#include <litmus/edf_common.h>
+
+#include <litmus/fdso.h>
+
+#include <litmus/trace.h>
+
+/* ************************************************************************** */
+/*                           PRIORITY INHERITANCE                             */
+/* ************************************************************************** */
+
+static void* create_pi_semaphore(void)
+{
+	struct pi_semaphore* sem;
+	int i;
+
+	sem = kmalloc(sizeof(struct pi_semaphore), GFP_KERNEL);
+	if (!sem)
+		return NULL;
+	atomic_set(&sem->count, 1);
+	sem->sleepers = 0;
+	init_waitqueue_head(&sem->wait);
+	sem->hp.task = NULL;
+	sem->holder = NULL;
+	for (i = 0; i < NR_CPUS; i++)
+		sem->hp.cpu_task[i] = NULL;
+	return sem;
+}
+
+static void destroy_pi_semaphore(void* sem)
+{
+	/* XXX assert invariants */
+	kfree(sem);
+}
+
+struct fdso_ops pi_sem_ops = {
+	.create = create_pi_semaphore,
+	.destroy = destroy_pi_semaphore
+};
+
+struct wq_pair {
+	struct task_struct* tsk;
+	struct pi_semaphore* sem;
+};
+
+static int rt_pi_wake_up(wait_queue_t *wait, unsigned mode, int sync,
+			 void *key)
+{
+	struct wq_pair* wqp = (struct wq_pair*) wait->private;
+	set_rt_flags(wqp->tsk, RT_F_EXIT_SEM);
+	litmus->inherit_priority(wqp->sem, wqp->tsk);
+	TRACE_TASK(wqp->tsk,
+		   "woken up by rt_pi_wake_up() (RT_F_SEM_EXIT, PI)\n");
+	/* point to task for default_wake_function() */
+	wait->private = wqp->tsk;
+	default_wake_function(wait, mode, sync, key);
+
+	/* Always return true since we know that if we encountered a task
+	 * that was already running the wake_up raced with the schedule in
+	 * rt_pi_down(). In that case the task in rt_pi_down() will be scheduled
+	 * immediately and own the lock. We must not wake up another task in
+	 * any case.
+	 */
+	return 1;
+}
+
+/* caller is responsible for locking */
+int edf_set_hp_task(struct pi_semaphore *sem)
+{
+	struct list_head *tmp, *next;
+	struct task_struct *queued;
+	int ret = 0;
+
+	sem->hp.task = NULL;
+	list_for_each_safe(tmp, next, &sem->wait.task_list) {
+		queued = ((struct wq_pair*)
+			list_entry(tmp, wait_queue_t,
+				   task_list)->private)->tsk;
+
+		/* Compare task prios, find high prio task. */
+		if (edf_higher_prio(queued, sem->hp.task)) {
+			sem->hp.task = queued;
+			ret = 1;
+		}
+	}
+	return ret;
+}
+
+/* caller is responsible for locking */
+int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu)
+{
+	struct list_head *tmp, *next;
+	struct task_struct *queued;
+	int ret = 0;
+
+	sem->hp.cpu_task[cpu] = NULL;
+	list_for_each_safe(tmp, next, &sem->wait.task_list) {
+		queued = ((struct wq_pair*)
+			list_entry(tmp, wait_queue_t,
+				   task_list)->private)->tsk;
+
+		/* Compare task prios, find high prio task. */
+		if (get_partition(queued) == cpu &&
+		    edf_higher_prio(queued, sem->hp.cpu_task[cpu])) {
+			sem->hp.cpu_task[cpu] = queued;
+			ret = 1;
+		}
+	}
+	return ret;
+}
+
+int do_pi_down(struct pi_semaphore* sem)
+{
+	unsigned long flags;
+	struct task_struct *tsk = current;
+	struct wq_pair pair;
+	int suspended = 1;
+	wait_queue_t wait = {
+		.private = &pair,
+		.func = rt_pi_wake_up,
+		.task_list = {NULL, NULL}
+	};
+
+	pair.tsk = tsk;
+	pair.sem = sem;
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	if (atomic_dec_return(&sem->count) < 0 ||
+	    waitqueue_active(&sem->wait)) {
+		/* we need to suspend */
+		tsk->state = TASK_UNINTERRUPTIBLE;
+		add_wait_queue_exclusive_locked(&sem->wait, &wait);
+
+		TRACE_CUR("suspends on PI lock %p\n", sem);
+		litmus->pi_block(sem, tsk);
+
+		/* release lock before sleeping */
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+
+		TS_PI_DOWN_END;
+		preempt_enable_no_resched();
+
+
+		/* we depend on the FIFO order
+		 * Thus, we don't need to recheck when we wake up, we
+		 * are guaranteed to have the lock since there is only one
+		 * wake up per release
+		 */
+		schedule();
+
+		TRACE_CUR("woke up, now owns PI lock %p\n", sem);
+
+		/* try_to_wake_up() set our state to TASK_RUNNING,
+		 * all we need to do is to remove our wait queue entry
+		 */
+		remove_wait_queue(&sem->wait, &wait);
+	} else {
+		/* no priority inheritance necessary, since there are no queued
+		 * tasks.
+		 */
+		suspended = 0;
+		TRACE_CUR("acquired PI lock %p, no contention\n", sem);
+		sem->holder = tsk;
+		sem->hp.task = tsk;
+		litmus->inherit_priority(sem, tsk);
+		spin_unlock_irqrestore(&sem->wait.lock, flags);
+	}
+	return suspended;
+}
+
+void do_pi_up(struct pi_semaphore* sem)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sem->wait.lock, flags);
+
+	TRACE_CUR("releases PI lock %p\n", sem);
+	litmus->return_priority(sem);
+	sem->holder = NULL;
+	if (atomic_inc_return(&sem->count) < 1)
+		/* there is a task queued */
+		wake_up_locked(&sem->wait);
+
+	spin_unlock_irqrestore(&sem->wait.lock, flags);
+}
+
+asmlinkage long sys_pi_down(int sem_od)
+{
+	long ret = 0;
+	struct pi_semaphore * sem;
+	int suspended = 0;
+
+	preempt_disable();
+	TS_PI_DOWN_START;
+
+	sem = lookup_pi_sem(sem_od);
+	if (sem)
+		suspended = do_pi_down(sem);
+	else
+		ret = -EINVAL;
+
+	if (!suspended) {
+		TS_PI_DOWN_END;
+		preempt_enable();
+	}
+
+	return ret;
+}
+
+asmlinkage long sys_pi_up(int sem_od)
+{
+	long ret = 0;
+	struct pi_semaphore * sem;
+
+	preempt_disable();
+	TS_PI_UP_START;
+
+	sem = lookup_pi_sem(sem_od);
+	if (sem)
+		do_pi_up(sem);
+	else
+		ret = -EINVAL;
+
+
+	TS_PI_UP_END;
+	preempt_enable();
+
+	return ret;
+}
+
+/* Clear wait queue and wakeup waiting tasks, and free semaphore. */
+/*
+asmlinkage long sys_pi_sema_free(int sem_id)
+{
+	struct list_head *tmp, *next;
+	unsigned long flags;
+
+	if (sem_id < 0 || sem_id >= MAX_PI_SEMAPHORES)
+		return -EINVAL;
+
+	if (!pi_sems[sem_id].used)
+		return -EINVAL;
+
+	spin_lock_irqsave(&pi_sems[sem_id].wait.lock, flags);
+	if (waitqueue_active(&pi_sems[sem_id].wait)) {
+		list_for_each_safe(tmp, next,
+				   &pi_sems[sem_id].wait.task_list) {
+			wait_queue_t *curr = list_entry(tmp, wait_queue_t,
+							task_list);
+			list_del(tmp);
+			set_rt_flags((struct task_struct*)curr->private,
+				     RT_F_EXIT_SEM);
+			curr->func(curr,
+				   TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
+				   0, NULL);
+		}
+	}
+
+	spin_unlock_irqrestore(&pi_sems[sem_id].wait.lock, flags);
+	pi_sems[sem_id].used = 0;
+
+	return 0;
+}
+*/
+
+
+
+/* ************************************************************************** */
+
+
+
diff --git a/litmus/litmus_sem.c b/litmus/srp.c
index 5d51d337d9..3a1e7d8cc8 100644
--- a/litmus/litmus_sem.c
+++ b/litmus/srp.c
@@ -1,288 +1,16 @@
-/*
- * PI semaphores and SRP implementations.
- * Much of the code here is borrowed from include/asm-i386/semaphore.h.
- *
- * NOTE: This implementation is very much a prototype and horribly insecure. It
- * is intended to be a proof of concept, not a feature-complete solution.
- */
+/* ************************************************************************** */
+/*                           STACK RESOURCE POLICY                            */
+/* ************************************************************************** */
 
 #include <asm/atomic.h>
-#include <asm/semaphore.h>
-#include <linux/sched.h>
 #include <linux/wait.h>
-#include <linux/spinlock.h>
 #include <litmus/litmus.h>
 #include <litmus/sched_plugin.h>
-#include <litmus/edf_common.h>
 
 #include <litmus/fdso.h>
 
 #include <litmus/trace.h>
 
-/* ************************************************************************** */
-/*                           PRIORITY INHERITANCE                             */
-/* ************************************************************************** */
-
-static void* create_pi_semaphore(void)
-{
-	struct pi_semaphore* sem;
-	int i;
-
-	sem = kmalloc(sizeof(struct pi_semaphore), GFP_KERNEL);
-	if (!sem)
-		return NULL;
-	atomic_set(&sem->count, 1);
-	sem->sleepers = 0;
-	init_waitqueue_head(&sem->wait);
-	sem->hp.task = NULL;
-	sem->holder = NULL;
-	for (i = 0; i < NR_CPUS; i++)
-		sem->hp.cpu_task[i] = NULL;
-	return sem;
-}
-
-static void destroy_pi_semaphore(void* sem)
-{
-	/* XXX assert invariants */
-	kfree(sem);
-}
-
-struct fdso_ops pi_sem_ops = {
-	.create = create_pi_semaphore,
-	.destroy = destroy_pi_semaphore
-};
-
-struct wq_pair {
-	struct task_struct* tsk;
-	struct pi_semaphore* sem;
-};
-
-static int rt_pi_wake_up(wait_queue_t *wait, unsigned mode, int sync,
-			 void *key)
-{
-	struct wq_pair* wqp = (struct wq_pair*) wait->private;
-	set_rt_flags(wqp->tsk, RT_F_EXIT_SEM);
-	litmus->inherit_priority(wqp->sem, wqp->tsk);
-	TRACE_TASK(wqp->tsk,
-		   "woken up by rt_pi_wake_up() (RT_F_SEM_EXIT, PI)\n");
-	/* point to task for default_wake_function() */
-	wait->private = wqp->tsk;
-	default_wake_function(wait, mode, sync, key);
-
-	/* Always return true since we know that if we encountered a task
-	 * that was already running the wake_up raced with the schedule in
-	 * rt_pi_down(). In that case the task in rt_pi_down() will be scheduled
-	 * immediately and own the lock. We must not wake up another task in
-	 * any case.
-	 */
-	return 1;
-}
-
-/* caller is responsible for locking */
-int edf_set_hp_task(struct pi_semaphore *sem)
-{
-	struct list_head *tmp, *next;
-	struct task_struct *queued;
-	int ret = 0;
-
-	sem->hp.task = NULL;
-	list_for_each_safe(tmp, next, &sem->wait.task_list) {
-		queued = ((struct wq_pair*)
-			list_entry(tmp, wait_queue_t,
-				   task_list)->private)->tsk;
-
-		/* Compare task prios, find high prio task. */
-		if (edf_higher_prio(queued, sem->hp.task)) {
-			sem->hp.task = queued;
-			ret = 1;
-		}
-	}
-	return ret;
-}
-
-/* caller is responsible for locking */
-int edf_set_hp_cpu_task(struct pi_semaphore *sem, int cpu)
-{
-	struct list_head *tmp, *next;
-	struct task_struct *queued;
-	int ret = 0;
-
-	sem->hp.cpu_task[cpu] = NULL;
-	list_for_each_safe(tmp, next, &sem->wait.task_list) {
-		queued = ((struct wq_pair*)
-			list_entry(tmp, wait_queue_t,
-				   task_list)->private)->tsk;
-
-		/* Compare task prios, find high prio task. */
-		if (get_partition(queued) == cpu &&
-		    edf_higher_prio(queued, sem->hp.cpu_task[cpu])) {
-			sem->hp.cpu_task[cpu] = queued;
-			ret = 1;
-		}
-	}
-	return ret;
-}
-
-int do_pi_down(struct pi_semaphore* sem)
-{
-	unsigned long flags;
-	struct task_struct *tsk = current;
-	struct wq_pair pair;
-	int suspended = 1;
-	wait_queue_t wait = {
-		.private = &pair,
-		.func = rt_pi_wake_up,
-		.task_list = {NULL, NULL}
-	};
-
-	pair.tsk = tsk;
-	pair.sem = sem;
-	spin_lock_irqsave(&sem->wait.lock, flags);
-
-	if (atomic_dec_return(&sem->count) < 0 ||
-	    waitqueue_active(&sem->wait)) {
-		/* we need to suspend */
-		tsk->state = TASK_UNINTERRUPTIBLE;
-		add_wait_queue_exclusive_locked(&sem->wait, &wait);
-
-		TRACE_CUR("suspends on PI lock %p\n", sem);
-		litmus->pi_block(sem, tsk);
-
-		/* release lock before sleeping */
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-
-		TS_PI_DOWN_END;
-		preempt_enable_no_resched();
-
-
-		/* we depend on the FIFO order
-		 * Thus, we don't need to recheck when we wake up, we
-		 * are guaranteed to have the lock since there is only one
-		 * wake up per release
-		 */
-		schedule();
-
-		TRACE_CUR("woke up, now owns PI lock %p\n", sem);
-
-		/* try_to_wake_up() set our state to TASK_RUNNING,
-		 * all we need to do is to remove our wait queue entry
-		 */
-		remove_wait_queue(&sem->wait, &wait);
-	} else {
-		/* no priority inheritance necessary, since there are no queued
-		 * tasks.
-		 */
-		suspended = 0;
-		TRACE_CUR("acquired PI lock %p, no contention\n", sem);
-		sem->holder = tsk;
-		sem->hp.task = tsk;
-		litmus->inherit_priority(sem, tsk);
-		spin_unlock_irqrestore(&sem->wait.lock, flags);
-	}
-	return suspended;
-}
-
-void do_pi_up(struct pi_semaphore* sem)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&sem->wait.lock, flags);
-
-	TRACE_CUR("releases PI lock %p\n", sem);
-	litmus->return_priority(sem);
-	sem->holder = NULL;
-	if (atomic_inc_return(&sem->count) < 1)
-		/* there is a task queued */
-		wake_up_locked(&sem->wait);
-
-	spin_unlock_irqrestore(&sem->wait.lock, flags);
-}
-
-asmlinkage long sys_pi_down(int sem_od)
-{
-	long ret = 0;
-	struct pi_semaphore * sem;
-	int suspended = 0;
-
-	preempt_disable();
-	TS_PI_DOWN_START;
-
-	sem = lookup_pi_sem(sem_od);
-	if (sem)
-		suspended = do_pi_down(sem);
-	else
-		ret = -EINVAL;
-
-	if (!suspended) {
-		TS_PI_DOWN_END;
-		preempt_enable();
-	}
-
-	return ret;
-}
-
-asmlinkage long sys_pi_up(int sem_od)
-{
-	long ret = 0;
-	struct pi_semaphore * sem;
-
-	preempt_disable();
-	TS_PI_UP_START;
-
-	sem = lookup_pi_sem(sem_od);
-	if (sem)
-		do_pi_up(sem);
-	else
-		ret = -EINVAL;
-
-
-	TS_PI_UP_END;
-	preempt_enable();
-
-	return ret;
-}
-
-/* Clear wait queue and wakeup waiting tasks, and free semaphore. */
-/*
-asmlinkage long sys_pi_sema_free(int sem_id)
-{
-	struct list_head *tmp, *next;
-	unsigned long flags;
-
-	if (sem_id < 0 || sem_id >= MAX_PI_SEMAPHORES)
-		return -EINVAL;
-
-	if (!pi_sems[sem_id].used)
-		return -EINVAL;
-
-	spin_lock_irqsave(&pi_sems[sem_id].wait.lock, flags);
-	if (waitqueue_active(&pi_sems[sem_id].wait)) {
-		list_for_each_safe(tmp, next,
-				   &pi_sems[sem_id].wait.task_list) {
-			wait_queue_t *curr = list_entry(tmp, wait_queue_t,
-							task_list);
-			list_del(tmp);
-			set_rt_flags((struct task_struct*)curr->private,
-				     RT_F_EXIT_SEM);
-			curr->func(curr,
-				   TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
-				   0, NULL);
-		}
-	}
-
-	spin_unlock_irqrestore(&pi_sems[sem_id].wait.lock, flags);
-	pi_sems[sem_id].used = 0;
-
-	return 0;
-}
-*/
-
-
-
-/* ************************************************************************** */
-/*                           STACK RESOURCE POLICY                            */
-/* ************************************************************************** */
-
 
 struct srp_priority {
 	struct list_head list;
@@ -566,7 +294,6 @@ void srp_ceiling_block(void)
 		return;
 
 	preempt_disable();
-	tsk->rt_param.srp_block = 0;
 	if (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp))) {
 		TRACE_CUR("is priority ceiling blocked.\n");
 		while (!srp_exceeds_ceiling(tsk, &__get_cpu_var(srp)))
@@ -577,7 +304,3 @@ void srp_ceiling_block(void)
 	preempt_enable();
 }
 
-/* ************************************************************************** */
-
-
-