Diffstat (limited to 'litmus/rsm_lock.c')
-rw-r--r--   litmus/rsm_lock.c   796
1 files changed, 796 insertions, 0 deletions
diff --git a/litmus/rsm_lock.c b/litmus/rsm_lock.c
new file mode 100644
index 000000000000..3dfd8ae9d221
--- /dev/null
+++ b/litmus/rsm_lock.c
@@ -0,0 +1,796 @@
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <litmus/trace.h>
#include <litmus/sched_plugin.h>
#include <litmus/rsm_lock.h>

//#include <litmus/edf_common.h>

#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
#include <litmus/gpu_affinity.h>
#endif


/* caller is responsible for locking */
static struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex,
                                                    struct task_struct* skip)
{
    wait_queue_t *q;
    struct list_head *pos;
    struct task_struct *queued = NULL, *found = NULL;

#ifdef CONFIG_LITMUS_DGL_SUPPORT
    dgl_wait_state_t *dgl_wait = NULL;
#endif

    list_for_each(pos, &mutex->wait.task_list) {
        q = list_entry(pos, wait_queue_t, task_list);

#ifdef CONFIG_LITMUS_DGL_SUPPORT
        if(q->func == dgl_wake_up) {
            dgl_wait = (dgl_wait_state_t*) q->private;
            if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) {
                queued = dgl_wait->task;
            }
            else {
                queued = NULL;  // skip it.
            }
        }
        else {
            queued = (struct task_struct*) q->private;
        }
#else
        queued = (struct task_struct*) q->private;
#endif

        /* Compare task prios, find high prio task. */
        //if (queued && queued != skip && edf_higher_prio(queued, found)) {
        if (queued && queued != skip && litmus->compare(queued, found)) {
            found = queued;
        }
    }
    return found;
}


#ifdef CONFIG_LITMUS_DGL_SUPPORT

int rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t)
{
    struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
    return(mutex->owner == t);
}

// Return 1 if the resource was immediately acquired.
// Assumes mutex->lock is held.
// Must set task state to TASK_UNINTERRUPTIBLE if task blocks.
int rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait,
                       wait_queue_t* wq_node)
{
    struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
    struct task_struct *t = dgl_wait->task;

    int acquired_immediately = 0;

    BUG_ON(t != current);

    if (mutex->owner) {
        TRACE_TASK(t, "Enqueuing on lock %d.\n", l->ident);

        init_dgl_waitqueue_entry(wq_node, dgl_wait);

        set_task_state(t, TASK_UNINTERRUPTIBLE);
        __add_wait_queue_tail_exclusive(&mutex->wait, wq_node);
    } else {
        TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident);

        /* it's ours now */
        mutex->owner = t;

        raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
        binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks,
                    struct nested_info, hp_binheap_node);
        raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);

        acquired_immediately = 1;
    }

    return acquired_immediately;
}

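/* Marks the DGL requester as blocked on this lock and, if it is now the
 * highest-priority waiter, propagates priority inheritance to the current
 * owner. The fine-grained mutex->lock is released before returning. */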
void rsm_mutex_enable_priority(struct litmus_lock *l,
                               dgl_wait_state_t* dgl_wait)
{
    struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
    struct task_struct *t = dgl_wait->task;
    struct task_struct *owner = mutex->owner;
    unsigned long flags = 0;  // these are unused under DGL coarse-grain locking

    BUG_ON(owner == t);

    tsk_rt(t)->blocked_lock = l;
    mb();

    //if (edf_higher_prio(t, mutex->hp_waiter)) {
    if (litmus->compare(t, mutex->hp_waiter)) {

        struct task_struct *old_max_eff_prio;
        struct task_struct *new_max_eff_prio;
        struct task_struct *new_prio = NULL;

        if(mutex->hp_waiter)
            TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n",
                       mutex->hp_waiter->comm, mutex->hp_waiter->pid);
        else
            TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n");

        raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);

        old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
        mutex->hp_waiter = t;
        l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
        binheap_decrease(&l->nest.hp_binheap_node,
                         &tsk_rt(owner)->hp_blocked_tasks);
        new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);

        if(new_max_eff_prio != old_max_eff_prio) {
            TRACE_TASK(t, "is new hp_waiter.\n");

            if ((effective_priority(owner) == old_max_eff_prio) ||
                //(__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){
                (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))){
                new_prio = new_max_eff_prio;
            }
        }
        else {
            TRACE_TASK(t, "no change in max_eff_prio of heap.\n");
        }

        if(new_prio) {
            litmus->nested_increase_prio(owner, new_prio,
                                         &mutex->lock, flags);  // unlocks lock.
        }
        else {
            raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
            unlock_fine_irqrestore(&mutex->lock, flags);
        }
    }
    else {
        TRACE_TASK(t, "no change in hp_waiter.\n");
        unlock_fine_irqrestore(&mutex->lock, flags);
    }
}

static void select_next_lock_if_primary(struct litmus_lock *l,
                                        dgl_wait_state_t *dgl_wait)
{
    if(tsk_rt(dgl_wait->task)->blocked_lock == l) {
        TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n",
                  l->ident, dgl_wait->task->comm, dgl_wait->task->pid);
        tsk_rt(dgl_wait->task)->blocked_lock = NULL;
        mb();
        select_next_lock(dgl_wait /*, l*/);  // pick the next lock to be blocked on
    }
    else {
        TRACE_CUR("Got lock early! Lock %d in DGL was NOT primary for %s/%d.\n",
                  l->ident, dgl_wait->task->comm, dgl_wait->task->pid);
    }
}
#endif


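/* Single-lock acquire path. If the mutex is free, the caller becomes the
 * owner and the lock is recorded in its hp_blocked_tasks heap. Otherwise the
 * caller enqueues FIFO on the wait queue, updates hp_waiter and propagates
 * priority inheritance to the owner if it is now the highest-priority waiter,
 * and suspends until ownership is handed over in rsm_mutex_unlock(). */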
int rsm_mutex_lock(struct litmus_lock* l)
{
    struct task_struct *t = current;
    struct task_struct *owner;
    struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
    wait_queue_t wait;
    unsigned long flags;

#ifdef CONFIG_LITMUS_DGL_SUPPORT
    raw_spinlock_t *dgl_lock;
#endif

    if (!is_realtime(t))
        return -EPERM;

#ifdef CONFIG_LITMUS_DGL_SUPPORT
    dgl_lock = litmus->get_dgl_spinlock(t);
#endif

    lock_global_irqsave(dgl_lock, flags);
    lock_fine_irqsave(&mutex->lock, flags);

    if (mutex->owner) {
        TRACE_TASK(t, "Blocking on lock %d.\n", l->ident);

#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
        // KLUDGE: don't count this suspension as time in the GPU
        // critical section
        if(tsk_rt(t)->held_gpus) {
            tsk_rt(t)->suspend_gpu_tracker_on_block = 1;
        }
#endif

        /* resource is not free => must suspend and wait */

        owner = mutex->owner;

        init_waitqueue_entry(&wait, t);

        tsk_rt(t)->blocked_lock = l;  /* record where we are blocked */
        mb();  // needed?

        /* FIXME: interruptible would be nice some day */
        set_task_state(t, TASK_UNINTERRUPTIBLE);

        __add_wait_queue_tail_exclusive(&mutex->wait, &wait);

        /* check if we need to activate priority inheritance */
        //if (edf_higher_prio(t, mutex->hp_waiter)) {
        if (litmus->compare(t, mutex->hp_waiter)) {

            struct task_struct *old_max_eff_prio;
            struct task_struct *new_max_eff_prio;
            struct task_struct *new_prio = NULL;

            if(mutex->hp_waiter)
                TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n",
                           mutex->hp_waiter->comm, mutex->hp_waiter->pid);
            else
                TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n");

            raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);

            old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
            mutex->hp_waiter = t;
            l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
            binheap_decrease(&l->nest.hp_binheap_node,
                             &tsk_rt(owner)->hp_blocked_tasks);
            new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);

            if(new_max_eff_prio != old_max_eff_prio) {
                TRACE_TASK(t, "is new hp_waiter.\n");

                if ((effective_priority(owner) == old_max_eff_prio) ||
                    //(__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))){
                    (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))){
                    new_prio = new_max_eff_prio;
                }
            }
            else {
                TRACE_TASK(t, "no change in max_eff_prio of heap.\n");
            }

            if(new_prio) {
                litmus->nested_increase_prio(owner, new_prio, &mutex->lock,
                                             flags);  // unlocks lock.
            }
            else {
                raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
                unlock_fine_irqrestore(&mutex->lock, flags);
            }
        }
        else {
            TRACE_TASK(t, "no change in hp_waiter.\n");

            unlock_fine_irqrestore(&mutex->lock, flags);
        }

        unlock_global_irqrestore(dgl_lock, flags);

        TS_LOCK_SUSPEND;

        /* We depend on the FIFO order. Thus, we don't need to recheck
         * when we wake up; we are guaranteed to have the lock since
         * there is only one wake up per release.
         */

        suspend_for_lock();

        TS_LOCK_RESUME;

        /* Since we hold the lock, no other task will change
         * ->owner. We can thus check it without acquiring the spin
         * lock. */
        BUG_ON(mutex->owner != t);

        TRACE_TASK(t, "Acquired lock %d.\n", l->ident);

    } else {
        TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident);

        /* it's ours now */
        mutex->owner = t;

        raw_spin_lock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock);
        binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks,
                    struct nested_info, hp_binheap_node);
        raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock);

        unlock_fine_irqrestore(&mutex->lock, flags);
        unlock_global_irqrestore(dgl_lock, flags);
    }

    return 0;
}


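/* Release path: the lock is removed from the releasing task's
 * hp_blocked_tasks heap and any priority inherited solely through this lock
 * is dropped. Ownership is then handed to the next FIFO waiter (extracted in
 * a DGL-aware fashion), hp_waiter is recomputed if needed, inheritance is
 * established for the new owner, and the new owner is woken up unless it is
 * still waiting on other locks of its DGL request. */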
int rsm_mutex_unlock(struct litmus_lock* l)
{
    struct task_struct *t = current, *next = NULL;
    struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
    unsigned long flags;

    struct task_struct *old_max_eff_prio;

    int wake_up_task = 1;

#ifdef CONFIG_LITMUS_DGL_SUPPORT
    dgl_wait_state_t *dgl_wait = NULL;
    raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t);
#endif

    int err = 0;

    if (mutex->owner != t) {
        err = -EINVAL;
        return err;
    }

    lock_global_irqsave(dgl_lock, flags);
    lock_fine_irqsave(&mutex->lock, flags);

    raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);

    TRACE_TASK(t, "Freeing lock %d\n", l->ident);

    old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks);
    binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks);

    if(tsk_rt(t)->inh_task){
        struct task_struct *new_max_eff_prio =
            top_priority(&tsk_rt(t)->hp_blocked_tasks);

        if((new_max_eff_prio == NULL) ||
           /* there was a change in eff prio */
           ( (new_max_eff_prio != old_max_eff_prio) &&
             /* and owner had the old eff prio */
             (effective_priority(t) == old_max_eff_prio)) )
        {
            // old_max_eff_prio > new_max_eff_prio

            //if(__edf_higher_prio(new_max_eff_prio, BASE, t, EFFECTIVE)) {
            if(litmus->__compare(new_max_eff_prio, BASE, t, EFFECTIVE)) {
                TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n",
                           new_max_eff_prio->comm, new_max_eff_prio->pid,
                           t->comm, t->pid, tsk_rt(t)->inh_task->comm,
                           tsk_rt(t)->inh_task->pid);
                WARN_ON(1);
            }

            litmus->decrease_prio(t, new_max_eff_prio);
        }
    }

    if(binheap_empty(&tsk_rt(t)->hp_blocked_tasks) &&
       tsk_rt(t)->inh_task != NULL)
    {
        WARN_ON(tsk_rt(t)->inh_task != NULL);
        TRACE_TASK(t, "No more locks are held, but eff_prio = %s/%d\n",
                   tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid);
    }

    raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);


    /* check if there are jobs waiting for this resource */
#ifdef CONFIG_LITMUS_DGL_SUPPORT
    __waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next);
    if(dgl_wait) {
        next = dgl_wait->task;
        //select_next_lock_if_primary(l, dgl_wait);
    }
#else
    next = __waitqueue_remove_first(&mutex->wait);
#endif
    if (next) {
        /* next becomes the resource holder */
        mutex->owner = next;
        TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);

        /* determine new hp_waiter if necessary */
        if (next == mutex->hp_waiter) {

            TRACE_TASK(next, "was highest-prio waiter\n");
            /* next has the highest priority --- it doesn't need to
             * inherit. However, we need to make sure that the
             * next-highest priority in the queue is reflected in
             * hp_waiter. */
            mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next);
            l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
                effective_priority(mutex->hp_waiter) :
                NULL;

            if (mutex->hp_waiter)
                TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
            else
                TRACE("no further waiters\n");

            raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);

            binheap_add(&l->nest.hp_binheap_node,
                        &tsk_rt(next)->hp_blocked_tasks,
                        struct nested_info, hp_binheap_node);

#ifdef CONFIG_LITMUS_DGL_SUPPORT
            if(dgl_wait) {
                select_next_lock_if_primary(l, dgl_wait);
                //wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining);
                --(dgl_wait->nr_remaining);
                wake_up_task = (dgl_wait->nr_remaining == 0);
            }
#endif
            raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
        }
        else {
            /* Well, if 'next' is not the highest-priority waiter,
             * then it (probably) ought to inherit the highest-priority
             * waiter's priority. */
            TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident);

            raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);

            binheap_add(&l->nest.hp_binheap_node,
                        &tsk_rt(next)->hp_blocked_tasks,
                        struct nested_info, hp_binheap_node);

#ifdef CONFIG_LITMUS_DGL_SUPPORT
            if(dgl_wait) {
                select_next_lock_if_primary(l, dgl_wait);
                --(dgl_wait->nr_remaining);
                wake_up_task = (dgl_wait->nr_remaining == 0);
            }
#endif

            /* It is possible that 'next' *should* be the hp_waiter, but isn't
             * because that update hasn't yet executed (the update operation is
             * probably blocked on mutex->lock). So only inherit if the top of
             * 'next''s heap is indeed the effective priority of hp_waiter.
             * (We use l->nest.hp_waiter_eff_prio instead of
             * effective_priority(hp_waiter) since the effective priority of
             * hp_waiter can change and the update may not have reached this
             * lock yet.)
             */
#ifdef CONFIG_LITMUS_DGL_SUPPORT
            if((l->nest.hp_waiter_eff_prio != NULL) &&
               (top_priority(&tsk_rt(next)->hp_blocked_tasks) ==
                l->nest.hp_waiter_eff_prio))
            {
                if(dgl_wait && tsk_rt(next)->blocked_lock) {
                    BUG_ON(wake_up_task);
                    //if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) {
                    if(litmus->__compare(l->nest.hp_waiter_eff_prio, BASE, next, EFFECTIVE)) {
                        litmus->nested_increase_prio(next,
                            l->nest.hp_waiter_eff_prio, &mutex->lock, flags);  // unlocks lock && hp_blocked_tasks_lock.
                        goto out;  // all spinlocks are released. bail out now.
                    }
                }
                else {
                    litmus->increase_prio(next, l->nest.hp_waiter_eff_prio);
                }
            }

            raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
#else
            if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) ==
                      l->nest.hp_waiter_eff_prio))
            {
                litmus->increase_prio(next, l->nest.hp_waiter_eff_prio);
            }
            raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
#endif
        }

        if(wake_up_task) {
            TRACE_TASK(next, "waking up since it is no longer blocked.\n");

            tsk_rt(next)->blocked_lock = NULL;
            mb();

#if defined(CONFIG_LITMUS_AFFINITY_LOCKING) && defined(CONFIG_LITMUS_NVIDIA)
            // re-enable tracking
            if(tsk_rt(next)->held_gpus) {
                tsk_rt(next)->suspend_gpu_tracker_on_block = 0;
            }
#endif

            wake_up_process(next);
        }
        else {
            TRACE_TASK(next, "is still blocked.\n");
        }
    }
    else {
        /* becomes available */
        mutex->owner = NULL;
    }

    unlock_fine_irqrestore(&mutex->lock, flags);

#ifdef CONFIG_LITMUS_DGL_SUPPORT
out:
#endif
    unlock_global_irqrestore(dgl_lock, flags);

    return err;
}


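/* Relay-style propagation of a priority *increase* along a chain of blocked
 * tasks: the caller passes in the spinlock it already holds (to_unlock),
 * which is released only after this lock's fine-grained spinlock has been
 * acquired. If 't' is (or becomes) the highest-priority waiter here, the
 * owner's hp_blocked_tasks heap is updated and the inheritance is pushed on
 * to the owner; if 't' has meanwhile blocked on a different lock, the
 * propagation is forwarded to that lock instead. */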
void rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
                                              struct task_struct* t,
                                              raw_spinlock_t* to_unlock,
                                              unsigned long irqflags)
{
    struct rsm_mutex *mutex = rsm_mutex_from_lock(l);

    // relay-style locking
    lock_fine(&mutex->lock);
    unlock_fine(to_unlock);

    if(tsk_rt(t)->blocked_lock == l) {  // prevent race on tsk_rt(t)->blocked
        struct task_struct *owner = mutex->owner;

        struct task_struct *old_max_eff_prio;
        struct task_struct *new_max_eff_prio;

        raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);

        old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);

        //if((t != mutex->hp_waiter) && edf_higher_prio(t, mutex->hp_waiter)) {
        if((t != mutex->hp_waiter) && litmus->compare(t, mutex->hp_waiter)) {
            TRACE_TASK(t, "is new highest-prio waiter by propagation.\n");
            mutex->hp_waiter = t;
        }
        if(t == mutex->hp_waiter) {
            // reflect the increased priority in the heap node.
            l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);

            BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node));
            BUG_ON(!binheap_is_in_this_heap(&l->nest.hp_binheap_node,
                                            &tsk_rt(owner)->hp_blocked_tasks));

            binheap_decrease(&l->nest.hp_binheap_node,
                             &tsk_rt(owner)->hp_blocked_tasks);
        }

        new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);


        if(new_max_eff_prio != old_max_eff_prio) {
            // new_max_eff_prio > old_max_eff_prio holds.
            if ((effective_priority(owner) == old_max_eff_prio) ||
                //(__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))) {
                (litmus->__compare(new_max_eff_prio, BASE, owner, EFFECTIVE))) {
                TRACE_CUR("Propagating inheritance to holder of lock %d.\n",
                          l->ident);

                // beware: recursion
                litmus->nested_increase_prio(owner, new_max_eff_prio,
                                             &mutex->lock, irqflags);  // unlocks mutex->lock
            }
            else {
                TRACE_CUR("Lower priority than holder %s/%d. No propagation.\n",
                          owner->comm, owner->pid);
                raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
                unlock_fine_irqrestore(&mutex->lock, irqflags);
            }
        }
        else {
            TRACE_TASK(mutex->owner, "No change in maximum effective priority.\n");
            raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
            unlock_fine_irqrestore(&mutex->lock, irqflags);
        }
    }
    else {
        struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock;

        TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident);
        if(still_blocked) {
            TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n",
                       still_blocked->ident);
            if(still_blocked->ops->propagate_increase_inheritance) {
                /* due to relay-style nesting of spinlocks (acq. A, acq. B, free A, free B)
                   we know that task 't' has not released any locks behind us in this
                   chain. Propagation just needs to catch up with task 't'. */
                still_blocked->ops->propagate_increase_inheritance(still_blocked,
                                                                   t,
                                                                   &mutex->lock,
                                                                   irqflags);
            }
            else {
                TRACE_TASK(t,
                           "Inheritor is blocked on lock (%p) that does not "
                           "support nesting!\n",
                           still_blocked);
                unlock_fine_irqrestore(&mutex->lock, irqflags);
            }
        }
        else {
            unlock_fine_irqrestore(&mutex->lock, irqflags);
        }
    }
}


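/* Relay-style propagation of a priority *decrease*: if 't' was this lock's
 * hp_waiter, the lock's node is removed from the owner's hp_blocked_tasks
 * heap, hp_waiter is recomputed, the node is re-inserted with the new
 * priority, and the owner's inheritance is lowered if it was based on the
 * old maximum. As above, the propagation is forwarded if 't' is actually
 * blocked on a different lock. */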
void rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
                                              struct task_struct* t,
                                              raw_spinlock_t* to_unlock,
                                              unsigned long irqflags)
{
    struct rsm_mutex *mutex = rsm_mutex_from_lock(l);

    // relay-style locking
    lock_fine(&mutex->lock);
    unlock_fine(to_unlock);

    if(tsk_rt(t)->blocked_lock == l) {  // prevent race on tsk_rt(t)->blocked
        if(t == mutex->hp_waiter) {
            struct task_struct *owner = mutex->owner;

            struct task_struct *old_max_eff_prio;
            struct task_struct *new_max_eff_prio;

            raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);

            old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);

            binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
            mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL);
            l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
                effective_priority(mutex->hp_waiter) : NULL;
            binheap_add(&l->nest.hp_binheap_node,
                        &tsk_rt(owner)->hp_blocked_tasks,
                        struct nested_info, hp_binheap_node);

            new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);

            if((old_max_eff_prio != new_max_eff_prio) &&
               (effective_priority(owner) == old_max_eff_prio))
            {
                // Need to set new effective_priority for owner

                struct task_struct *decreased_prio;

                TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n",
                          l->ident);

                //if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) {
                if(litmus->__compare(new_max_eff_prio, BASE, owner, BASE)) {
                    TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n",
                              (new_max_eff_prio) ? new_max_eff_prio->comm : "nil",
                              (new_max_eff_prio) ? new_max_eff_prio->pid : -1,
                              owner->comm,
                              owner->pid,
                              l->ident);

                    decreased_prio = new_max_eff_prio;
                }
                else {
                    TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of lock %d.\n",
                              (new_max_eff_prio) ? new_max_eff_prio->comm : "nil",
                              (new_max_eff_prio) ? new_max_eff_prio->pid : -1,
                              owner->comm,
                              owner->pid,
                              l->ident);

                    decreased_prio = NULL;
                }

                // beware: recursion
                litmus->nested_decrease_prio(owner, decreased_prio, &mutex->lock, irqflags);  // will unlock mutex->lock
            }
            else {
                raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
                unlock_fine_irqrestore(&mutex->lock, irqflags);
            }
        }
        else {
            TRACE_TASK(t, "is not hp_waiter. No propagation.\n");
            unlock_fine_irqrestore(&mutex->lock, irqflags);
        }
    }
    else {
        struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock;

        TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident);
        if(still_blocked) {
            TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n",
                       still_blocked->ident);
            if(still_blocked->ops->propagate_decrease_inheritance) {
                /* due to linked nesting of spinlocks (acq. A, acq. B, free A, free B)
                   we know that task 't' has not released any locks behind us in this
                   chain. Propagation just needs to catch up with task 't'. */
                still_blocked->ops->propagate_decrease_inheritance(still_blocked,
                                                                   t,
                                                                   &mutex->lock,
                                                                   irqflags);
            }
            else {
                TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n",
                           still_blocked);
                unlock_fine_irqrestore(&mutex->lock, irqflags);
            }
        }
        else {
            unlock_fine_irqrestore(&mutex->lock, irqflags);
        }
    }
}


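/* Called when a task closes its file descriptor for the lock: if the task
 * still owns the mutex at that point, the lock is released on its behalf. */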
int rsm_mutex_close(struct litmus_lock* l)
{
    struct task_struct *t = current;
    struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
    unsigned long flags;

    int owner;

#ifdef CONFIG_LITMUS_DGL_SUPPORT
    raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t);
#endif

    lock_global_irqsave(dgl_lock, flags);
    lock_fine_irqsave(&mutex->lock, flags);

    owner = (mutex->owner == t);

    unlock_fine_irqrestore(&mutex->lock, flags);
    unlock_global_irqrestore(dgl_lock, flags);

    if (owner)
        rsm_mutex_unlock(l);

    return 0;
}

void rsm_mutex_free(struct litmus_lock* lock)
{
    kfree(rsm_mutex_from_lock(lock));
}

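/* Allocate and initialize an unowned RSM mutex: empty wait queue, no
 * hp_waiter, and a fine-grained spinlock used for relay-style nested
 * locking. */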
struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops* ops)
{
    struct rsm_mutex* mutex;

    mutex = kmalloc(sizeof(*mutex), GFP_KERNEL);
    if (!mutex)
        return NULL;

    mutex->litmus_lock.ops = ops;
    mutex->owner = NULL;
    mutex->hp_waiter = NULL;
    init_waitqueue_head(&mutex->wait);


#ifdef CONFIG_DEBUG_SPINLOCK
    {
        __raw_spin_lock_init(&mutex->lock,
                             ((struct litmus_lock*)mutex)->cheat_lockdep,
                             &((struct litmus_lock*)mutex)->key);
    }
#else
    raw_spin_lock_init(&mutex->lock);
#endif

    ((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter;

    return &mutex->litmus_lock;
}