| author | Glenn Elliott <gelliott@cs.unc.edu> | 2012-04-13 16:18:03 -0400 |
|---|---|---|
| committer | Glenn Elliott <gelliott@cs.unc.edu> | 2012-04-13 16:18:03 -0400 |
| commit | c0667dc4894e913048cf8904f0ce9a79b481b556 | |
| tree | 1803f6f9a6de45c949f57d1172aab4aa2546393b /litmus/rsm_lock.c | |
| parent | 8eb55f8fa1a2c3854f0f77b9b8663178c0129f6c | |
Move RSM and IKGLP implementations into their own .c files (wip-ikglp)
Also reformatted the code to be slightly more compliant with
standard coding practice.
Diffstat (limited to 'litmus/rsm_lock.c')
| -rw-r--r-- | litmus/rsm_lock.c | 774 |
1 file changed, 774 insertions(+), 0 deletions(-)
diff --git a/litmus/rsm_lock.c b/litmus/rsm_lock.c
new file mode 100644
index 000000000000..11d119210ef9
--- /dev/null
+++ b/litmus/rsm_lock.c
@@ -0,0 +1,774 @@
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <litmus/trace.h>
#include <litmus/sched_plugin.h>
#include <litmus/rsm_lock.h>

#include <litmus/edf_common.h>


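/* Scan the wait queue for the highest-priority waiter, ignoring 'skip'.
 * Under CONFIG_LITMUS_DGL_SUPPORT, a queue entry may belong to a DGL request
 * whose task is currently blocked on a *different* lock of its group; such
 * entries are passed over so that only tasks actually blocked on this lock
 * are considered. Cost is linear in the length of the wait queue. */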
/* caller is responsible for locking */
static struct task_struct* rsm_mutex_find_hp_waiter(struct rsm_mutex *mutex,
						    struct task_struct* skip)
{
	wait_queue_t *q;
	struct list_head *pos;
	struct task_struct *queued = NULL, *found = NULL;

#ifdef CONFIG_LITMUS_DGL_SUPPORT
	dgl_wait_state_t *dgl_wait = NULL;
#endif

	list_for_each(pos, &mutex->wait.task_list) {
		q = list_entry(pos, wait_queue_t, task_list);

#ifdef CONFIG_LITMUS_DGL_SUPPORT
		if(q->func == dgl_wake_up) {
			dgl_wait = (dgl_wait_state_t*) q->private;
			if(tsk_rt(dgl_wait->task)->blocked_lock == &mutex->litmus_lock) {
				queued = dgl_wait->task;
			}
			else {
				queued = NULL;  // skip it.
			}
		}
		else {
			queued = (struct task_struct*) q->private;
		}
#else
		queued = (struct task_struct*) q->private;
#endif

		/* Compare task prios, find high prio task. */
		if (queued && queued != skip && edf_higher_prio(queued, found)) {
			found = queued;
		}
	}
	return found;
}


#ifdef CONFIG_LITMUS_DGL_SUPPORT

int rsm_mutex_is_owner(struct litmus_lock *l, struct task_struct *t)
{
	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
	return(mutex->owner == t);
}

// Returns 1 if the resource was immediately acquired.
// Assumes mutex->lock is held.
// Must set task state to TASK_UNINTERRUPTIBLE if task blocks.
int rsm_mutex_dgl_lock(struct litmus_lock *l, dgl_wait_state_t* dgl_wait,
		       wait_queue_t* wq_node)
{
	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
	struct task_struct *t = dgl_wait->task;

	int acquired_immediately = 0;

	BUG_ON(t != current);

	if (mutex->owner) {
		TRACE_TASK(t, "Enqueuing on lock %d.\n", l->ident);

		init_dgl_waitqueue_entry(wq_node, dgl_wait);

		set_task_state(t, TASK_UNINTERRUPTIBLE);
		__add_wait_queue_tail_exclusive(&mutex->wait, wq_node);
	} else {
		TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident);

		/* it's ours now */
		mutex->owner = t;

		raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);
		binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks,
			    struct nested_info, hp_binheap_node);
		raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);

		acquired_immediately = 1;
	}

	return acquired_immediately;
}

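/* Under DGL, enqueueing (rsm_mutex_dgl_lock above) and priority inheritance
 * are decoupled: this function is invoked -- with the coarse-grained DGL
 * spinlock held -- presumably once this lock becomes the request's "primary"
 * blocking lock. It records tsk_rt(t)->blocked_lock and, if 't' outranks the
 * current hp_waiter, pushes t's priority toward the lock holder. */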
void rsm_mutex_enable_priority(struct litmus_lock *l,
			       dgl_wait_state_t* dgl_wait)
{
	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
	struct task_struct *t = dgl_wait->task;
	struct task_struct *owner = mutex->owner;
	unsigned long flags = 0;  // these are unused under DGL coarse-grain locking

	BUG_ON(owner == t);

	tsk_rt(t)->blocked_lock = l;
	mb();

	if (edf_higher_prio(t, mutex->hp_waiter)) {

		struct task_struct *old_max_eff_prio;
		struct task_struct *new_max_eff_prio;
		struct task_struct *new_prio = NULL;

		if(mutex->hp_waiter)
			TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n",
				   mutex->hp_waiter->comm, mutex->hp_waiter->pid);
		else
			TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n");

		raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);

		old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
		mutex->hp_waiter = t;
		l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
		binheap_decrease(&l->nest.hp_binheap_node,
				 &tsk_rt(owner)->hp_blocked_tasks);
		new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);

		if(new_max_eff_prio != old_max_eff_prio) {
			TRACE_TASK(t, "is new hp_waiter.\n");

			if ((effective_priority(owner) == old_max_eff_prio) ||
			    (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))) {
				new_prio = new_max_eff_prio;
			}
		}
		else {
			TRACE_TASK(t, "no change in max_eff_prio of heap.\n");
		}

		if(new_prio) {
			litmus->nested_increase_prio(owner, new_prio,
						     &mutex->lock, flags);  // unlocks lock.
		}
		else {
			raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
			unlock_fine_irqrestore(&mutex->lock, flags);
		}
	}
	else {
		TRACE_TASK(t, "no change in hp_waiter.\n");
		unlock_fine_irqrestore(&mutex->lock, flags);
	}
}

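/* A DGL request blocks on at most one of its constituent locks at a time --
 * its "primary" lock. If 'l' was the primary for this request, clear
 * blocked_lock and pick the next still-contended lock in the group to block
 * on; otherwise this lock was acquired early and nothing more is needed. */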
static void select_next_lock_if_primary(struct litmus_lock *l,
					dgl_wait_state_t *dgl_wait)
{
	if(tsk_rt(dgl_wait->task)->blocked_lock == l) {
		TRACE_CUR("Lock %d in DGL was primary for %s/%d.\n",
			  l->ident, dgl_wait->task->comm, dgl_wait->task->pid);
		tsk_rt(dgl_wait->task)->blocked_lock = NULL;
		mb();
		select_next_lock(dgl_wait /*, l*/);  // pick the next lock to be blocked on
	}
	else {
		TRACE_CUR("Got lock early! Lock %d in DGL was NOT primary for %s/%d.\n",
			  l->ident, dgl_wait->task->comm, dgl_wait->task->pid);
	}
}
#endif




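/* Single-lock acquisition path (and the only path when DGL support is
 * compiled out). Only real-time tasks may lock (-EPERM otherwise); waiters
 * suspend in FIFO order and, if they outrank the current hp_waiter, push
 * their priority to the lock holder before scheduling away. */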
int rsm_mutex_lock(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct task_struct *owner;
	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
	wait_queue_t wait;
	unsigned long flags;

#ifdef CONFIG_LITMUS_DGL_SUPPORT
	raw_spinlock_t *dgl_lock;
#endif

	if (!is_realtime(t))
		return -EPERM;

#ifdef CONFIG_LITMUS_DGL_SUPPORT
	dgl_lock = litmus->get_dgl_spinlock(t);
#endif

	lock_global_irqsave(dgl_lock, flags);
	lock_fine_irqsave(&mutex->lock, flags);

	if (mutex->owner) {
		TRACE_TASK(t, "Blocking on lock %d.\n", l->ident);

		/* resource is not free => must suspend and wait */

		owner = mutex->owner;

		init_waitqueue_entry(&wait, t);

		tsk_rt(t)->blocked_lock = l;  /* record where we are blocked */
		mb();  // needed?

		/* FIXME: interruptible would be nice some day */
		set_task_state(t, TASK_UNINTERRUPTIBLE);

		__add_wait_queue_tail_exclusive(&mutex->wait, &wait);

		/* check if we need to activate priority inheritance */
		if (edf_higher_prio(t, mutex->hp_waiter)) {

			struct task_struct *old_max_eff_prio;
			struct task_struct *new_max_eff_prio;
			struct task_struct *new_prio = NULL;

			if(mutex->hp_waiter)
				TRACE_TASK(t, "has higher prio than hp_waiter (%s/%d).\n",
					   mutex->hp_waiter->comm, mutex->hp_waiter->pid);
			else
				TRACE_TASK(t, "has higher prio than hp_waiter (NIL).\n");

			raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);

			old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);
			mutex->hp_waiter = t;
			l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);
			binheap_decrease(&l->nest.hp_binheap_node,
					 &tsk_rt(owner)->hp_blocked_tasks);
			new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);

			if(new_max_eff_prio != old_max_eff_prio) {
				TRACE_TASK(t, "is new hp_waiter.\n");

				if ((effective_priority(owner) == old_max_eff_prio) ||
				    (__edf_higher_prio(new_max_eff_prio, BASE,
						       owner, EFFECTIVE))) {
					new_prio = new_max_eff_prio;
				}
			}
			else {
				TRACE_TASK(t, "no change in max_eff_prio of heap.\n");
			}

			if(new_prio) {
				litmus->nested_increase_prio(owner, new_prio, &mutex->lock,
							     flags);  // unlocks lock.
			}
			else {
				raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
				unlock_fine_irqrestore(&mutex->lock, flags);
			}
		}
		else {
			TRACE_TASK(t, "no change in hp_waiter.\n");

			unlock_fine_irqrestore(&mutex->lock, flags);
		}

		unlock_global_irqrestore(dgl_lock, flags);

		TS_LOCK_SUSPEND;

		/* We depend on the FIFO order. Thus, we don't need to recheck
		 * when we wake up; we are guaranteed to have the lock since
		 * there is only one wake up per release.
		 */

		schedule();

		TS_LOCK_RESUME;

		/* Since we hold the lock, no other task will change
		 * ->owner. We can thus check it without acquiring the spin
		 * lock. */
		BUG_ON(mutex->owner != t);

		TRACE_TASK(t, "Acquired lock %d.\n", l->ident);

	} else {
		TRACE_TASK(t, "Acquired lock %d with no blocking.\n", l->ident);

		/* it's ours now */
		mutex->owner = t;

		raw_spin_lock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock);
		binheap_add(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks,
			    struct nested_info, hp_binheap_node);
		raw_spin_unlock(&tsk_rt(mutex->owner)->hp_blocked_tasks_lock);


		unlock_fine_irqrestore(&mutex->lock, flags);
		unlock_global_irqrestore(dgl_lock, flags);
	}

	return 0;
}


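/* Release the lock: remove it from the holder's hp_blocked_tasks heap and
 * shed any priority inherited through it, pass ownership to the FIFO-first
 * waiter, establish inheritance on the new owner's behalf, and wake it --
 * unless it is a DGL request still waiting on other locks in its group. */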
int rsm_mutex_unlock(struct litmus_lock* l)
{
	struct task_struct *t = current, *next = NULL;
	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
	unsigned long flags;

	struct task_struct *old_max_eff_prio;

	int wake_up_task = 1;

#ifdef CONFIG_LITMUS_DGL_SUPPORT
	dgl_wait_state_t *dgl_wait = NULL;
	raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t);
#endif

	int err = 0;

	lock_global_irqsave(dgl_lock, flags);
	lock_fine_irqsave(&mutex->lock, flags);


	if (mutex->owner != t) {
		err = -EINVAL;
		unlock_fine_irqrestore(&mutex->lock, flags);
		unlock_global_irqrestore(dgl_lock, flags);
		return err;
	}


	raw_spin_lock(&tsk_rt(t)->hp_blocked_tasks_lock);

	TRACE_TASK(t, "Freeing lock %d\n", l->ident);

	old_max_eff_prio = top_priority(&tsk_rt(t)->hp_blocked_tasks);
	binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(t)->hp_blocked_tasks);

	if(tsk_rt(t)->inh_task) {
		struct task_struct *new_max_eff_prio =
			top_priority(&tsk_rt(t)->hp_blocked_tasks);

		if((new_max_eff_prio == NULL) ||
		   /* there was a change in eff prio */
		   ((new_max_eff_prio != old_max_eff_prio) &&
		    /* and owner had the old eff prio */
		    (effective_priority(t) == old_max_eff_prio)))
		{
			// old_max_eff_prio > new_max_eff_prio

			if(__edf_higher_prio(new_max_eff_prio, BASE, t, EFFECTIVE)) {
				TRACE_TASK(t, "new_max_eff_prio > task's eff_prio-- new_max_eff_prio: %s/%d task: %s/%d [%s/%d]\n",
					   new_max_eff_prio->comm, new_max_eff_prio->pid,
					   t->comm, t->pid, tsk_rt(t)->inh_task->comm,
					   tsk_rt(t)->inh_task->pid);
				WARN_ON(1);
			}

			litmus->decrease_prio(t, new_max_eff_prio);
		}
	}

	if(binheap_empty(&tsk_rt(t)->hp_blocked_tasks) &&
	   tsk_rt(t)->inh_task != NULL)
	{
		WARN_ON(tsk_rt(t)->inh_task != NULL);
		TRACE_TASK(t, "No more locks are held, but eff_prio = %s/%d\n",
			   tsk_rt(t)->inh_task->comm, tsk_rt(t)->inh_task->pid);
	}

	raw_spin_unlock(&tsk_rt(t)->hp_blocked_tasks_lock);


	/* check if there are jobs waiting for this resource */
#ifdef CONFIG_LITMUS_DGL_SUPPORT
	__waitqueue_dgl_remove_first(&mutex->wait, &dgl_wait, &next);
	if(dgl_wait) {
		next = dgl_wait->task;
		//select_next_lock_if_primary(l, dgl_wait);
	}
#else
	next = __waitqueue_remove_first(&mutex->wait);
#endif
	if (next) {
		/* next becomes the resource holder */
		mutex->owner = next;
		TRACE_CUR("lock ownership passed to %s/%d\n", next->comm, next->pid);

		/* determine new hp_waiter if necessary */
		if (next == mutex->hp_waiter) {

			TRACE_TASK(next, "was highest-prio waiter\n");
			/* next has the highest priority --- it doesn't need to
			 * inherit. However, we need to make sure that the
			 * next-highest priority in the queue is reflected in
			 * hp_waiter. */
			mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, next);
			l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
				effective_priority(mutex->hp_waiter) :
				NULL;

			if (mutex->hp_waiter)
				TRACE_TASK(mutex->hp_waiter, "is new highest-prio waiter\n");
			else
				TRACE("no further waiters\n");

			raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);

			binheap_add(&l->nest.hp_binheap_node,
				    &tsk_rt(next)->hp_blocked_tasks,
				    struct nested_info, hp_binheap_node);

#ifdef CONFIG_LITMUS_DGL_SUPPORT
			if(dgl_wait) {
				select_next_lock_if_primary(l, dgl_wait);
				//wake_up_task = atomic_dec_and_test(&dgl_wait->nr_remaining);
				--(dgl_wait->nr_remaining);
				wake_up_task = (dgl_wait->nr_remaining == 0);
			}
#endif
			raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
		}
		else {
			/* Well, if 'next' is not the highest-priority waiter,
			 * then it (probably) ought to inherit the highest-priority
			 * waiter's priority. */
			TRACE_TASK(next, "is not hp_waiter of lock %d.\n", l->ident);

			raw_spin_lock(&tsk_rt(next)->hp_blocked_tasks_lock);

			binheap_add(&l->nest.hp_binheap_node,
				    &tsk_rt(next)->hp_blocked_tasks,
				    struct nested_info, hp_binheap_node);

#ifdef CONFIG_LITMUS_DGL_SUPPORT
			if(dgl_wait) {
				select_next_lock_if_primary(l, dgl_wait);
				--(dgl_wait->nr_remaining);
				wake_up_task = (dgl_wait->nr_remaining == 0);
			}
#endif

			/* It is possible that 'next' *should* be the hp_waiter, but
			 * isn't because that update hasn't yet executed (the update
			 * operation is probably blocked on mutex->lock). So only
			 * inherit if the top of 'next''s heap is indeed the effective
			 * priority of hp_waiter. (We use l->nest.hp_waiter_eff_prio
			 * instead of effective_priority(hp_waiter) since the effective
			 * priority of hp_waiter can change, and that update may not
			 * have made it to this lock yet.)
			 */
#ifdef CONFIG_LITMUS_DGL_SUPPORT
			if((l->nest.hp_waiter_eff_prio != NULL) &&
			   (top_priority(&tsk_rt(next)->hp_blocked_tasks) ==
			    l->nest.hp_waiter_eff_prio))
			{
				if(dgl_wait && tsk_rt(next)->blocked_lock) {
					BUG_ON(wake_up_task);
					if(__edf_higher_prio(l->nest.hp_waiter_eff_prio, BASE,
							     next, EFFECTIVE)) {
						litmus->nested_increase_prio(next,
							l->nest.hp_waiter_eff_prio,
							&mutex->lock, flags);  // unlocks lock && hp_blocked_tasks_lock.
						goto out;  // all spinlocks are released. bail out now.
					}
				}
				else {
					litmus->increase_prio(next, l->nest.hp_waiter_eff_prio);
				}
			}

			raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
#else
			if(likely(top_priority(&tsk_rt(next)->hp_blocked_tasks) ==
				  l->nest.hp_waiter_eff_prio))
			{
				litmus->increase_prio(next, l->nest.hp_waiter_eff_prio);
			}
			raw_spin_unlock(&tsk_rt(next)->hp_blocked_tasks_lock);
#endif
		}

		if(wake_up_task) {
			TRACE_TASK(next, "waking up since it is no longer blocked.\n");

			tsk_rt(next)->blocked_lock = NULL;
			mb();

			wake_up_process(next);
		}
		else {
			TRACE_TASK(next, "is still blocked.\n");
		}
	}
	else {
		/* becomes available */
		mutex->owner = NULL;
	}

	unlock_fine_irqrestore(&mutex->lock, flags);

#ifdef CONFIG_LITMUS_DGL_SUPPORT
out:
#endif
	unlock_global_irqrestore(dgl_lock, flags);

	return err;
}

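/* Upward propagation after some waiter's priority has increased: the
 * blocking chain is traversed with relay-style locking (take this mutex's
 * fine-grained lock, then release the predecessor's), so no more than two
 * fine-grained locks are ever held at once. If 't' is no longer blocked
 * here, the update is forwarded to whichever lock it blocks on now. */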
void rsm_mutex_propagate_increase_inheritance(struct litmus_lock* l,
					      struct task_struct* t,
					      raw_spinlock_t* to_unlock,
					      unsigned long irqflags)
{
	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);

	// relay-style locking
	lock_fine(&mutex->lock);
	unlock_fine(to_unlock);

	if(tsk_rt(t)->blocked_lock == l) {  // prevent race on tsk_rt(t)->blocked_lock
		struct task_struct *owner = mutex->owner;

		struct task_struct *old_max_eff_prio;
		struct task_struct *new_max_eff_prio;

		raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);

		old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);

		if((t != mutex->hp_waiter) && edf_higher_prio(t, mutex->hp_waiter)) {
			TRACE_TASK(t, "is new highest-prio waiter by propagation.\n");
			mutex->hp_waiter = t;
		}
		if(t == mutex->hp_waiter) {
			// reflect the increased priority in the heap node.
			l->nest.hp_waiter_eff_prio = effective_priority(mutex->hp_waiter);

			BUG_ON(!binheap_is_in_heap(&l->nest.hp_binheap_node));
			BUG_ON(!binheap_is_in_this_heap(&l->nest.hp_binheap_node,
							&tsk_rt(owner)->hp_blocked_tasks));

			binheap_decrease(&l->nest.hp_binheap_node,
					 &tsk_rt(owner)->hp_blocked_tasks);
		}

		new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);


		if(new_max_eff_prio != old_max_eff_prio) {
			// new_max_eff_prio > old_max_eff_prio holds.
			if ((effective_priority(owner) == old_max_eff_prio) ||
			    (__edf_higher_prio(new_max_eff_prio, BASE, owner, EFFECTIVE))) {

				TRACE_CUR("Propagating inheritance to holder of lock %d.\n",
					  l->ident);

				// beware: recursion
				litmus->nested_increase_prio(owner, new_max_eff_prio,
							     &mutex->lock, irqflags);  // unlocks mutex->lock
			}
			else {
				TRACE_CUR("Lower priority than holder %s/%d. No propagation.\n",
					  owner->comm, owner->pid);
				raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
				unlock_fine_irqrestore(&mutex->lock, irqflags);
			}
		}
		else {
			TRACE_TASK(mutex->owner, "No change in maximum effective priority.\n");
			raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
			unlock_fine_irqrestore(&mutex->lock, irqflags);
		}
	}
	else {
		struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock;

		TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident);
		if(still_blocked) {
			TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n",
				   still_blocked->ident);
			if(still_blocked->ops->propagate_increase_inheritance) {
				/* Due to relay-style nesting of spinlocks (acq. A, acq. B,
				 * free A, free B), we know that task 't' has not released
				 * any locks behind us in this chain. Propagation just needs
				 * to catch up with task 't'. */
				still_blocked->ops->propagate_increase_inheritance(still_blocked,
										   t,
										   &mutex->lock,
										   irqflags);
			}
			else {
				TRACE_TASK(t,
					   "Inheritor is blocked on lock (%p) that does not "
					   "support nesting!\n",
					   still_blocked);
				unlock_fine_irqrestore(&mutex->lock, irqflags);
			}
		}
		else {
			unlock_fine_irqrestore(&mutex->lock, irqflags);
		}
	}
}

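/* Counterpart for priority decreases: if 't' was this lock's hp_waiter,
 * rescan the wait queue for a replacement and recompute (and possibly
 * lower) the owner's inherited priority, recursing down the chain. */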
void rsm_mutex_propagate_decrease_inheritance(struct litmus_lock* l,
					      struct task_struct* t,
					      raw_spinlock_t* to_unlock,
					      unsigned long irqflags)
{
	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);

	// relay-style locking
	lock_fine(&mutex->lock);
	unlock_fine(to_unlock);

	if(tsk_rt(t)->blocked_lock == l) {  // prevent race on tsk_rt(t)->blocked_lock
		if(t == mutex->hp_waiter) {
			struct task_struct *owner = mutex->owner;

			struct task_struct *old_max_eff_prio;
			struct task_struct *new_max_eff_prio;

			raw_spin_lock(&tsk_rt(owner)->hp_blocked_tasks_lock);

			old_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);

			binheap_delete(&l->nest.hp_binheap_node, &tsk_rt(owner)->hp_blocked_tasks);
			mutex->hp_waiter = rsm_mutex_find_hp_waiter(mutex, NULL);
			l->nest.hp_waiter_eff_prio = (mutex->hp_waiter) ?
				effective_priority(mutex->hp_waiter) : NULL;
			binheap_add(&l->nest.hp_binheap_node,
				    &tsk_rt(owner)->hp_blocked_tasks,
				    struct nested_info, hp_binheap_node);

			new_max_eff_prio = top_priority(&tsk_rt(owner)->hp_blocked_tasks);

			if((old_max_eff_prio != new_max_eff_prio) &&
			   (effective_priority(owner) == old_max_eff_prio))
			{
				// Need to set new effective_priority for owner.

				struct task_struct *decreased_prio;

				TRACE_CUR("Propagating decreased inheritance to holder of lock %d.\n",
					  l->ident);

				if(__edf_higher_prio(new_max_eff_prio, BASE, owner, BASE)) {
					TRACE_CUR("%s/%d has greater base priority than base priority of owner (%s/%d) of lock %d.\n",
						  (new_max_eff_prio) ? new_max_eff_prio->comm : "nil",
						  (new_max_eff_prio) ? new_max_eff_prio->pid : -1,
						  owner->comm,
						  owner->pid,
						  l->ident);

					decreased_prio = new_max_eff_prio;
				}
				else {
					TRACE_CUR("%s/%d has lesser base priority than base priority of owner (%s/%d) of lock %d.\n",
						  (new_max_eff_prio) ? new_max_eff_prio->comm : "nil",
						  (new_max_eff_prio) ? new_max_eff_prio->pid : -1,
						  owner->comm,
						  owner->pid,
						  l->ident);

					decreased_prio = NULL;
				}

				// beware: recursion
				litmus->nested_decrease_prio(owner, decreased_prio,
							     &mutex->lock, irqflags);  // will unlock mutex->lock
			}
			else {
				raw_spin_unlock(&tsk_rt(owner)->hp_blocked_tasks_lock);
				unlock_fine_irqrestore(&mutex->lock, irqflags);
			}
		}
		else {
			TRACE_TASK(t, "is not hp_waiter. No propagation.\n");
			unlock_fine_irqrestore(&mutex->lock, irqflags);
		}
	}
	else {
		struct litmus_lock *still_blocked = tsk_rt(t)->blocked_lock;

		TRACE_TASK(t, "is not blocked on lock %d.\n", l->ident);
		if(still_blocked) {
			TRACE_TASK(t, "is still blocked on a lock though (lock %d).\n",
				   still_blocked->ident);
			if(still_blocked->ops->propagate_decrease_inheritance) {
				/* Due to relay-style nesting of spinlocks (acq. A, acq. B,
				 * free A, free B), we know that task 't' has not released
				 * any locks behind us in this chain. Propagation just needs
				 * to catch up with task 't'. */
				still_blocked->ops->propagate_decrease_inheritance(still_blocked,
										   t,
										   &mutex->lock,
										   irqflags);
			}
			else {
				TRACE_TASK(t, "Inheritor is blocked on lock (%p) that does not support nesting!\n",
					   still_blocked);
				unlock_fine_irqrestore(&mutex->lock, irqflags);
			}
		}
		else {
			unlock_fine_irqrestore(&mutex->lock, irqflags);
		}
	}
}

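/* Teardown: close() releases the mutex if the caller still owns it (e.g.,
 * on file-descriptor close or task exit), free() reclaims the memory, and
 * new() allocates an unowned, fully initialized mutex. */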
int rsm_mutex_close(struct litmus_lock* l)
{
	struct task_struct *t = current;
	struct rsm_mutex *mutex = rsm_mutex_from_lock(l);
	unsigned long flags;

	int owner;

#ifdef CONFIG_LITMUS_DGL_SUPPORT
	raw_spinlock_t *dgl_lock = litmus->get_dgl_spinlock(t);
#endif

	lock_global_irqsave(dgl_lock, flags);
	lock_fine_irqsave(&mutex->lock, flags);

	owner = (mutex->owner == t);

	unlock_fine_irqrestore(&mutex->lock, flags);
	unlock_global_irqrestore(dgl_lock, flags);

	if (owner)
		rsm_mutex_unlock(l);

	return 0;
}

void rsm_mutex_free(struct litmus_lock* lock)
{
	kfree(rsm_mutex_from_lock(lock));
}

struct litmus_lock* rsm_mutex_new(struct litmus_lock_ops* ops)
{
	struct rsm_mutex* mutex;

	mutex = kmalloc(sizeof(*mutex), GFP_KERNEL);
	if (!mutex)
		return NULL;

	mutex->litmus_lock.ops = ops;
	mutex->owner = NULL;
	mutex->hp_waiter = NULL;
	init_waitqueue_head(&mutex->wait);


#ifdef CONFIG_DEBUG_SPINLOCK
	{
		__raw_spin_lock_init(&mutex->lock,
				     ((struct litmus_lock*)mutex)->cheat_lockdep,
				     &((struct litmus_lock*)mutex)->key);
	}
#else
	raw_spin_lock_init(&mutex->lock);
#endif

	((struct litmus_lock*)mutex)->nest.hp_waiter_ptr = &mutex->hp_waiter;

	return &mutex->litmus_lock;
}

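For orientation, here is a minimal sketch of how a scheduler plugin might wire these functions into an ops table and instantiate a lock. Only the rsm_mutex_* functions and the two propagate_* ops fields (which this file dereferences) are taken from the patch; the remaining litmus_lock_ops field names and the allocate_rsm_mutex() wrapper are illustrative assumptions, not part of this commit.

/* Hypothetical ops table -- all field names except the two propagate_*
 * entries are illustrative assumptions. */
static struct litmus_lock_ops rsm_mutex_lock_ops = {
	.lock		= rsm_mutex_lock,
	.unlock		= rsm_mutex_unlock,
	.close		= rsm_mutex_close,
	.deallocate	= rsm_mutex_free,
	.propagate_increase_inheritance = rsm_mutex_propagate_increase_inheritance,
	.propagate_decrease_inheritance = rsm_mutex_propagate_decrease_inheritance,
#ifdef CONFIG_LITMUS_DGL_SUPPORT
	.dgl_lock	 = rsm_mutex_dgl_lock,
	.is_owner	 = rsm_mutex_is_owner,
	.enable_priority = rsm_mutex_enable_priority,
#endif
};

/* Hypothetical plugin-side allocation hook. */
static struct litmus_lock* allocate_rsm_mutex(void)
{
	return rsm_mutex_new(&rsm_mutex_lock_ops);  /* NULL on allocation failure */
}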
