diff options
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--	kernel/mutex.c	125
1 files changed, 125 insertions, 0 deletions
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d607ed5dd441..2f363b9bfc1f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -498,3 +498,128 @@ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
 	return 1;
 }
 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
+
+void mutex_lock_sfx(struct mutex *lock,
+		    side_effect_t pre, unsigned long pre_arg,
+		    side_effect_t post, unsigned long post_arg)
+{
+	long state = TASK_UNINTERRUPTIBLE;
+
+	struct task_struct *task = current;
+	struct mutex_waiter waiter;
+	unsigned long flags;
+
+	preempt_disable();
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+
+	spin_lock_mutex(&lock->wait_lock, flags);
+
+	if (pre) {
+		if (unlikely(pre(pre_arg))) {
+			/*
+			 * The pre-side-effect vetoed the acquisition; bail
+			 * out. Note that returning after mutex_acquire()
+			 * confuses lockdep's CONFIG_PROVE_LOCKING.
+			 */
+			spin_unlock_mutex(&lock->wait_lock, flags);
+			preempt_enable();
+			return;
+		}
+	}
+
+	debug_mutex_lock_common(lock, &waiter);
+	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+
+	/* add waiting tasks to the end of the waitqueue (FIFO): */
+	list_add_tail(&waiter.list, &lock->wait_list);
+	waiter.task = task;
+
+	if (atomic_xchg(&lock->count, -1) == 1)
+		goto done;
+
+	lock_contended(&lock->dep_map, _RET_IP_);
+
+	for (;;) {
+		/*
+		 * Let's try to take the lock again - this is needed even if
+		 * we get here for the first time (shortly after failing to
+		 * acquire the lock), to make sure that we get a wakeup once
+		 * it's unlocked. Later on, if we sleep, this is the
+		 * operation that gives us the lock. We xchg it to -1, so
+		 * that when we release the lock, we properly wake up the
+		 * other waiters:
+		 */
+		if (atomic_xchg(&lock->count, -1) == 1)
+			break;
+
+		__set_task_state(task, state);
+
+		/* didn't get the lock, go to sleep: */
+		spin_unlock_mutex(&lock->wait_lock, flags);
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+		spin_lock_mutex(&lock->wait_lock, flags);
+	}
+
+done:
+	lock_acquired(&lock->dep_map, _RET_IP_);
+	/* got the lock - rejoice! */
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);
+
+	/* set it to 0 if there are no waiters left: */
+	if (likely(list_empty(&lock->wait_list)))
+		atomic_set(&lock->count, 0);
+
+	if (post)
+		post(post_arg);
+
+	spin_unlock_mutex(&lock->wait_lock, flags);
+
+	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
+}
+EXPORT_SYMBOL(mutex_lock_sfx);
+
+void mutex_unlock_sfx(struct mutex *lock,
+		      side_effect_t pre, unsigned long pre_arg,
+		      side_effect_t post, unsigned long post_arg)
+{
+	unsigned long flags;
+
+	spin_lock_mutex(&lock->wait_lock, flags);
+
+	if (pre)
+		pre(pre_arg);
+
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
+	debug_mutex_unlock(lock);
+
+	/*
+	 * Some architectures leave the lock unlocked in the fastpath failure
+	 * case, others need to leave it locked. In the latter case we have
+	 * to unlock it here.
+	 */
+	if (__mutex_slowpath_needs_to_unlock())
+		atomic_set(&lock->count, 1);
+
+	if (!list_empty(&lock->wait_list)) {
+		/* get the first entry from the wait-list: */
+		struct mutex_waiter *waiter =
+				list_entry(lock->wait_list.next,
+					   struct mutex_waiter, list);
+
+		debug_mutex_wake_waiter(lock, waiter);
+
+		wake_up_process(waiter->task);
+	}
+
+	if (post)
+		post(post_arg);
+
+	spin_unlock_mutex(&lock->wait_lock, flags);
+}
+EXPORT_SYMBOL(mutex_unlock_sfx);
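
For context, a minimal usage sketch follows. It is not part of the patch: the
struct my_resource and my_* callbacks are hypothetical, and it assumes
side_effect_t is a function pointer of the shape implied by the calls above,
roughly typedef unsigned long (*side_effect_t)(unsigned long). Both hooks run
under lock->wait_lock with preemption disabled, so they must not sleep.

/*
 * Hypothetical example. Assumed typedef, matching the calls above:
 *
 *     typedef unsigned long (*side_effect_t)(unsigned long);
 */
struct my_resource {
	struct mutex lock;
	int users;	/* bookkeeping updated atomically by the hooks */
};

/* post-lock hook: runs once the mutex is held, still under wait_lock */
static unsigned long my_post_lock(unsigned long arg)
{
	struct my_resource *res = (struct my_resource *)arg;

	res->users++;
	return 0;
}

/* pre-unlock hook: runs just before any waiter is woken */
static unsigned long my_pre_unlock(unsigned long arg)
{
	struct my_resource *res = (struct my_resource *)arg;

	res->users--;
	return 0;
}

static void my_use_resource(struct my_resource *res)
{
	mutex_lock_sfx(&res->lock, NULL, 0,
		       my_post_lock, (unsigned long)res);

	/* ... critical section: res->users is consistent here ... */

	mutex_unlock_sfx(&res->lock,
			 my_pre_unlock, (unsigned long)res,
			 NULL, 0);
}

Note that a non-zero return from the pre hook of mutex_lock_sfx() aborts the
acquisition (the unlikely(pre(pre_arg)) check above), yet the function returns
void; a caller whose pre hook can veto therefore needs some out-of-band state
reachable through pre_arg to tell whether the mutex is actually held before
entering the critical section.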