path: root/kernel/mutex.c
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--	kernel/mutex.c | 141
1 file changed, 141 insertions(+), 0 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 200407c1502f..435685ecd068 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -496,3 +496,144 @@ int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
 	return 1;
 }
 EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
+
+/* Based on __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_); */
+
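+/*
+ * mutex_lock_sfx - acquire @lock, running side-effect hooks under the
+ * wait_lock.  (Semantics inferred from the code below.)
+ * @pre: optional; called before queueing as a waiter.  A nonzero return
+ *       aborts the acquisition and the function returns immediately.
+ * @post: optional; called once the lock is held, still under the wait_lock.
+ */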
+void mutex_lock_sfx(struct mutex *lock,
+		    side_effect_t pre, unsigned long pre_arg,
+		    side_effect_t post, unsigned long post_arg)
+{
+	long state = TASK_UNINTERRUPTIBLE;
+	unsigned int subclass = 0;
+	unsigned long ip = _RET_IP_;
+
+	struct task_struct *task = current;
+	struct mutex_waiter waiter;
+	unsigned long flags;
+
+	preempt_disable();
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+
+	spin_lock_mutex(&lock->wait_lock, flags);
+
+	if (pre) {
+		if (unlikely(pre(pre_arg))) {
+			/*
+			 * Bail out without taking the lock.  Note that this
+			 * confuses lockdep's CONFIG_PROVE_LOCKING, since
+			 * mutex_acquire() was already called above without a
+			 * matching mutex_release().
+			 */
+			spin_unlock_mutex(&lock->wait_lock, flags);
+			preempt_enable();
+			return;
+		}
+	}
+
+	debug_mutex_lock_common(lock, &waiter);
+	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+
+	/* add waiting tasks to the end of the waitqueue (FIFO): */
+	list_add_tail(&waiter.list, &lock->wait_list);
+	waiter.task = task;
+
+	if (atomic_xchg(&lock->count, -1) == 1)
+		goto done;
+
+	lock_contended(&lock->dep_map, ip);
+
+	for (;;) {
+		/*
+		 * Let's try to take the lock again - this is needed even if
+		 * we get here for the first time (shortly after failing to
+		 * acquire the lock), to make sure that we get a wakeup once
+		 * it's unlocked. Later on, if we sleep, this is the
+		 * operation that gives us the lock. We xchg it to -1, so
+		 * that when we release the lock, we properly wake up the
+		 * other waiters:
+		 */
+		if (atomic_xchg(&lock->count, -1) == 1)
+			break;
+
+		__set_task_state(task, state);
+
+		/* didn't get the lock, go to sleep: */
+		spin_unlock_mutex(&lock->wait_lock, flags);
+		preempt_enable_no_resched();
+		schedule();
+		preempt_disable();
+		spin_lock_mutex(&lock->wait_lock, flags);
+	}
+
+done:
+	lock_acquired(&lock->dep_map, ip);
+	/* got the lock - rejoice! */
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);
+
+	/* set it to 0 if there are no waiters left: */
+	if (likely(list_empty(&lock->wait_list)))
+		atomic_set(&lock->count, 0);
+
+	if (post)
+		post(post_arg);
+
+	spin_unlock_mutex(&lock->wait_lock, flags);
+
+	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
+}
+EXPORT_SYMBOL(mutex_lock_sfx);
+
+/* Based on __mutex_unlock_common_slowpath(lock_count, 1); */
+
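+/*
+ * mutex_unlock_sfx - release @lock, running side-effect hooks under the
+ * wait_lock.  (Semantics inferred from the code below.)
+ * @pre: optional; called before the lock is released.
+ * @post: optional; called after the first waiter (if any) has been woken.
+ */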
+void mutex_unlock_sfx(struct mutex *lock,
+		      side_effect_t pre, unsigned long pre_arg,
+		      side_effect_t post, unsigned long post_arg)
+{
+	unsigned long flags;
+
+	spin_lock_mutex(&lock->wait_lock, flags);
+
+	if (pre)
+		pre(pre_arg);
+
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
+	debug_mutex_unlock(lock);
+
+	/*
+	 * Some architectures leave the lock unlocked in the fastpath failure
+	 * case, others need to leave it locked.  In the latter case we have
+	 * to unlock it here.
+	 */
+	if (__mutex_slowpath_needs_to_unlock())
+		atomic_set(&lock->count, 1);
+
+	if (!list_empty(&lock->wait_list)) {
+		/* get the first entry from the wait-list: */
+		struct mutex_waiter *waiter =
+				list_entry(lock->wait_list.next,
+					   struct mutex_waiter, list);
+
+		debug_mutex_wake_waiter(lock, waiter);
+
+		wake_up_process(waiter->task);
+	}
+
+	if (post)
+		post(post_arg);
+
+	spin_unlock_mutex(&lock->wait_lock, flags);
+}
+EXPORT_SYMBOL(mutex_unlock_sfx);
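
For reference, a minimal caller might look like the sketch below. All names here are hypothetical, and it assumes side_effect_t is a typedef along the lines of int (*)(unsigned long), which is consistent with how pre/post are invoked in the patch. Note that both callbacks run under lock->wait_lock with preemption disabled, so they must not sleep.

#include <linux/mutex.h>

/* Hypothetical; not part of this patch.  Assumed callback signature:
 *	typedef int (*side_effect_t)(unsigned long);
 */
struct my_stats {
	unsigned long attempts;
	unsigned long acquired;
};

static DEFINE_MUTEX(my_mutex);
static struct my_stats my_stats;

/* pre callback: runs under lock->wait_lock with preemption disabled,
 * so it must not sleep.  A nonzero return aborts the acquisition. */
static int count_attempt(unsigned long arg)
{
	((struct my_stats *)arg)->attempts++;
	return 0;	/* 0: go ahead and take the lock */
}

/* post callback: runs under lock->wait_lock once the mutex is held. */
static int count_acquire(unsigned long arg)
{
	((struct my_stats *)arg)->acquired++;
	return 0;	/* return value is ignored for post callbacks */
}

static void my_critical_section(void)
{
	mutex_lock_sfx(&my_mutex,
		       count_attempt, (unsigned long)&my_stats,
		       count_acquire, (unsigned long)&my_stats);

	/* ... critical section ... */

	/* both hooks are optional; NULL callbacks are skipped */
	mutex_unlock_sfx(&my_mutex, NULL, 0, NULL, 0);
}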