Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--	kernel/mutex.c	48
1 file changed, 24 insertions, 24 deletions
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 2f363b9bfc1f..96bcecd385d3 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -511,12 +511,12 @@ void mutex_lock_sfx(struct mutex *lock,
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire(&lock->dep_map, subclass, 0, ip);

	spin_lock_mutex(&lock->wait_lock, flags);

	if(pre)
	{
		if(unlikely(pre(pre_arg)))
@@ -530,16 +530,16 @@ void mutex_lock_sfx(struct mutex *lock,

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Lets try to take the lock again - this is needed even if
@@ -552,9 +552,9 @@ void mutex_lock_sfx(struct mutex *lock,
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		__set_task_state(task, state);

		/* didnt get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		preempt_enable_no_resched();
@@ -562,22 +562,22 @@ void mutex_lock_sfx(struct mutex *lock,
		preempt_disable();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	if(post)
		post(post_arg);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();
}
@@ -588,16 +588,16 @@ void mutex_unlock_sfx(struct mutex *lock,
		      side_effect_t post, unsigned long post_arg)
{
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	if(pre)
		pre(pre_arg);

	//mutex_release(&lock->dep_map, nested, _RET_IP_);
	mutex_release(&lock->dep_map, 1, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the later case we have to
@@ -605,21 +605,21 @@ void mutex_unlock_sfx(struct mutex *lock,
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	if(post)
		post(post_arg);

	spin_unlock_mutex(&lock->wait_lock, flags);
}
EXPORT_SYMBOL(mutex_unlock_sfx);
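
For reference, a minimal usage sketch of the two helpers shown in this diff. The full mutex_lock_sfx() parameter list is not visible in these hunks, so the sketch assumes it mirrors mutex_unlock_sfx() and takes (lock, pre, pre_arg, post, post_arg); it also assumes side_effect_t is an int-returning callback taking an unsigned long, inferred from the pre(pre_arg)/post(post_arg) calls and the unlikely(pre(pre_arg)) check above. The typedef, prototypes, callback names, and the demo lock below are all hypothetical, not taken from the patched tree.

#include <linux/mutex.h>

/* Assumed shape of the callback type; the real typedef lives elsewhere
 * in this tree. */
typedef int (*side_effect_t)(unsigned long);

/* Assumed prototypes, mirroring the unlock signature visible in the
 * @@ -588 hunk. */
void mutex_lock_sfx(struct mutex *lock,
		    side_effect_t pre, unsigned long pre_arg,
		    side_effect_t post, unsigned long post_arg);
void mutex_unlock_sfx(struct mutex *lock,
		      side_effect_t pre, unsigned long pre_arg,
		      side_effect_t post, unsigned long post_arg);

/* Hypothetical callback: runs under lock->wait_lock before the slowpath
 * acquire.  A non-zero return appears to short-circuit the acquire (see
 * the unlikely(pre(pre_arg)) branch above); return 0 to proceed. */
static int demo_pre(unsigned long arg)
{
	return 0;
}

/* Hypothetical callback: runs under lock->wait_lock after the wakeup in
 * mutex_unlock_sfx(); its return value is ignored there. */
static int demo_post(unsigned long arg)
{
	return 0;
}

static DEFINE_MUTEX(demo_lock);

static void demo(void)
{
	/* NULL callbacks are fine: both functions guard them with
	 * if(pre)/if(post). */
	mutex_lock_sfx(&demo_lock, demo_pre, 0, NULL, 0);
	/* ... critical section ... */
	mutex_unlock_sfx(&demo_lock, NULL, 0, demo_post, 0);
}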