author		Rik van Riel <riel@surriel.com>	2013-04-30 22:15:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-05-01 11:12:58 -0400
commit		9f1bc2c9022c1d4944c4a1a44c2f365487420aca (patch)
tree		a1764801b992d87e1c488035135f10ed80413944 /ipc
parent		c460b662d5cae467f1c341c59b02a5c5e68fed0b (diff)
ipc,sem: have only one list in struct sem_queue
Having only one list in struct sem_queue, and only queueing simple
semaphore operations on the list for the semaphore involved, allows us
to introduce finer grained locking for semtimedop.

Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Cc: Chegu Vinod <chegu_vinod@hp.com>
Cc: Emmanuel Benisty <benisty.e@gmail.com>
Cc: Jason Low <jason.low2@hp.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Peter Hurley <peter@hurleysoftware.com>
Cc: Stanislav Kinsbursky <skinsbursky@parallels.com>
Tested-by: Sedat Dilek <sedat.dilek@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
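In other words (a minimal sketch taken from the new semtimedop() queueing
code in the diff below, with the surrounding locking and error handling
elided): a single-sop waiter is queued only on the per-semaphore pending
list through the one remaining list member, while a multi-sop waiter goes
on the array-wide list and bumps complex_count:

	if (nsops == 1) {
		/* Simple operation: queue on the per-semaphore list only. */
		struct sem *curr = &sma->sem_base[sops->sem_num];

		if (alter)
			list_add_tail(&queue.list, &curr->sem_pending);
		else
			list_add(&queue.list, &curr->sem_pending);
	} else {
		/* Complex operation: queue on the array-wide list. */
		if (alter)
			list_add_tail(&queue.list, &sma->sem_pending);
		else
			list_add(&queue.list, &sma->sem_pending);
		sma->complex_count++;
	}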
Diffstat (limited to 'ipc')
-rw-r--r--	ipc/sem.c	65
1 file changed, 34 insertions, 31 deletions
diff --git a/ipc/sem.c b/ipc/sem.c
index 70020066ac0d..f68b61749a85 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -99,7 +99,6 @@ struct sem {
 
 /* One queue for each sleeping process in the system. */
 struct sem_queue {
-	struct list_head	simple_list; /* queue of pending operations */
 	struct list_head	list;	 /* queue of pending operations */
 	struct task_struct	*sleeper; /* this process */
 	struct sem_undo		*undo;	 /* undo structure */
@@ -519,7 +518,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
 	q->status = IN_WAKEUP;
 	q->pid = error;
 
-	list_add_tail(&q->simple_list, pt);
+	list_add_tail(&q->list, pt);
 }
 
 /**
@@ -537,7 +536,7 @@ static void wake_up_sem_queue_do(struct list_head *pt)
 	int did_something;
 
 	did_something = !list_empty(pt);
-	list_for_each_entry_safe(q, t, pt, simple_list) {
+	list_for_each_entry_safe(q, t, pt, list) {
 		wake_up_process(q->sleeper);
 		/* q can disappear immediately after writing q->status. */
 		smp_wmb();
@@ -550,9 +549,7 @@ static void wake_up_sem_queue_do(struct list_head *pt)
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
 {
 	list_del(&q->list);
-	if (q->nsops == 1)
-		list_del(&q->simple_list);
-	else
+	if (q->nsops > 1)
 		sma->complex_count--;
 }
 
@@ -605,9 +602,9 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q)
 	}
 	/*
 	 * semval is 0. Check if there are wait-for-zero semops.
-	 * They must be the first entries in the per-semaphore simple queue
+	 * They must be the first entries in the per-semaphore queue
 	 */
-	h = list_first_entry(&curr->sem_pending, struct sem_queue, simple_list);
+	h = list_first_entry(&curr->sem_pending, struct sem_queue, list);
 	BUG_ON(h->nsops != 1);
 	BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);
 
@@ -627,8 +624,9 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q)
  * @pt: list head for the tasks that must be woken up.
  *
  * update_queue must be called after a semaphore in a semaphore array
- * was modified. If multiple semaphore were modified, then @semnum
- * must be set to -1.
+ * was modified. If multiple semaphores were modified, update_queue must
+ * be called with semnum = -1, as well as with the number of each modified
+ * semaphore.
  * The tasks that must be woken up are added to @pt. The return code
  * is stored in q->pid.
  * The function return 1 if at least one semop was completed successfully.
@@ -638,30 +636,19 @@ static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
 	struct sem_queue *q;
 	struct list_head *walk;
 	struct list_head *pending_list;
-	int offset;
 	int semop_completed = 0;
 
-	/* if there are complex operations around, then knowing the semaphore
-	 * that was modified doesn't help us. Assume that multiple semaphores
-	 * were modified.
-	 */
-	if (sma->complex_count)
-		semnum = -1;
-
-	if (semnum == -1) {
+	if (semnum == -1)
 		pending_list = &sma->sem_pending;
-		offset = offsetof(struct sem_queue, list);
-	} else {
+	else
 		pending_list = &sma->sem_base[semnum].sem_pending;
-		offset = offsetof(struct sem_queue, simple_list);
-	}
 
 again:
 	walk = pending_list->next;
 	while (walk != pending_list) {
 		int error, restart;
 
-		q = (struct sem_queue *)((char *)walk - offset);
+		q = container_of(walk, struct sem_queue, list);
 		walk = walk->next;
 
 		/* If we are scanning the single sop, per-semaphore list of
@@ -720,9 +707,18 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
 	if (sma->complex_count || sops == NULL) {
 		if (update_queue(sma, -1, pt))
 			otime = 1;
+	}
+
+	if (!sops) {
+		/* No semops; something special is going on. */
+		for (i = 0; i < sma->sem_nsems; i++) {
+			if (update_queue(sma, i, pt))
+				otime = 1;
+		}
 		goto done;
 	}
 
+	/* Check the semaphores that were modified. */
 	for (i = 0; i < nsops; i++) {
 		if (sops[i].sem_op > 0 ||
 			(sops[i].sem_op < 0 &&
@@ -793,6 +789,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 	struct sem_queue *q, *tq;
 	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
 	struct list_head tasks;
+	int i;
 
 	/* Free the existing undo structures for this semaphore set. */
 	assert_spin_locked(&sma->sem_perm.lock);
@@ -811,6 +808,13 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 		unlink_queue(sma, q);
 		wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
 	}
+	for (i = 0; i < sma->sem_nsems; i++) {
+		struct sem *sem = sma->sem_base + i;
+		list_for_each_entry_safe(q, tq, &sem->sem_pending, list) {
+			unlink_queue(sma, q);
+			wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
+		}
+	}
 
 	/* Remove the semaphore set from the IDR */
 	sem_rmid(ns, sma);
@@ -1565,21 +1569,20 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	queue.undo = un;
 	queue.pid = task_tgid_vnr(current);
 	queue.alter = alter;
-	if (alter)
-		list_add_tail(&queue.list, &sma->sem_pending);
-	else
-		list_add(&queue.list, &sma->sem_pending);
 
 	if (nsops == 1) {
 		struct sem *curr;
 		curr = &sma->sem_base[sops->sem_num];
 
 		if (alter)
-			list_add_tail(&queue.simple_list, &curr->sem_pending);
+			list_add_tail(&queue.list, &curr->sem_pending);
 		else
-			list_add(&queue.simple_list, &curr->sem_pending);
+			list_add(&queue.list, &curr->sem_pending);
 	} else {
-		INIT_LIST_HEAD(&queue.simple_list);
+		if (alter)
+			list_add_tail(&queue.list, &sma->sem_pending);
+		else
+			list_add(&queue.list, &sma->sem_pending);
 		sma->complex_count++;
 	}
 