Diffstat (limited to 'ipc/sem.c')
-rw-r--r--	ipc/sem.c	178
1 file changed, 92 insertions, 86 deletions
@@ -188,7 +188,7 @@ void sem_exit_ns(struct ipc_namespace *ns)
 }
 #endif
 
-void __init sem_init (void)
+void __init sem_init(void)
 {
 	sem_init_ns(&init_ipc_ns);
 	ipc_init_proc_interface("sysvipc/sem",
@@ -225,7 +225,7 @@ static void unmerge_queues(struct sem_array *sma)
 }
 
 /**
- * merge_queues - Merge single semop queues into global queue
+ * merge_queues - merge single semop queues into global queue
  * @sma: semaphore array
  *
  * This function merges all per-semaphore queues into the global queue.
@@ -394,7 +394,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
 	/* ipc_rmid() may have already freed the ID while sem_lock
 	 * was spinning: verify that the structure is still valid
 	 */
-	if (!ipcp->deleted)
+	if (ipc_valid_object(ipcp))
 		return container_of(ipcp, struct sem_array, sem_perm);
 
 	sem_unlock(sma, *locknum);
@@ -445,11 +445,11 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
  *	* call wake_up_process
  *	* set queue.status to the final value.
  * - the previously blocked thread checks queue.status:
- *   	* if it's IN_WAKEUP, then it must wait until the value changes
- *   	* if it's not -EINTR, then the operation was completed by
- *   	  update_queue. semtimedop can return queue.status without
- *   	  performing any operation on the sem array.
- *   	* otherwise it must acquire the spinlock and check what's up.
+ *	* if it's IN_WAKEUP, then it must wait until the value changes
+ *	* if it's not -EINTR, then the operation was completed by
+ *	  update_queue. semtimedop can return queue.status without
+ *	  performing any operation on the sem array.
+ *	* otherwise it must acquire the spinlock and check what's up.
  *
  * The two-stage algorithm is necessary to protect against the following
  * races:
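On the sleeper side, the check described in the comment block above is done by get_queue_result() later in this file (its kernel-doc is touched further down). A rough sketch of that spin-until-final-status pattern, paraphrased rather than copied from the file:

/* Sketch of the waiter-side step: the waker publishes IN_WAKEUP first,
 * then the real result, so spin until the final value is visible. */
static int get_queue_result(struct sem_queue *q)
{
	int error = q->status;

	while (unlikely(error == IN_WAKEUP)) {
		cpu_relax();
		error = q->status;
	}
	return error;
}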
@@ -474,7 +474,6 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
  *
  * Called with sem_ids.rwsem held (as a writer)
  */
-
 static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 {
 	int id;
@@ -491,12 +490,12 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 	if (ns->used_sems + nsems > ns->sc_semmns)
 		return -ENOSPC;
 
-	size = sizeof (*sma) + nsems * sizeof (struct sem);
+	size = sizeof(*sma) + nsems * sizeof(struct sem);
 	sma = ipc_rcu_alloc(size);
-	if (!sma) {
+	if (!sma)
 		return -ENOMEM;
-	}
-	memset (sma, 0, size);
+
+	memset(sma, 0, size);
 
 	sma->sem_perm.mode = (semflg & S_IRWXUGO);
 	sma->sem_perm.key = key;
@@ -584,10 +583,11 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
 	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 }
 
-/** perform_atomic_semop - Perform (if possible) a semaphore operation
+/**
+ * perform_atomic_semop - Perform (if possible) a semaphore operation
  * @sma: semaphore array
  * @sops: array with operations that should be checked
- * @nsems: number of sops
+ * @nsops: number of operations
  * @un: undo array
  * @pid: pid that did the change
  *
@@ -595,19 +595,18 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
  * Returns 1 if the operation is impossible, the caller must sleep.
  * Negative values are error codes.
  */
-
 static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
 			     int nsops, struct sem_undo *un, int pid)
 {
 	int result, sem_op;
 	struct sembuf *sop;
-	struct sem * curr;
+	struct sem *curr;
 
 	for (sop = sops; sop < sops + nsops; sop++) {
 		curr = sma->sem_base + sop->sem_num;
 		sem_op = sop->sem_op;
 		result = curr->semval;
 
 		if (!sem_op && result)
 			goto would_block;
 
@@ -616,25 +615,24 @@ static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
 			goto would_block;
 		if (result > SEMVMX)
 			goto out_of_range;
+
 		if (sop->sem_flg & SEM_UNDO) {
 			int undo = un->semadj[sop->sem_num] - sem_op;
-			/*
-			 *	Exceeding the undo range is an error.
-			 */
+			/* Exceeding the undo range is an error. */
 			if (undo < (-SEMAEM - 1) || undo > SEMAEM)
 				goto out_of_range;
+			un->semadj[sop->sem_num] = undo;
 		}
+
 		curr->semval = result;
 	}
 
 	sop--;
 	while (sop >= sops) {
 		sma->sem_base[sop->sem_num].sempid = pid;
-		if (sop->sem_flg & SEM_UNDO)
-			un->semadj[sop->sem_num] -= sop->sem_op;
 		sop--;
 	}
 
 	return 0;
 
 out_of_range:
@@ -650,7 +648,10 @@ would_block:
 undo:
 	sop--;
 	while (sop >= sops) {
-		sma->sem_base[sop->sem_num].semval -= sop->sem_op;
+		sem_op = sop->sem_op;
+		sma->sem_base[sop->sem_num].semval -= sem_op;
+		if (sop->sem_flg & SEM_UNDO)
+			un->semadj[sop->sem_num] += sem_op;
 		sop--;
 	}
 
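The two hunks above move the SEM_UNDO bookkeeping into perform_atomic_semop()'s checking loop: the semadj value is range-checked and written while each operation is validated, and the rollback loop under the undo: label now restores it if a later operation in the same call fails. For context, a small user-space example of the semadj behaviour being maintained (plain SysV API, not part of the patch):

#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	/* SEM_UNDO makes the kernel track a per-process adjustment (semadj)
	 * that exit_sem() applies when the process terminates. */
	struct sembuf up = { .sem_num = 0, .sem_op = 1, .sem_flg = SEM_UNDO };

	if (id < 0 || semop(id, &up, 1) < 0)
		return 1;
	printf("semval after +1 with SEM_UNDO: %d\n", semctl(id, 0, GETVAL));
	semctl(id, 0, IPC_RMID);
	return 0;
}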
@@ -680,7 +681,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt,
 }
 
 /**
- * wake_up_sem_queue_do(pt) - do the actual wake-up
+ * wake_up_sem_queue_do - do the actual wake-up
  * @pt: list of tasks to be woken up
  *
  * Do the actual wake-up.
@@ -746,7 +747,7 @@ static int check_restart(struct sem_array *sma, struct sem_queue *q)
 }
 
 /**
- * wake_const_ops(sma, semnum, pt) - Wake up non-alter tasks
+ * wake_const_ops - wake up non-alter tasks
  * @sma: semaphore array.
  * @semnum: semaphore that was modified.
  * @pt: list head for the tasks that must be woken up.
@@ -796,15 +797,14 @@ static int wake_const_ops(struct sem_array *sma, int semnum,
 }
 
 /**
- * do_smart_wakeup_zero(sma, sops, nsops, pt) - wakeup all wait for zero tasks
+ * do_smart_wakeup_zero - wakeup all wait for zero tasks
  * @sma: semaphore array
  * @sops: operations that were performed
  * @nsops: number of operations
  * @pt: list head of the tasks that must be woken up.
  *
- * do_smart_wakeup_zero() checks all required queue for wait-for-zero
- * operations, based on the actual changes that were performed on the
- * semaphore array.
+ * Checks all required queue for wait-for-zero operations, based
+ * on the actual changes that were performed on the semaphore array.
  * The function returns 1 if at least one operation was completed successfully.
  */
 static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
@@ -848,7 +848,7 @@ static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
 
 
 /**
- * update_queue(sma, semnum): Look for tasks that can be completed.
+ * update_queue - look for tasks that can be completed.
  * @sma: semaphore array.
  * @semnum: semaphore that was modified.
  * @pt: list head for the tasks that must be woken up.
@@ -918,7 +918,7 @@ again:
 }
 
 /**
- * set_semotime(sma, sops) - set sem_otime
+ * set_semotime - set sem_otime
  * @sma: semaphore array
  * @sops: operations that modified the array, may be NULL
  *
@@ -936,7 +936,7 @@ static void set_semotime(struct sem_array *sma, struct sembuf *sops)
 }
 
 /**
- * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
+ * do_smart_update - optimized update_queue
  * @sma: semaphore array
  * @sops: operations that were performed
  * @nsops: number of operations
@@ -998,21 +998,21 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
  * The counts we return here are a rough approximation, but still
  * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
  */
-static int count_semncnt (struct sem_array * sma, ushort semnum)
+static int count_semncnt(struct sem_array *sma, ushort semnum)
 {
 	int semncnt;
-	struct sem_queue * q;
+	struct sem_queue *q;
 
 	semncnt = 0;
 	list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
-		struct sembuf * sops = q->sops;
+		struct sembuf *sops = q->sops;
 		BUG_ON(sops->sem_num != semnum);
 		if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
 			semncnt++;
 	}
 
 	list_for_each_entry(q, &sma->pending_alter, list) {
-		struct sembuf * sops = q->sops;
+		struct sembuf *sops = q->sops;
 		int nsops = q->nsops;
 		int i;
 		for (i = 0; i < nsops; i++)
@@ -1024,21 +1024,21 @@ static int count_semncnt (struct sem_array * sma, ushort semnum)
 	return semncnt;
 }
 
-static int count_semzcnt (struct sem_array * sma, ushort semnum)
+static int count_semzcnt(struct sem_array *sma, ushort semnum)
 {
 	int semzcnt;
-	struct sem_queue * q;
+	struct sem_queue *q;
 
 	semzcnt = 0;
 	list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
-		struct sembuf * sops = q->sops;
+		struct sembuf *sops = q->sops;
 		BUG_ON(sops->sem_num != semnum);
 		if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
 			semzcnt++;
 	}
 
 	list_for_each_entry(q, &sma->pending_const, list) {
-		struct sembuf * sops = q->sops;
+		struct sembuf *sops = q->sops;
 		int nsops = q->nsops;
 		int i;
 		for (i = 0; i < nsops; i++)
@@ -1108,7 +1108,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 
 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
 {
-	switch(version) {
+	switch (version) {
 	case IPC_64:
 		return copy_to_user(buf, in, sizeof(*in));
 	case IPC_OLD:
@@ -1151,7 +1151,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
 	int err;
 	struct sem_array *sma;
 
-	switch(cmd) {
+	switch (cmd) {
 	case IPC_INFO:
 	case SEM_INFO:
 	{
@@ -1162,7 +1162,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
 		if (err)
 			return err;
 
-		memset(&seminfo,0,sizeof(seminfo));
+		memset(&seminfo, 0, sizeof(seminfo));
 		seminfo.semmni = ns->sc_semmni;
 		seminfo.semmns = ns->sc_semmns;
 		seminfo.semmsl = ns->sc_semmsl;
@@ -1183,7 +1183,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid,
 		up_read(&sem_ids(ns).rwsem);
 		if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
 			return -EFAULT;
-		return (max_id < 0) ? 0: max_id;
+		return (max_id < 0) ? 0 : max_id;
 	}
 	case IPC_STAT:
 	case SEM_STAT:
@@ -1239,7 +1239,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 {
 	struct sem_undo *un;
 	struct sem_array *sma;
-	struct sem* curr;
+	struct sem *curr;
 	int err;
 	struct list_head tasks;
 	int val;
@@ -1282,7 +1282,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
 
 	sem_lock(sma, NULL, -1);
 
-	if (sma->sem_perm.deleted) {
+	if (!ipc_valid_object(&sma->sem_perm)) {
 		sem_unlock(sma, -1);
 		rcu_read_unlock();
 		return -EIDRM;
@@ -1309,10 +1309,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		int cmd, void __user *p)
 {
 	struct sem_array *sma;
-	struct sem* curr;
+	struct sem *curr;
 	int err, nsems;
 	ushort fast_sem_io[SEMMSL_FAST];
-	ushort* sem_io = fast_sem_io;
+	ushort *sem_io = fast_sem_io;
 	struct list_head tasks;
 
 	INIT_LIST_HEAD(&tasks);
@@ -1342,11 +1342,11 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		int i;
 
 		sem_lock(sma, NULL, -1);
-		if (sma->sem_perm.deleted) {
+		if (!ipc_valid_object(&sma->sem_perm)) {
 			err = -EIDRM;
 			goto out_unlock;
 		}
-		if(nsems > SEMMSL_FAST) {
+		if (nsems > SEMMSL_FAST) {
 			if (!ipc_rcu_getref(sma)) {
 				err = -EIDRM;
 				goto out_unlock;
@@ -1354,14 +1354,14 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 			sem_unlock(sma, -1);
 			rcu_read_unlock();
 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
-			if(sem_io == NULL) {
+			if (sem_io == NULL) {
 				ipc_rcu_putref(sma, ipc_rcu_free);
 				return -ENOMEM;
 			}
 
 			rcu_read_lock();
 			sem_lock_and_putref(sma);
-			if (sma->sem_perm.deleted) {
+			if (!ipc_valid_object(&sma->sem_perm)) {
 				err = -EIDRM;
 				goto out_unlock;
 			}
@@ -1371,7 +1371,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		sem_unlock(sma, -1);
 		rcu_read_unlock();
 		err = 0;
-		if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
+		if (copy_to_user(array, sem_io, nsems*sizeof(ushort)))
 			err = -EFAULT;
 		goto out_free;
 	}
@@ -1386,15 +1386,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		}
 		rcu_read_unlock();
 
-		if(nsems > SEMMSL_FAST) {
+		if (nsems > SEMMSL_FAST) {
 			sem_io = ipc_alloc(sizeof(ushort)*nsems);
-			if(sem_io == NULL) {
+			if (sem_io == NULL) {
 				ipc_rcu_putref(sma, ipc_rcu_free);
 				return -ENOMEM;
 			}
 		}
 
-		if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
+		if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) {
 			ipc_rcu_putref(sma, ipc_rcu_free);
 			err = -EFAULT;
 			goto out_free;
@@ -1409,7 +1409,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		}
 		rcu_read_lock();
 		sem_lock_and_putref(sma);
-		if (sma->sem_perm.deleted) {
+		if (!ipc_valid_object(&sma->sem_perm)) {
 			err = -EIDRM;
 			goto out_unlock;
 		}
@@ -1435,7 +1435,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		goto out_rcu_wakeup;
 
 	sem_lock(sma, NULL, -1);
-	if (sma->sem_perm.deleted) {
+	if (!ipc_valid_object(&sma->sem_perm)) {
 		err = -EIDRM;
 		goto out_unlock;
 	}
@@ -1449,10 +1449,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 		err = curr->sempid;
 		goto out_unlock;
 	case GETNCNT:
-		err = count_semncnt(sma,semnum);
+		err = count_semncnt(sma, semnum);
 		goto out_unlock;
 	case GETZCNT:
-		err = count_semzcnt(sma,semnum);
+		err = count_semzcnt(sma, semnum);
 		goto out_unlock;
 	}
 
@@ -1462,7 +1462,7 @@ out_rcu_wakeup:
 	rcu_read_unlock();
 	wake_up_sem_queue_do(&tasks);
 out_free:
-	if(sem_io != fast_sem_io)
+	if (sem_io != fast_sem_io)
 		ipc_free(sem_io, sizeof(ushort)*nsems);
 	return err;
 }
@@ -1470,7 +1470,7 @@ out_free:
 static inline unsigned long
 copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
 {
-	switch(version) {
+	switch (version) {
 	case IPC_64:
 		if (copy_from_user(out, buf, sizeof(*out)))
 			return -EFAULT;
@@ -1479,7 +1479,7 @@ copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
 	{
 		struct semid_ds tbuf_old;
 
-		if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
+		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
 			return -EFAULT;
 
 		out->sem_perm.uid = tbuf_old.sem_perm.uid;
@@ -1506,7 +1506,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
 	struct semid64_ds semid64;
 	struct kern_ipc_perm *ipcp;
 
-	if(cmd == IPC_SET) {
+	if (cmd == IPC_SET) {
 		if (copy_semid_from_user(&semid64, p, version))
 			return -EFAULT;
 	}
@@ -1566,7 +1566,7 @@ SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
 	version = ipc_parse_version(&cmd);
 	ns = current->nsproxy->ipc_ns;
 
-	switch(cmd) {
+	switch (cmd) {
 	case IPC_INFO:
 	case SEM_INFO:
 	case IPC_STAT:
@@ -1634,7 +1634,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 {
 	struct sem_undo *un;
 
-  	assert_spin_locked(&ulp->lock);
+	assert_spin_locked(&ulp->lock);
 
 	un = __lookup_undo(ulp, semid);
 	if (un) {
@@ -1645,7 +1645,7 @@ static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 }
 
 /**
- * find_alloc_undo - Lookup (and if not present create) undo array
+ * find_alloc_undo - lookup (and if not present create) undo array
  * @ns: namespace
  * @semid: semaphore array id
  *
@@ -1670,7 +1670,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	spin_lock(&ulp->lock);
 	un = lookup_undo(ulp, semid);
 	spin_unlock(&ulp->lock);
-	if (likely(un!=NULL))
+	if (likely(un != NULL))
 		goto out;
 
 	/* no undo structure around - allocate one. */
@@ -1699,7 +1699,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 	/* step 3: Acquire the lock on semaphore array */
 	rcu_read_lock();
 	sem_lock_and_putref(sma);
-	if (sma->sem_perm.deleted) {
+	if (!ipc_valid_object(&sma->sem_perm)) {
 		sem_unlock(sma, -1);
 		rcu_read_unlock();
 		kfree(new);
@@ -1735,7 +1735,7 @@ out:
 
 
 /**
- * get_queue_result - Retrieve the result code from sem_queue
+ * get_queue_result - retrieve the result code from sem_queue
  * @q: Pointer to queue structure
  *
  * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
@@ -1765,7 +1765,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 	int error = -EINVAL;
 	struct sem_array *sma;
 	struct sembuf fast_sops[SEMOPM_FAST];
-	struct sembuf* sops = fast_sops, *sop;
+	struct sembuf *sops = fast_sops, *sop;
 	struct sem_undo *un;
 	int undos = 0, alter = 0, max, locknum;
 	struct sem_queue queue;
@@ -1779,13 +1779,13 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 		return -EINVAL;
 	if (nsops > ns->sc_semopm)
 		return -E2BIG;
-	if(nsops > SEMOPM_FAST) {
-		sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
-		if(sops==NULL)
+	if (nsops > SEMOPM_FAST) {
+		sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
+		if (sops == NULL)
 			return -ENOMEM;
 	}
-	if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
-		error=-EFAULT;
+	if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) {
+		error = -EFAULT;
 		goto out_free;
 	}
 	if (timeout) {
@@ -1846,7 +1846,15 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
 
 	error = -EIDRM;
 	locknum = sem_lock(sma, sops, nsops);
-	if (sma->sem_perm.deleted)
+	/*
+	 * We eventually might perform the following check in a lockless
+	 * fashion, considering ipc_valid_object() locking constraints.
+	 * If nsops == 1 and there is no contention for sem_perm.lock, then
+	 * only a per-semaphore lock is held and it's OK to proceed with the
+	 * check below. More details on the fine grained locking scheme
+	 * entangled here and why it's RMID race safe on comments at sem_lock()
+	 */
+	if (!ipc_valid_object(&sma->sem_perm))
 		goto out_unlock_free;
 	/*
 	 * semid identifiers are not unique - find_alloc_undo may have
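The comment added in this hunk is about the single-operation fast path, where sem_lock() takes only the per-semaphore lock. From user space that is the ordinary one-sop call; a minimal example of such a call and the error codes involved (glibc API, not part of the patch):

#define _GNU_SOURCE
#include <sys/sem.h>
#include <time.h>

/* Wait up to one second for semaphore 0 of 'semid'. Returns 0 on success;
 * -1 with errno set to EAGAIN on timeout or to EIDRM if the array was
 * removed -- the condition the ipc_valid_object() check above reports. */
int wait_for_token(int semid)
{
	struct sembuf down = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

	return semtimedop(semid, &down, 1, &ts);
}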
@@ -1959,10 +1967,8 @@ sleep_again:
 	 * If queue.status != -EINTR we are woken up by another process.
 	 * Leave without unlink_queue(), but with sem_unlock().
 	 */
-
-	if (error != -EINTR) {
+	if (error != -EINTR)
 		goto out_unlock_free;
-	}
 
 	/*
 	 * If an interrupt occurred we have to clean up the queue
@@ -1984,7 +1990,7 @@ out_rcu_wakeup:
 	rcu_read_unlock();
 	wake_up_sem_queue_do(&tasks);
 out_free:
-	if(sops != fast_sops)
+	if (sops != fast_sops)
 		kfree(sops);
 	return error;
 }
@@ -2068,7 +2074,7 @@ void exit_sem(struct task_struct *tsk)
 
 		sem_lock(sma, NULL, -1);
 		/* exit_sem raced with IPC_RMID, nothing to do */
-		if (sma->sem_perm.deleted) {
+		if (!ipc_valid_object(&sma->sem_perm)) {
 			sem_unlock(sma, -1);
 			rcu_read_unlock();
 			continue;
@@ -2093,7 +2099,7 @@ void exit_sem(struct task_struct *tsk)
 
 		/* perform adjustments registered in un */
 		for (i = 0; i < sma->sem_nsems; i++) {
-			struct sem * semaphore = &sma->sem_base[i];
+			struct sem *semaphore = &sma->sem_base[i];
 			if (un->semadj[i]) {
 				semaphore->semval += un->semadj[i];
 				/*
@@ -2107,7 +2113,7 @@ void exit_sem(struct task_struct *tsk)
 				 * Linux caps the semaphore value, both at 0
 				 * and at SEMVMX.
 				 *
-				 * 	Manfred <manfred@colorfullife.com>
+				 *	Manfred <manfred@colorfullife.com>
 				 */
 				if (semaphore->semval < 0)
 					semaphore->semval = 0;