Diffstat (limited to 'ipc/sem.c')
 -rw-r--r--  ipc/sem.c | 316
 1 file changed, 173 insertions(+), 143 deletions(-)
diff --git a/ipc/sem.c b/ipc/sem.c
index e9418df5ff3e..bf1bc36cb7ee 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -272,9 +272,8 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
 	ns->used_sems += nsems;
 
 	sma->sem_base = (struct sem *) &sma[1];
-	/* sma->sem_pending = NULL; */
-	sma->sem_pending_last = &sma->sem_pending;
-	/* sma->undo = NULL; */
+	INIT_LIST_HEAD(&sma->sem_pending);
+	INIT_LIST_HEAD(&sma->list_id);
 	sma->sem_nsems = nsems;
 	sma->sem_ctime = get_seconds();
 	sem_unlock(sma);
@@ -331,38 +330,6 @@ asmlinkage long sys_semget(key_t key, int nsems, int semflg)
 	return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
 }
 
-/* Manage the doubly linked list sma->sem_pending as a FIFO:
- * insert new queue elements at the tail sma->sem_pending_last.
- */
-static inline void append_to_queue (struct sem_array * sma,
-				    struct sem_queue * q)
-{
-	*(q->prev = sma->sem_pending_last) = q;
-	*(sma->sem_pending_last = &q->next) = NULL;
-}
-
-static inline void prepend_to_queue (struct sem_array * sma,
-				     struct sem_queue * q)
-{
-	q->next = sma->sem_pending;
-	*(q->prev = &sma->sem_pending) = q;
-	if (q->next)
-		q->next->prev = &q->next;
-	else /* sma->sem_pending_last == &sma->sem_pending */
-		sma->sem_pending_last = &q->next;
-}
-
-static inline void remove_from_queue (struct sem_array * sma,
-				      struct sem_queue * q)
-{
-	*(q->prev) = q->next;
-	if (q->next)
-		q->next->prev = q->prev;
-	else /* sma->sem_pending_last == &q->next */
-		sma->sem_pending_last = q->prev;
-	q->prev = NULL; /* mark as removed */
-}
-
 /*
  * Determine whether a sequence of semaphore operations would succeed
  * all at once. Return 0 if yes, 1 if need to sleep, else return error code.
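The three open-coded helpers deleted above map one-to-one onto the generic primitives from <linux/list.h> that the remaining hunks substitute for them:

/*
 * Equivalences applied by the later hunks of this patch:
 *
 *   append_to_queue(sma, q)    ->  list_add_tail(&q->list, &sma->sem_pending)
 *   prepend_to_queue(sma, q)   ->  list_add(&q->list, &sma->sem_pending)
 *   remove_from_queue(sma, q)  ->  list_del(&q->list)
 */

The one behaviour with no direct equivalent is remove_from_queue() storing q->prev = NULL as a "removed" marker; the BUG_ON(queue.prev != NULL) that depended on it is deleted in a later hunk.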
@@ -438,16 +405,15 @@ static void update_queue (struct sem_array * sma)
 	int error;
 	struct sem_queue * q;
 
-	q = sma->sem_pending;
-	while(q) {
+	q = list_entry(sma->sem_pending.next, struct sem_queue, list);
+	while (&q->list != &sma->sem_pending) {
 		error = try_atomic_semop(sma, q->sops, q->nsops,
 					 q->undo, q->pid);
 
 		/* Does q->sleeper still need to sleep? */
 		if (error <= 0) {
 			struct sem_queue *n;
-			remove_from_queue(sma,q);
-			q->status = IN_WAKEUP;
+
 			/*
 			 * Continue scanning. The next operation
 			 * that must be checked depends on the type of the
@@ -458,11 +424,26 @@ static void update_queue (struct sem_array * sma)
 			 *   for semaphore values to become 0.
 			 * - if the operation didn't modify the array,
 			 *   then just continue.
+			 * The order of list_del() and reading ->next
+			 * is crucial: In the former case, the list_del()
+			 * must be done first [because we might be the
+			 * first entry in ->sem_pending], in the latter
+			 * case the list_del() must be done last
+			 * [because the list is invalid after the list_del()]
 			 */
-			if (q->alter)
-				n = sma->sem_pending;
-			else
-				n = q->next;
+			if (q->alter) {
+				list_del(&q->list);
+				n = list_entry(sma->sem_pending.next,
+						struct sem_queue, list);
+			} else {
+				n = list_entry(q->list.next, struct sem_queue,
+						list);
+				list_del(&q->list);
+			}
+
+			/* wake up the waiting thread */
+			q->status = IN_WAKEUP;
+
 			wake_up_process(q->sleeper);
 			/* hands-off: q will disappear immediately after
 			 * writing q->status.
@@ -471,7 +452,7 @@ static void update_queue (struct sem_array * sma)
 			q->status = error;
 			q = n;
 		} else {
-			q = q->next;
+			q = list_entry(q->list.next, struct sem_queue, list);
 		}
 	}
 }
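The new comment pins down an ordering rule that only matters because update_queue() sometimes rescans from the head. A walk that always continues with the old successor can use the stock helper instead, which caches the successor before the body runs; a minimal sketch, with should_wake() as a hypothetical predicate that is not part of the patch:

	struct sem_queue *q, *tmp;

	/* tmp already holds q's successor when the body executes, so
	 * unlinking q is always safe: */
	list_for_each_entry_safe(q, tmp, &sma->sem_pending, list) {
		if (should_wake(q))	/* hypothetical predicate */
			list_del(&q->list);
	}

update_queue() cannot use this helper because a completed ALTER operation may unblock entries anywhere in the list, so the scan must restart from sma->sem_pending.next rather than from the cached successor.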
@@ -491,7 +472,7 @@ static int count_semncnt (struct sem_array * sma, ushort semnum)
 	struct sem_queue * q;
 
 	semncnt = 0;
-	for (q = sma->sem_pending; q; q = q->next) {
+	list_for_each_entry(q, &sma->sem_pending, list) {
 		struct sembuf * sops = q->sops;
 		int nsops = q->nsops;
 		int i;
@@ -503,13 +484,14 @@ static int count_semncnt (struct sem_array * sma, ushort semnum)
 	}
 	return semncnt;
 }
+
 static int count_semzcnt (struct sem_array * sma, ushort semnum)
 {
 	int semzcnt;
 	struct sem_queue * q;
 
 	semzcnt = 0;
-	for (q = sma->sem_pending; q; q = q->next) {
+	list_for_each_entry(q, &sma->sem_pending, list) {
 		struct sembuf * sops = q->sops;
 		int nsops = q->nsops;
 		int i;
@@ -522,35 +504,41 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
 	return semzcnt;
 }
 
+void free_un(struct rcu_head *head)
+{
+	struct sem_undo *un = container_of(head, struct sem_undo, rcu);
+	kfree(un);
+}
+
 /* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked
  * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex
  * remains locked on exit.
 */
 static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
-	struct sem_undo *un;
-	struct sem_queue *q;
+	struct sem_undo *un, *tu;
+	struct sem_queue *q, *tq;
 	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
 
-	/* Invalidate the existing undo structures for this semaphore set.
-	 * (They will be freed without any further action in exit_sem()
-	 * or during the next semop.)
-	 */
-	for (un = sma->undo; un; un = un->id_next)
-		un->semid = -1;
+	/* Free the existing undo structures for this semaphore set. */
+	assert_spin_locked(&sma->sem_perm.lock);
+	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
+		list_del(&un->list_id);
+		spin_lock(&un->ulp->lock);
+		un->semid = -1;
+		list_del_rcu(&un->list_proc);
+		spin_unlock(&un->ulp->lock);
+		call_rcu(&un->rcu, free_un);
+	}
 
 	/* Wake up all pending processes and let them fail with EIDRM. */
-	q = sma->sem_pending;
-	while(q) {
-		struct sem_queue *n;
-		/* lazy remove_from_queue: we are killing the whole queue */
-		q->prev = NULL;
-		n = q->next;
+	list_for_each_entry_safe(q, tq, &sma->sem_pending, list) {
+		list_del(&q->list);
+
 		q->status = IN_WAKEUP;
 		wake_up_process(q->sleeper); /* doesn't sleep */
 		smp_wmb();
 		q->status = -EIDRM;	/* hands-off q */
-		q = n;
 	}
 
 	/* Remove the semaphore set from the IDR */
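free_un() introduced above is the RCU callback for this patch: an undo structure is unlinked with list_del_rcu() while lockless readers may still be walking list_proc, so the kfree() must be deferred past a grace period. An abridged sketch of the pattern (the field list is shortened, not the literal struct from the file):

	struct sem_undo {
		struct list_head	list_proc;	/* per-process list, RCU-protected */
		struct rcu_head		rcu;		/* head for the deferred free */
		/* ... list_id, semid, semadj, ... */
	};

	/* writer side, under the undo-list spinlock: */
	list_del_rcu(&un->list_proc);	/* concurrent readers may still see 'un' */
	call_rcu(&un->rcu, free_un);	/* free_un() runs after a grace period and
					 * recovers 'un' via container_of() */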
@@ -763,9 +751,12 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 
 	for (i = 0; i < nsems; i++)
 		sma->sem_base[i].semval = sem_io[i];
-	for (un = sma->undo; un; un = un->id_next)
+
+	assert_spin_locked(&sma->sem_perm.lock);
+	list_for_each_entry(un, &sma->list_id, list_id) {
 		for (i = 0; i < nsems; i++)
 			un->semadj[i] = 0;
+	}
 	sma->sem_ctime = get_seconds();
 	/* maybe some queued-up processes were waiting for this */
 	update_queue(sma);
@@ -797,12 +788,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 	{
 		int val = arg.val;
 		struct sem_undo *un;
+
 		err = -ERANGE;
 		if (val > SEMVMX || val < 0)
 			goto out_unlock;
 
-		for (un = sma->undo; un; un = un->id_next)
+		assert_spin_locked(&sma->sem_perm.lock);
+		list_for_each_entry(un, &sma->list_id, list_id)
 			un->semadj[semnum] = 0;
+
 		curr->semval = val;
 		curr->sempid = task_tgid_vnr(current);
 		sma->sem_ctime = get_seconds();
@@ -952,6 +946,8 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 			return -ENOMEM;
 		spin_lock_init(&undo_list->lock);
 		atomic_set(&undo_list->refcnt, 1);
+		INIT_LIST_HEAD(&undo_list->list_proc);
+
 		current->sysvsem.undo_list = undo_list;
 	}
 	*undo_listp = undo_list;
@@ -960,25 +956,27 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 
 static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 {
-	struct sem_undo **last, *un;
+	struct sem_undo *walk;
 
-	last = &ulp->proc_list;
-	un = *last;
-	while(un != NULL) {
-		if(un->semid==semid)
-			break;
-		if(un->semid==-1) {
-			*last=un->proc_next;
-			kfree(un);
-		} else {
-			last=&un->proc_next;
-		}
-		un=*last;
+	list_for_each_entry_rcu(walk, &ulp->list_proc, list_proc) {
+		if (walk->semid == semid)
+			return walk;
 	}
-	return un;
+	return NULL;
 }
 
-static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
+/**
+ * find_alloc_undo - Lookup (and if not present create) undo array
+ * @ns: namespace
+ * @semid: semaphore array id
+ *
+ * The function looks up (and if not present creates) the undo structure.
+ * The size of the undo structure depends on the size of the semaphore
+ * array, thus the alloc path is not that straightforward.
+ * Lifetime-rules: sem_undo is rcu-protected, on success, the function
+ * performs a rcu_read_lock().
+ */
+static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 {
 	struct sem_array *sma;
 	struct sem_undo_list *ulp;
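lookup_undo() now does a bare RCU list walk, so it is only safe when the caller either holds ulp->lock (as all writers do) or is inside an RCU read-side critical section. A minimal sketch of the read-side contract:

	rcu_read_lock();
	un = lookup_undo(ulp, semid);
	/* 'un' cannot be kfree()d before rcu_read_unlock(), but it may
	 * already be unlinked; un->semid == -1 marks such a dead entry. */
	rcu_read_unlock();

The garbage collection the old loop performed inline, freeing semid == -1 entries during lookup, moves to freeary() and exit_sem(), which become the only places that free undo structures.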
@@ -990,13 +988,16 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
 	if (error)
 		return ERR_PTR(error);
 
+	rcu_read_lock();
 	spin_lock(&ulp->lock);
 	un = lookup_undo(ulp, semid);
 	spin_unlock(&ulp->lock);
 	if (likely(un!=NULL))
 		goto out;
+	rcu_read_unlock();
 
 	/* no undo structure around - allocate one. */
+	/* step 1: figure out the size of the semaphore array */
 	sma = sem_lock_check(ns, semid);
 	if (IS_ERR(sma))
 		return ERR_PTR(PTR_ERR(sma));
@@ -1004,37 +1005,45 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
 	nsems = sma->sem_nsems;
 	sem_getref_and_unlock(sma);
 
+	/* step 2: allocate new undo structure */
 	new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
 	if (!new) {
 		sem_putref(sma);
 		return ERR_PTR(-ENOMEM);
 	}
-	new->semadj = (short *) &new[1];
-	new->semid = semid;
 
-	spin_lock(&ulp->lock);
-	un = lookup_undo(ulp, semid);
-	if (un) {
-		spin_unlock(&ulp->lock);
-		kfree(new);
-		sem_putref(sma);
-		goto out;
-	}
+	/* step 3: Acquire the lock on semaphore array */
 	sem_lock_and_putref(sma);
 	if (sma->sem_perm.deleted) {
 		sem_unlock(sma);
-		spin_unlock(&ulp->lock);
 		kfree(new);
 		un = ERR_PTR(-EIDRM);
 		goto out;
 	}
-	new->proc_next = ulp->proc_list;
-	ulp->proc_list = new;
-	new->id_next = sma->undo;
-	sma->undo = new;
-	sem_unlock(sma);
+	spin_lock(&ulp->lock);
+
+	/*
+	 * step 4: check for races: did someone else allocate the undo struct?
+	 */
+	un = lookup_undo(ulp, semid);
+	if (un) {
+		kfree(new);
+		goto success;
+	}
+	/* step 5: initialize & link new undo structure */
+	new->semadj = (short *) &new[1];
+	new->ulp = ulp;
+	new->semid = semid;
+	assert_spin_locked(&ulp->lock);
+	list_add_rcu(&new->list_proc, &ulp->list_proc);
+	assert_spin_locked(&sma->sem_perm.lock);
+	list_add(&new->list_id, &sma->list_id);
 	un = new;
+
+success:
 	spin_unlock(&ulp->lock);
+	rcu_read_lock();
+	sem_unlock(sma);
 out:
 	return un;
 }
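Steps 2 through 5 are the usual optimistic-allocation shape: the GFP_KERNEL allocation may sleep and therefore runs with no spinlock held, which leaves a window for another task to create the same undo entry. Stripped of the error paths, the race handling looks like this (a sketch, not the literal code):

	new = kzalloc(sizeof(*new) + sizeof(short) * nsems, GFP_KERNEL); /* may sleep */

	spin_lock(&ulp->lock);			/* sma->sem_perm.lock is also held here */
	un = lookup_undo(ulp, semid);		/* re-check under the lock */
	if (un) {
		kfree(new);			/* lost the race, use the winner's entry */
	} else {
		list_add_rcu(&new->list_proc, &ulp->list_proc);
		list_add(&new->list_id, &sma->list_id);
		un = new;
	}
	spin_unlock(&ulp->lock);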
@@ -1090,9 +1099,8 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
 		alter = 1;
 	}
 
-retry_undos:
 	if (undos) {
-		un = find_undo(ns, semid);
+		un = find_alloc_undo(ns, semid);
 		if (IS_ERR(un)) {
 			error = PTR_ERR(un);
 			goto out_free;
@@ -1102,19 +1110,37 @@ retry_undos:
 
 	sma = sem_lock_check(ns, semid);
 	if (IS_ERR(sma)) {
+		if (un)
+			rcu_read_unlock();
 		error = PTR_ERR(sma);
 		goto out_free;
 	}
 
 	/*
-	 * semid identifiers are not unique - find_undo may have
+	 * semid identifiers are not unique - find_alloc_undo may have
 	 * allocated an undo structure, it was invalidated by an RMID
-	 * and now a new array with received the same id. Check and retry.
+	 * and now a new array has received the same id. Check and fail.
+	 * This case can be detected checking un->semid. The existence of
+	 * "un" itself is guaranteed by rcu.
 	 */
-	if (un && un->semid == -1) {
-		sem_unlock(sma);
-		goto retry_undos;
+	error = -EIDRM;
+	if (un) {
+		if (un->semid == -1) {
+			rcu_read_unlock();
+			goto out_unlock_free;
+		} else {
+			/*
+			 * rcu lock can be released, "un" cannot disappear:
+			 * - sem_lock is acquired, thus IPC_RMID is
+			 *   impossible.
+			 * - exit_sem is impossible, it always operates on
+			 *   current (or a dead task).
+			 */
+
+			rcu_read_unlock();
+		}
 	}
+
 	error = -EFBIG;
 	if (max >= sma->sem_nsems)
 		goto out_unlock_free;
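The rcu_read_unlock() placement implements a hand-off: find_alloc_undo() returns inside an RCU read-side section so that "un" cannot be freed under the caller, and the section is left only once the entry is either known dead (semid == -1) or pinned by the array spinlock. Schematically (a sketch of the control flow above, not new code):

	un = find_alloc_undo(ns, semid);	/* on success: rcu_read_lock() held */
	sma = sem_lock_check(ns, semid);	/* array spinlock now held */
	if (un) {
		if (un->semid == -1) {		/* array deleted, id possibly reused */
			rcu_read_unlock();
			goto out_unlock_free;	/* fail with -EIDRM */
		}
		rcu_read_unlock();	/* safe: sem_lock blocks IPC_RMID, and
					 * exit_sem() only runs against current */
	}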
@@ -1138,17 +1164,15 @@ retry_undos:
 	 * task into the pending queue and go to sleep.
 	 */
 
-	queue.sma = sma;
 	queue.sops = sops;
 	queue.nsops = nsops;
 	queue.undo = un;
 	queue.pid = task_tgid_vnr(current);
-	queue.id = semid;
 	queue.alter = alter;
 	if (alter)
-		append_to_queue(sma ,&queue);
+		list_add_tail(&queue.list, &sma->sem_pending);
 	else
-		prepend_to_queue(sma ,&queue);
+		list_add(&queue.list, &sma->sem_pending);
 
 	queue.status = -EINTR;
 	queue.sleeper = current;
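Dropping queue.sma and queue.id is possible because the entry, a local variable on the sleeper's kernel stack, is now reachable only through sma->sem_pending, making the owning array implicit. The queueing policy is unchanged but now reads directly off the list calls:

	struct sem_queue queue;	/* on-stack entry in sys_semtimedop() */

	if (alter)
		list_add_tail(&queue.list, &sma->sem_pending);	/* FIFO among writers */
	else
		list_add(&queue.list, &sma->sem_pending);	/* readers retried first */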
@@ -1174,7 +1198,6 @@ retry_undos:
 
 	sma = sem_lock(ns, semid);
 	if (IS_ERR(sma)) {
-		BUG_ON(queue.prev != NULL);
 		error = -EIDRM;
 		goto out_free;
 	}
@@ -1192,7 +1215,7 @@ retry_undos:
 	 */
 	if (timeout && jiffies_left == 0)
 		error = -EAGAIN;
-	remove_from_queue(sma,&queue);
+	list_del(&queue.list);
 	goto out_unlock_free;
 
 out_unlock_free:
@@ -1243,56 +1266,62 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
  */
 void exit_sem(struct task_struct *tsk)
 {
-	struct sem_undo_list *undo_list;
-	struct sem_undo *u, **up;
-	struct ipc_namespace *ns;
+	struct sem_undo_list *ulp;
 
-	undo_list = tsk->sysvsem.undo_list;
-	if (!undo_list)
+	ulp = tsk->sysvsem.undo_list;
+	if (!ulp)
 		return;
 	tsk->sysvsem.undo_list = NULL;
 
-	if (!atomic_dec_and_test(&undo_list->refcnt))
+	if (!atomic_dec_and_test(&ulp->refcnt))
 		return;
 
-	ns = tsk->nsproxy->ipc_ns;
-	/* There's no need to hold the semundo list lock, as current
-	 * is the last task exiting for this undo list.
-	 */
-	for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
+	for (;;) {
 		struct sem_array *sma;
-		int nsems, i;
-		struct sem_undo *un, **unp;
+		struct sem_undo *un;
 		int semid;
+		int i;
 
-		semid = u->semid;
+		rcu_read_lock();
+		un = list_entry(rcu_dereference(ulp->list_proc.next),
+				struct sem_undo, list_proc);
+		if (&un->list_proc == &ulp->list_proc)
+			semid = -1;
+		else
+			semid = un->semid;
+		rcu_read_unlock();
 
-		if(semid == -1)
-			continue;
-		sma = sem_lock(ns, semid);
+		if (semid == -1)
+			break;
+
+		sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
+
+		/* exit_sem raced with IPC_RMID, nothing to do */
 		if (IS_ERR(sma))
 			continue;
 
-		if (u->semid == -1)
-			goto next_entry;
+		un = lookup_undo(ulp, semid);
+		if (un == NULL) {
+			/* exit_sem raced with IPC_RMID+semget() that created
+			 * exactly the same semid. Nothing to do.
+			 */
+			sem_unlock(sma);
+			continue;
+		}
 
-		BUG_ON(sem_checkid(sma, u->semid));
+		/* remove un from the linked lists */
+		assert_spin_locked(&sma->sem_perm.lock);
+		list_del(&un->list_id);
 
-		/* remove u from the sma->undo list */
-		for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
-			if (u == un)
-				goto found;
-		}
-		printk ("exit_sem undo list error id=%d\n", u->semid);
-		goto next_entry;
-found:
-		*unp = un->id_next;
-		/* perform adjustments registered in u */
-		nsems = sma->sem_nsems;
-		for (i = 0; i < nsems; i++) {
+		spin_lock(&ulp->lock);
+		list_del_rcu(&un->list_proc);
+		spin_unlock(&ulp->lock);
+
+		/* perform adjustments registered in un */
+		for (i = 0; i < sma->sem_nsems; i++) {
 			struct sem * semaphore = &sma->sem_base[i];
-			if (u->semadj[i]) {
-				semaphore->semval += u->semadj[i];
+			if (un->semadj[i]) {
+				semaphore->semval += un->semadj[i];
 				/*
 				 * Range checks of the new semaphore value,
 				 * not defined by sus:
@@ -1316,10 +1345,11 @@ found:
 		sma->sem_otime = get_seconds();
 		/* maybe some queued-up processes were waiting for this */
 		update_queue(sma);
-next_entry:
 		sem_unlock(sma);
+
+		call_rcu(&un->rcu, free_un);
 	}
-	kfree(undo_list);
+	kfree(ulp);
 }
 
 #ifdef CONFIG_PROC_FS
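Taken together, the converted code follows these locking rules; this summary is distilled from the hunks above rather than copied from the source:

/*
 * sma->sem_pending  - modified and traversed under sma->sem_perm.lock
 * sma->list_id      - modified and traversed under sma->sem_perm.lock
 * ulp->list_proc    - modified under ulp->lock, traversed under RCU
 * struct sem_undo   - freed only through call_rcu(&un->rcu, free_un)
 * un->semid == -1   - marks an entry whose semaphore array was removed
 */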