Diffstat (limited to 'ipc')
-rw-r--r--	ipc/sem.c	46
1 file changed, 6 insertions, 40 deletions
diff --git a/ipc/sem.c b/ipc/sem.c
index 40ab34d832a6..7617f4f34edc 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -999,36 +999,6 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
 	}
 }
 
-static inline void lock_semundo(void)
-{
-	struct sem_undo_list *undo_list;
-
-	undo_list = current->sysvsem.undo_list;
-	if (undo_list)
-		spin_lock(&undo_list->lock);
-}
-
-/* This code has an interaction with copy_semundo().
- * Consider; two tasks are sharing the undo_list. task1
- * acquires the undo_list lock in lock_semundo(). If task2 now
- * exits before task1 releases the lock (by calling
- * unlock_semundo()), then task1 will never call spin_unlock().
- * This leave the sem_undo_list in a locked state. If task1 now creats task3
- * and once again shares the sem_undo_list, the sem_undo_list will still be
- * locked, and future SEM_UNDO operations will deadlock. This case is
- * dealt with in copy_semundo() by having it reinitialize the spin lock when
- * the refcnt goes from 1 to 2.
- */
-static inline void unlock_semundo(void)
-{
-	struct sem_undo_list *undo_list;
-
-	undo_list = current->sysvsem.undo_list;
-	if (undo_list)
-		spin_unlock(&undo_list->lock);
-}
-
-
 /* If the task doesn't already have a undo_list, then allocate one
  * here. We guarantee there is only one thread using this undo list,
  * and current is THE ONE
@@ -1089,9 +1059,9 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
 	if (error)
 		return ERR_PTR(error);
 
-	lock_semundo();
+	spin_lock(&ulp->lock);
 	un = lookup_undo(ulp, semid);
-	unlock_semundo();
+	spin_unlock(&ulp->lock);
 	if (likely(un!=NULL))
 		goto out;
 
@@ -1114,10 +1084,10 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
 	new->semadj = (short *) &new[1];
 	new->semid = semid;
 
-	lock_semundo();
+	spin_lock(&ulp->lock);
 	un = lookup_undo(ulp, semid);
 	if (un) {
-		unlock_semundo();
+		spin_unlock(&ulp->lock);
 		kfree(new);
 		ipc_lock_by_ptr(&sma->sem_perm);
 		ipc_rcu_putref(sma);
@@ -1128,7 +1098,7 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
 	ipc_rcu_putref(sma);
 	if (sma->sem_perm.deleted) {
 		sem_unlock(sma);
-		unlock_semundo();
+		spin_unlock(&ulp->lock);
 		kfree(new);
 		un = ERR_PTR(-EIDRM);
 		goto out;
@@ -1139,7 +1109,7 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
 	sma->undo = new;
 	sem_unlock(sma);
 	un = new;
-	unlock_semundo();
+	spin_unlock(&ulp->lock);
 out:
 	return un;
 }
@@ -1315,10 +1285,6 @@ asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsop
 
 /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
  * parent and child tasks.
- *
- * See the notes above unlock_semundo() regarding the spin_lock_init()
- * in this code. Initialize the undo_list->lock here instead of get_undo_list()
- * because of the reasoning in the comment above unlock_semundo.
  */
 
 int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)