Diffstat (limited to 'ipc')
 ipc/sem.c | 163 +++++++++++++++++++++++++++++-----------------------------
 1 file changed, 89 insertions(+), 74 deletions(-)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -274,7 +274,7 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
         sma->sem_base = (struct sem *) &sma[1];
         /* sma->sem_pending = NULL; */
         sma->sem_pending_last = &sma->sem_pending;
-        /* sma->undo = NULL; */
+        INIT_LIST_HEAD(&sma->list_id);
         sma->sem_nsems = nsems;
         sma->sem_ctime = get_seconds();
         sem_unlock(sma);
@@ -536,7 +536,8 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
          * (They will be freed without any further action in exit_sem()
          * or during the next semop.)
          */
-        for (un = sma->undo; un; un = un->id_next)
+        assert_spin_locked(&sma->sem_perm.lock);
+        list_for_each_entry(un, &sma->list_id, list_id)
                 un->semid = -1;
 
         /* Wake up all pending processes and let them fail with EIDRM. */
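The pattern in this hunk recurs throughout the patch: the hand-rolled singly linked chain (`for (un = sma->undo; un; un = un->id_next)`) becomes a walk over an embedded `struct list_head` via list_for_each_entry(). As a rough userspace sketch of how that intrusive-list iteration works -- with toy stand-ins for the <linux/list.h> macros and a hypothetical toy_undo in place of struct sem_undo -- consider:

#include <stddef.h>
#include <stdio.h>

/* stripped-down stand-ins for the kernel's <linux/list.h> primitives */
struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add(struct list_head *item, struct list_head *head)
{
        item->next = head->next;
        item->prev = head;
        head->next->prev = item;
        head->next = item;
}

/* container_of: recover the enclosing struct from the embedded list_head */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)                          \
        for (pos = container_of((head)->next, typeof(*pos), member);    \
             &pos->member != (head);                                    \
             pos = container_of(pos->member.next, typeof(*pos), member))

/* hypothetical miniature of struct sem_undo: the list node is embedded */
struct toy_undo {
        int semid;
        struct list_head list_id;
};

int main(void)
{
        struct list_head list_id = LIST_HEAD_INIT(list_id); /* like sma->list_id */
        struct toy_undo a = { .semid = 1 }, b = { .semid = 2 };
        struct toy_undo *un;

        list_add(&a.list_id, &list_id);
        list_add(&b.list_id, &list_id);

        /* same shape as the new loop in freeary() */
        list_for_each_entry(un, &list_id, list_id)
                un->semid = -1;

        printf("%d %d\n", a.semid, b.semid);    /* prints: -1 -1 */
        return 0;
}

Because the node lives inside the object, iteration recovers the containing structure with container_of() instead of carrying a separate next pointer per list membership, and the same object can sit on several lists at once (here: the per-process list_proc and the per-array list_id).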
@@ -763,9 +764,12 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
 
         for (i = 0; i < nsems; i++)
                 sma->sem_base[i].semval = sem_io[i];
-        for (un = sma->undo; un; un = un->id_next)
+
+        assert_spin_locked(&sma->sem_perm.lock);
+        list_for_each_entry(un, &sma->list_id, list_id) {
                 for (i = 0; i < nsems; i++)
                         un->semadj[i] = 0;
+        }
         sma->sem_ctime = get_seconds();
         /* maybe some queued-up processes were waiting for this */
         update_queue(sma);
@@ -797,12 +801,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
         {
                 int val = arg.val;
                 struct sem_undo *un;
+
                 err = -ERANGE;
                 if (val > SEMVMX || val < 0)
                         goto out_unlock;
 
-                for (un = sma->undo; un; un = un->id_next)
+                assert_spin_locked(&sma->sem_perm.lock);
+                list_for_each_entry(un, &sma->list_id, list_id)
                         un->semadj[semnum] = 0;
+
                 curr->semval = val;
                 curr->sempid = task_tgid_vnr(current);
                 sma->sem_ctime = get_seconds();
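Both the SETALL hunk above and this SETVAL hunk clear every process's semadj for the affected semaphore(s); that is what the new list_id walks implement. The effect is observable from userspace: in the (timing-dependent, purely illustrative) program below, a child accumulates an undo adjustment and the parent wipes it with SETVAL before the child exits, so no adjustment is replayed.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Linux requires the caller to define union semun itself */
union semun { int val; struct semid_ds *buf; unsigned short *array; };

int main(void)
{
        int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
        union semun arg = { .val = 5 };
        struct sembuf dec = { 0, -1, SEM_UNDO };

        if (semid == -1 || semctl(semid, 0, SETVAL, arg) == -1) {
                perror("setup");
                return 1;
        }

        if (fork() == 0) {
                semop(semid, &dec, 1);  /* semval 5 -> 4, child semadj = +1 */
                sleep(1);               /* parent's SETVAL lands here ... */
                _exit(0);               /* ... so exit_sem() finds semadj == 0 */
        }

        usleep(100 * 1000);             /* crude ordering for the demo */
        arg.val = 7;
        semctl(semid, 0, SETVAL, arg);  /* resets semval AND clears semadj */
        wait(NULL);

        /* prints 7: the child's undo was wiped by SETVAL, not replayed */
        printf("semval: %d\n", semctl(semid, 0, GETVAL));
        semctl(semid, 0, IPC_RMID);
        return 0;
}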
@@ -952,6 +959,8 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
                         return -ENOMEM;
                 spin_lock_init(&undo_list->lock);
                 atomic_set(&undo_list->refcnt, 1);
+                INIT_LIST_HEAD(&undo_list->list_proc);
+
                 current->sysvsem.undo_list = undo_list;
         }
         *undo_listp = undo_list;
@@ -960,25 +969,30 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp)
 
 static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
 {
-        struct sem_undo **last, *un;
+        struct sem_undo *walk, *tmp;
 
-        last = &ulp->proc_list;
-        un = *last;
-        while(un != NULL) {
-                if(un->semid==semid)
-                        break;
-                if(un->semid==-1) {
-                        *last=un->proc_next;
-                        kfree(un);
-                } else {
-                        last=&un->proc_next;
+        assert_spin_locked(&ulp->lock);
+        list_for_each_entry_safe(walk, tmp, &ulp->list_proc, list_proc) {
+                if (walk->semid == semid)
+                        return walk;
+                if (walk->semid == -1) {
+                        list_del(&walk->list_proc);
+                        kfree(walk);
                 }
-                un=*last;
         }
-        return un;
+        return NULL;
 }
 
-static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
+/**
+ * find_alloc_undo - Lookup (and if not present create) undo array
+ * @ns: namespace
+ * @semid: semaphore array id
+ *
+ * The function looks up (and if not present creates) the undo structure.
+ * The size of the undo structure depends on the size of the semaphore
+ * array, thus the alloc path is not that straightforward.
+ */
+static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
 {
         struct sem_array *sma;
         struct sem_undo_list *ulp;
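lookup_undo() both searches and prunes stale entries, which is why it needs list_for_each_entry_safe(): the _safe variant keeps a second cursor one node ahead, so the current entry can be unlinked and freed without breaking the walk. A self-contained sketch of that pattern (again with a toy reimplementation of the kernel macros and a hypothetical toy_undo type):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void list_add(struct list_head *item, struct list_head *head)
{
        item->next = head->next;
        item->prev = head;
        head->next->prev = item;
        head->next = item;
}

static void list_del(struct list_head *item)
{
        item->prev->next = item->next;
        item->next->prev = item->prev;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* 'n' is read before the loop body runs, so the body may free 'pos' */
#define list_for_each_entry_safe(pos, n, head, member)                       \
        for (pos = container_of((head)->next, typeof(*pos), member),         \
             n = container_of(pos->member.next, typeof(*pos), member);       \
             &pos->member != (head);                                         \
             pos = n, n = container_of(n->member.next, typeof(*n), member))

struct toy_undo { int semid; struct list_head list_proc; };

int main(void)
{
        struct list_head list_proc = { &list_proc, &list_proc };
        struct toy_undo *walk, *tmp;
        int i;

        for (i = 0; i < 3; i++) {
                struct toy_undo *u = malloc(sizeof(*u));
                u->semid = (i == 1) ? -1 : i;   /* mark one entry stale */
                list_add(&u->list_proc, &list_proc);
        }

        /* same shape as the new lookup_undo(): prune stale entries mid-walk */
        list_for_each_entry_safe(walk, tmp, &list_proc, list_proc) {
                if (walk->semid == -1) {
                        list_del(&walk->list_proc);
                        free(walk);     /* safe: tmp already points past it */
                }
        }

        list_for_each_entry_safe(walk, tmp, &list_proc, list_proc)
                printf("semid %d survives\n", walk->semid);
        return 0;
}

With the plain list_for_each_entry(), freeing the current node would leave the loop reading pos->member.next out of freed memory on the next step.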
@@ -997,6 +1011,7 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
                 goto out;
 
         /* no undo structure around - allocate one. */
+        /* step 1: figure out the size of the semaphore array */
         sma = sem_lock_check(ns, semid);
         if (IS_ERR(sma))
                 return ERR_PTR(PTR_ERR(sma));
@@ -1004,15 +1019,19 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
         nsems = sma->sem_nsems;
         sem_getref_and_unlock(sma);
 
+        /* step 2: allocate new undo structure */
         new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
         if (!new) {
                 sem_putref(sma);
                 return ERR_PTR(-ENOMEM);
         }
-        new->semadj = (short *) &new[1];
-        new->semid = semid;
 
+        /* step 3: Acquire the lock on the undo list pointer */
         spin_lock(&ulp->lock);
+
+        /* step 4: check for races: someone else allocated the undo struct,
+         * semaphore array was destroyed.
+         */
         un = lookup_undo(ulp, semid);
         if (un) {
                 spin_unlock(&ulp->lock);
@@ -1028,13 +1047,17 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
                 un = ERR_PTR(-EIDRM);
                 goto out;
         }
-        new->proc_next = ulp->proc_list;
-        ulp->proc_list = new;
-        new->id_next = sma->undo;
-        sma->undo = new;
+        /* step 5: initialize & link new undo structure */
+        new->semadj = (short *) &new[1];
+        new->semid = semid;
+        assert_spin_locked(&ulp->lock);
+        list_add(&new->list_proc, &ulp->list_proc);
+        assert_spin_locked(&sma->sem_perm.lock);
+        list_add(&new->list_id, &sma->list_id);
+
         sem_unlock(sma);
-        un = new;
         spin_unlock(&ulp->lock);
+        un = new;
 out:
         return un;
 }
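The five numbered steps annotate a classic lock-ordering dance: the size of the undo array can only be read under the semaphore lock, the allocation may sleep (GFP_KERNEL) and so must happen with no spinlock held, and after retaking the locks the code must recheck for a concurrent allocator or a vanished array before linking the new object. A simplified userspace analogue of that allocate-outside-the-lock, recheck-under-the-lock shape (a pthread mutex stands in for the kernel spinlocks; all names here are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct entry { int key; struct entry *next; };

static struct entry *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *lookup(int key)    /* caller holds lock */
{
        struct entry *e;
        for (e = head; e; e = e->next)
                if (e->key == key)
                        return e;
        return NULL;
}

struct entry *find_alloc(int key)
{
        struct entry *e, *new;

        pthread_mutex_lock(&lock);
        e = lookup(key);
        pthread_mutex_unlock(&lock);
        if (e)
                return e;

        /* allocate without holding the lock: the allocator may block */
        new = calloc(1, sizeof(*new));
        if (!new)
                return NULL;
        new->key = key;

        pthread_mutex_lock(&lock);
        e = lookup(key);                /* recheck: someone may have raced us */
        if (e) {
                pthread_mutex_unlock(&lock);
                free(new);              /* lost the race; discard our copy */
                return e;
        }
        new->next = head;               /* won the race: link it in */
        head = new;
        pthread_mutex_unlock(&lock);
        return new;
}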
@@ -1090,9 +1113,8 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops,
                 alter = 1;
         }
 
-retry_undos:
         if (undos) {
-                un = find_undo(ns, semid);
+                un = find_alloc_undo(ns, semid);
                 if (IS_ERR(un)) {
                         error = PTR_ERR(un);
                         goto out_free;
@@ -1107,14 +1129,14 @@ retry_undos:
         }
 
         /*
-         * semid identifiers are not unique - find_undo may have
+         * semid identifiers are not unique - find_alloc_undo may have
          * allocated an undo structure, it was invalidated by an RMID
-         * and now a new array with received the same id. Check and retry.
+         * and now a new array with received the same id. Check and fail.
          */
-        if (un && un->semid == -1) {
-                sem_unlock(sma);
-                goto retry_undos;
-        }
+        error = -EIDRM;
+        if (un && un->semid == -1)
+                goto out_unlock_free;
+
         error = -EFBIG;
         if (max >= sma->sem_nsems)
                 goto out_unlock_free;
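This is a small user-visible behavior change: where the old code silently retried when the undo structure had been invalidated by IPC_RMID (the retry_undos loop), semtimedop() now returns -EIDRM, which callers already have to handle for a plain removal. A hedged userspace sketch of handling it (dec_with_undo is a made-up helper name):

#include <errno.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>

/* Linux requires the caller to define union semun itself */
union semun { int val; struct semid_ds *buf; unsigned short *array; };

/* After this patch, a SEM_UNDO operation racing with IPC_RMID fails with
 * EIDRM instead of being retried against a possibly recycled semid. */
static int dec_with_undo(int semid)
{
        struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };

        if (semop(semid, &op, 1) == -1) {
                if (errno == EIDRM)
                        fprintf(stderr, "set %d was removed under us\n", semid);
                else
                        perror("semop");
                return -1;
        }
        return 0;
}

int main(void)
{
        int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
        union semun arg = { .val = 1 };

        if (semid == -1 || semctl(semid, 0, SETVAL, arg) == -1) {
                perror("setup");
                return 1;
        }
        dec_with_undo(semid);           /* succeeds: semval 1 -> 0 */
        semctl(semid, 0, IPC_RMID);     /* later semops would see EIDRM */
        return 0;
}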
@@ -1243,56 +1265,44 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
  */
 void exit_sem(struct task_struct *tsk)
 {
-        struct sem_undo_list *undo_list;
-        struct sem_undo *u, **up;
-        struct ipc_namespace *ns;
+        struct sem_undo_list *ulp;
+        struct sem_undo *un, *tmp;
 
-        undo_list = tsk->sysvsem.undo_list;
-        if (!undo_list)
+        ulp = tsk->sysvsem.undo_list;
+        if (!ulp)
                 return;
         tsk->sysvsem.undo_list = NULL;
 
-        if (!atomic_dec_and_test(&undo_list->refcnt))
+        if (!atomic_dec_and_test(&ulp->refcnt))
                 return;
 
-        ns = tsk->nsproxy->ipc_ns;
-        /* There's no need to hold the semundo list lock, as current
-         * is the last task exiting for this undo list.
-         */
-        for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) {
+        spin_lock(&ulp->lock);
+
+        list_for_each_entry_safe(un, tmp, &ulp->list_proc, list_proc) {
                 struct sem_array *sma;
-                int nsems, i;
-                struct sem_undo *un, **unp;
-                int semid;
-
-                semid = u->semid;
+                int i;
 
-                if(semid == -1)
-                        continue;
-                sma = sem_lock(ns, semid);
+                if (un->semid == -1)
+                        goto free;
+
+                sma = sem_lock(tsk->nsproxy->ipc_ns, un->semid);
                 if (IS_ERR(sma))
-                        continue;
+                        goto free;
 
-                if (u->semid == -1)
-                        goto next_entry;
+                if (un->semid == -1)
+                        goto unlock_free;
 
-                BUG_ON(sem_checkid(sma, u->semid));
+                BUG_ON(sem_checkid(sma, un->semid));
 
-                /* remove u from the sma->undo list */
-                for (unp = &sma->undo; (un = *unp); unp = &un->id_next) {
-                        if (u == un)
-                                goto found;
-                }
-                printk ("exit_sem undo list error id=%d\n", u->semid);
-                goto next_entry;
-found:
-                *unp = un->id_next;
-                /* perform adjustments registered in u */
-                nsems = sma->sem_nsems;
-                for (i = 0; i < nsems; i++) {
+                /* remove un from sma->list_id */
+                assert_spin_locked(&sma->sem_perm.lock);
+                list_del(&un->list_id);
+
+                /* perform adjustments registered in un */
+                for (i = 0; i < sma->sem_nsems; i++) {
                         struct sem * semaphore = &sma->sem_base[i];
-                        if (u->semadj[i]) {
-                                semaphore->semval += u->semadj[i];
+                        if (un->semadj[i]) {
+                                semaphore->semval += un->semadj[i];
                                 /*
                                  * Range checks of the new semaphore value,
                                  * not defined by sus:
@@ -1316,10 +1326,15 @@ found:
                 sma->sem_otime = get_seconds();
                 /* maybe some queued-up processes were waiting for this */
                 update_queue(sma);
-next_entry:
+unlock_free:
                 sem_unlock(sma);
+free:
+                assert_spin_locked(&ulp->lock);
+                list_del(&un->list_proc);
+                kfree(un);
         }
-        kfree(undo_list);
+        spin_unlock(&ulp->lock);
+        kfree(ulp);
 }
 
 #ifdef CONFIG_PROC_FS
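For completeness, the adjustment replay that exit_sem() performs here (semval += semadj[i] for each registered undo) is observable from userspace and is the complementary case to the SETVAL demo above: a SEM_UNDO operation records a compensating semadj, and process exit applies it back to semval. A small deterministic demo (Linux requires the caller to define union semun itself):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

union semun { int val; struct semid_ds *buf; unsigned short *array; };

int main(void)
{
        int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
        union semun arg = { .val = 3 };
        struct sembuf dec = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };

        if (semid == -1 || semctl(semid, 0, SETVAL, arg) == -1) {
                perror("setup");
                return 1;
        }

        if (fork() == 0) {                      /* child: semval 3 -> 2 */
                if (semop(semid, &dec, 1) == -1)
                        perror("semop");
                _exit(0);                       /* exit_sem() replays semadj (+1) */
        }
        wait(NULL);

        /* parent sees 3 again: the child's undo was applied at exit */
        printf("semval after child exit: %d\n", semctl(semid, 0, GETVAL));

        semctl(semid, 0, IPC_RMID);             /* clean up the set */
        return 0;
}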